Dataset schema (column name, type, and the length range or class count reported for each):

    repo              string (length 7 to 54)
    path              string (length 4 to 192)
    url               string (length 87 to 284)
    code              string (length 78 to 104k)
    code_tokens       sequence
    docstring         string (length 1 to 46.9k)
    docstring_tokens  sequence
    language          string (1 class)
    partition         string (3 classes)
basho/riak-python-client
riak/mapreduce.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L121-L144
def add_bucket(self, bucket, bucket_type=None):
    """
    Adds all keys in a bucket to the inputs.

    :param bucket: the bucket
    :type bucket: string
    :param bucket_type: Optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`RiakMapReduce`
    """
    if not riak.disable_list_exceptions:
        raise riak.ListError()

    self._input_mode = 'bucket'
    if isinstance(bucket, riak.RiakBucket):
        if bucket.bucket_type.is_default():
            self._inputs = {'bucket': bucket.name}
        else:
            self._inputs = {'bucket': [bucket.bucket_type.name,
                                       bucket.name]}
    elif bucket_type is not None and bucket_type != "default":
        self._inputs = {'bucket': [bucket_type, bucket]}
    else:
        self._inputs = {'bucket': bucket}
    return self
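A minimal usage sketch, hedged: it assumes `RiakClient` and `RiakMapReduce` are exported at the package top level, and the bucket names are hypothetical. Note that `add_bucket` raises `riak.ListError` unless list operations are explicitly enabled first.

    import riak

    riak.disable_list_exceptions = True   # otherwise add_bucket() raises riak.ListError
    client = riak.RiakClient()            # assumes a local node with default ports
    mr = riak.RiakMapReduce(client)
    mr.add_bucket('users', bucket_type='maps')
    # per the branches above, the inputs are now {'bucket': ['maps', 'users']}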
[ "def", "add_bucket", "(", "self", ",", "bucket", ",", "bucket_type", "=", "None", ")", ":", "if", "not", "riak", ".", "disable_list_exceptions", ":", "raise", "riak", ".", "ListError", "(", ")", "self", ".", "_input_mode", "=", "'bucket'", "if", "isinstance", "(", "bucket", ",", "riak", ".", "RiakBucket", ")", ":", "if", "bucket", ".", "bucket_type", ".", "is_default", "(", ")", ":", "self", ".", "_inputs", "=", "{", "'bucket'", ":", "bucket", ".", "name", "}", "else", ":", "self", ".", "_inputs", "=", "{", "'bucket'", ":", "[", "bucket", ".", "bucket_type", ".", "name", ",", "bucket", ".", "name", "]", "}", "elif", "bucket_type", "is", "not", "None", "and", "bucket_type", "!=", "\"default\"", ":", "self", ".", "_inputs", "=", "{", "'bucket'", ":", "[", "bucket_type", ",", "bucket", "]", "}", "else", ":", "self", ".", "_inputs", "=", "{", "'bucket'", ":", "bucket", "}", "return", "self" ]
Adds all keys in a bucket to the inputs. :param bucket: the bucket :type bucket: string :param bucket_type: Optional name of a bucket type :type bucket_type: string, None :rtype: :class:`RiakMapReduce`
[ "Adds", "all", "keys", "in", "a", "bucket", "to", "the", "inputs", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exportxml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exportxml.py#L423-L457
def add_node_element(self, node):
    """Add a (syntax category) <node> to the document graph.

    Parameters
    ----------
    node : etree.Element
        etree representation of a <node> element
        A <node> describes an element of a syntax tree.
        The root <node> element does not have a parent attribute,
        while non-root nodes do.

    Example
    -------
    <node xml:id="s1_505" cat="SIMPX" func="--">
        # this is the root of the syntax tree of the sentence, but
        # it is not the root node of the sentence, since there might
        # be nodes outside of the tree which are children of the
        # sentence root node (e.g. <word> elements representing a
        # quotation mark)
    <node xml:id="s1_501" cat="LK" func="-" parent="s1_505">
    """
    node_id = self.get_element_id(node)
    if 'parent' in node.attrib:
        parent_id = self.get_parent_id(node)
    else:
        # <node> is the root of the syntax tree of a sentence,
        # but it might be embedded in a <edu> or <edu-range>.
        # we want to attach it directly to the <sentence> element
        parent_id = self.get_sentence_id(node)

    self.add_node(node_id, layers={self.ns, self.ns+':syntax'},
                  attr_dict=self.element_attribs_to_dict(node),
                  label=node.attrib['cat'])
    self.add_edge(parent_id, node_id,
                  edge_type=dg.EdgeTypes.dominance_relation)
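The branch condition above can be checked in isolation; a standalone sketch using lxml, with the elements taken from the docstring example:

    from lxml import etree

    root = etree.fromstring('<node xml:id="s1_505" cat="SIMPX" func="--"/>')
    child = etree.fromstring('<node xml:id="s1_501" cat="LK" func="-" parent="s1_505"/>')
    print('parent' in root.attrib)   # False -> attached to the <sentence> element
    print('parent' in child.attrib)  # True  -> attached to its parent <node>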
[ "def", "add_node_element", "(", "self", ",", "node", ")", ":", "node_id", "=", "self", ".", "get_element_id", "(", "node", ")", "if", "'parent'", "in", "node", ".", "attrib", ":", "parent_id", "=", "self", ".", "get_parent_id", "(", "node", ")", "else", ":", "# <node> is the root of the syntax tree of a sentence,", "# but it might be embedded in a <edu> or <edu-range>.", "# we want to attach it directly to the <sentence> element", "parent_id", "=", "self", ".", "get_sentence_id", "(", "node", ")", "self", ".", "add_node", "(", "node_id", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':syntax'", "}", ",", "attr_dict", "=", "self", ".", "element_attribs_to_dict", "(", "node", ")", ",", "label", "=", "node", ".", "attrib", "[", "'cat'", "]", ")", "self", ".", "add_edge", "(", "parent_id", ",", "node_id", ",", "edge_type", "=", "dg", ".", "EdgeTypes", ".", "dominance_relation", ")" ]
Add a (syntax category) <node> to the document graph. Parameters ---------- node : etree.Element etree representation of a <node> element A <node> describes an element of a syntax tree. The root <node> element does not have a parent attribute, while non-root nodes do Example ------- <node xml:id="s1_505" cat="SIMPX" func="--"> <node xml:id="s1_501" cat="LK" func="-" parent="s1_505"> # this is the root of the syntax tree of the sentence, but # it is not the root node of the sentence, since there might # be nodes outside of the tree which are children of the # sentence root node (e.g. <word> elements representing a # quotation mark)
[ "Add", "a", "(", "syntax", "category", ")", "<node", ">", "to", "the", "document", "graph", "." ]
python
train
stevearc/dql
dql/engine.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L344-L357
def _parse_throttle(self, tablename, throttle):
    """ Parse a 'throttle' statement and return a RateLimit """
    amount = []
    desc = self.describe(tablename)
    throughputs = [desc.read_throughput, desc.write_throughput]
    for value, throughput in zip(throttle[1:], throughputs):
        if value == "*":
            amount.append(0)
        elif value[-1] == "%":
            amount.append(throughput * float(value[:-1]) / 100.0)
        else:
            amount.append(float(value))
    cap = Capacity(*amount)  # pylint: disable=E1120
    return RateLimit(total=cap, callback=self._on_throttle)
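The three accepted value forms can be exercised in isolation; a standalone sketch of the parsing rules, not dql's own API:

    def parse_throttle_value(value, throughput):
        # "*" means unlimited (0), "N%" scales the table throughput, else absolute
        if value == "*":
            return 0
        if value[-1] == "%":
            return throughput * float(value[:-1]) / 100.0
        return float(value)

    assert parse_throttle_value("*", 100) == 0
    assert parse_throttle_value("50%", 100) == 50.0
    assert parse_throttle_value("10", 100) == 10.0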
[ "def", "_parse_throttle", "(", "self", ",", "tablename", ",", "throttle", ")", ":", "amount", "=", "[", "]", "desc", "=", "self", ".", "describe", "(", "tablename", ")", "throughputs", "=", "[", "desc", ".", "read_throughput", ",", "desc", ".", "write_throughput", "]", "for", "value", ",", "throughput", "in", "zip", "(", "throttle", "[", "1", ":", "]", ",", "throughputs", ")", ":", "if", "value", "==", "\"*\"", ":", "amount", ".", "append", "(", "0", ")", "elif", "value", "[", "-", "1", "]", "==", "\"%\"", ":", "amount", ".", "append", "(", "throughput", "*", "float", "(", "value", "[", ":", "-", "1", "]", ")", "/", "100.0", ")", "else", ":", "amount", ".", "append", "(", "float", "(", "value", ")", ")", "cap", "=", "Capacity", "(", "*", "amount", ")", "# pylint: disable=E1120", "return", "RateLimit", "(", "total", "=", "cap", ",", "callback", "=", "self", ".", "_on_throttle", ")" ]
Parse a 'throttle' statement and return a RateLimit
[ "Parse", "a", "throttle", "statement", "and", "return", "a", "RateLimit" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__16bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__16bit.py#L279-L333
def _divu16(ins):
    ''' Divides 2 16bit unsigned integers. The result is pushed onto the stack.

        Optimizations:
          * If 2nd op is 1 then do nothing
          * If 2nd op is 2 then Shift Right Logical
    '''
    op1, op2 = tuple(ins.quad[2:])
    if is_int(op1) and int(op1) == 0:  # 0 / A = 0
        if op2[0] in ('_', '$'):
            output = []  # Optimization: Discard previous op if not from the stack
        else:
            output = _16bit_oper(op2)  # Normalize stack
        output.append('ld hl, 0')
        output.append('push hl')
        return output

    if is_int(op2):
        op = int16(op2)
        output = _16bit_oper(op1)

        if op2 == 0:  # A * 0 = 0 * A = 0
            if op1[0] in ('_', '$'):
                output = []  # Optimization: Discard previous op if not from the stack
            output.append('ld hl, 0')
            output.append('push hl')
            return output

        if op == 1:
            output.append('push hl')
            return output

        if op == 2:
            output.append('srl h')
            output.append('rr l')
            output.append('push hl')
            return output

        output.append('ld de, %i' % op)
    else:
        if op2[0] == '_':  # Optimization when 2nd operand is an id
            rev = True
            op1, op2 = op2, op1
        else:
            rev = False
        output = _16bit_oper(op1, op2, rev)

    output.append('call __DIVU16')
    output.append('push hl')
    REQUIRES.add('div16.asm')
    return output
[ "def", "_divu16", "(", "ins", ")", ":", "op1", ",", "op2", "=", "tuple", "(", "ins", ".", "quad", "[", "2", ":", "]", ")", "if", "is_int", "(", "op1", ")", "and", "int", "(", "op1", ")", "==", "0", ":", "# 0 / A = 0", "if", "op2", "[", "0", "]", "in", "(", "'_'", ",", "'$'", ")", ":", "output", "=", "[", "]", "# Optimization: Discard previous op if not from the stack", "else", ":", "output", "=", "_16bit_oper", "(", "op2", ")", "# Normalize stack", "output", ".", "append", "(", "'ld hl, 0'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "is_int", "(", "op2", ")", ":", "op", "=", "int16", "(", "op2", ")", "output", "=", "_16bit_oper", "(", "op1", ")", "if", "op2", "==", "0", ":", "# A * 0 = 0 * A = 0", "if", "op1", "[", "0", "]", "in", "(", "'_'", ",", "'$'", ")", ":", "output", "=", "[", "]", "# Optimization: Discard previous op if not from the stack", "output", ".", "append", "(", "'ld hl, 0'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "op", "==", "1", ":", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "if", "op", "==", "2", ":", "output", ".", "append", "(", "'srl h'", ")", "output", ".", "append", "(", "'rr l'", ")", "output", ".", "append", "(", "'push hl'", ")", "return", "output", "output", ".", "append", "(", "'ld de, %i'", "%", "op", ")", "else", ":", "if", "op2", "[", "0", "]", "==", "'_'", ":", "# Optimization when 2nd operand is an id", "rev", "=", "True", "op1", ",", "op2", "=", "op2", ",", "op1", "else", ":", "rev", "=", "False", "output", "=", "_16bit_oper", "(", "op1", ",", "op2", ",", "rev", ")", "output", ".", "append", "(", "'call __DIVU16'", ")", "output", ".", "append", "(", "'push hl'", ")", "REQUIRES", ".", "add", "(", "'div16.asm'", ")", "return", "output" ]
Divides 2 16bit unsigned integers. The result is pushed onto the stack. Optimizations: * If 2nd op is 1 then do nothing * If 2nd op is 2 then Shift Right Logical
[ "Divides", "2", "16bit", "unsigned", "integers", ".", "The", "result", "is", "pushed", "onto", "the", "stack", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/table.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/table.py#L472-L481
def _config_drag_cols(self, drag_cols):
    """Configure a new drag_cols state"""
    self._drag_cols = drag_cols
    # remove/display drag icon
    if self._drag_cols:
        self._im_drag.paste(self._im_draggable)
    else:
        self._im_drag.paste(self._im_not_draggable)
    self.focus_set()
    self.update_idletasks()
[ "def", "_config_drag_cols", "(", "self", ",", "drag_cols", ")", ":", "self", ".", "_drag_cols", "=", "drag_cols", "# remove/display drag icon", "if", "self", ".", "_drag_cols", ":", "self", ".", "_im_drag", ".", "paste", "(", "self", ".", "_im_draggable", ")", "else", ":", "self", ".", "_im_drag", ".", "paste", "(", "self", ".", "_im_not_draggable", ")", "self", ".", "focus_set", "(", ")", "self", ".", "update_idletasks", "(", ")" ]
Configure a new drag_cols state
[ "Configure", "a", "new", "drag_cols", "state" ]
python
train
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L1322-L1335
def add_file_opt(self, opt, filename, file_is_output_file=False):
    """
    Add a variable (macro) option for this node. If the option
    specified does not exist in the CondorJob, it is added so the submit
    file will be correct when written. The value of the option is also
    added to the list of input files for the DAX.
    @param opt: option name.
    @param filename: value of the option for this node in the DAG.
    @param file_is_output_file: A boolean if the file will be an output file
    instead of an input file.  The default is to have it be an input.
    """
    self.add_var_opt(opt, filename)
    if file_is_output_file:
        self.add_output_file(filename)
    else:
        self.add_input_file(filename)
[ "def", "add_file_opt", "(", "self", ",", "opt", ",", "filename", ",", "file_is_output_file", "=", "False", ")", ":", "self", ".", "add_var_opt", "(", "opt", ",", "filename", ")", "if", "file_is_output_file", ":", "self", ".", "add_output_file", "(", "filename", ")", "else", ":", "self", ".", "add_input_file", "(", "filename", ")" ]
Add a variable (macro) option for this node. If the option specified does not exist in the CondorJob, it is added so the submit file will be correct when written. The value of the option is also added to the list of input files for the DAX. @param opt: option name. @param value: value of the option for this node in the DAG. @param file_is_output_file: A boolean if the file will be an output file instead of an input file. The default is to have it be an input.
[ "Add", "a", "variable", "(", "macro", ")", "option", "for", "this", "node", ".", "If", "the", "option", "specified", "does", "not", "exist", "in", "the", "CondorJob", "it", "is", "added", "so", "the", "submit", "file", "will", "be", "correct", "when", "written", ".", "The", "value", "of", "the", "option", "is", "also", "added", "to", "the", "list", "of", "input", "files", "for", "the", "DAX", "." ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/parse_funcs.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/parse_funcs.py#L8-L22
def fbool(value):
    """boolean"""
    if isinstance(value, str_types):
        value = value.lower()
        if value == "false":
            value = False
        elif value == "true":
            value = True
        elif value:
            value = bool(float(value))
        else:
            raise ValueError("empty string")
    else:
        value = bool(float(value))
    return value
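A usage sketch, hedged: it assumes dclab is installed and importable via the record's path, and that `str_types` covers plain `str`.

    from dclab.parse_funcs import fbool

    assert fbool("true") is True
    assert fbool("False") is False
    assert fbool("0.0") is False    # non-empty string -> bool(float("0.0"))
    assert fbool(1) is True         # non-string -> bool(float(1))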
[ "def", "fbool", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str_types", ")", ":", "value", "=", "value", ".", "lower", "(", ")", "if", "value", "==", "\"false\"", ":", "value", "=", "False", "elif", "value", "==", "\"true\"", ":", "value", "=", "True", "elif", "value", ":", "value", "=", "bool", "(", "float", "(", "value", ")", ")", "else", ":", "raise", "ValueError", "(", "\"empty string\"", ")", "else", ":", "value", "=", "bool", "(", "float", "(", "value", ")", ")", "return", "value" ]
boolean
[ "boolean" ]
python
train
fredRos/pypmc
pypmc/tools/_doc.py
https://github.com/fredRos/pypmc/blob/9138b67c976f0d58edd080353d16769a47794d09/pypmc/tools/_doc.py#L5-L39
def _inherit_docstring(from_class):
    '''Private wrapper function to inherit docstrings from Base class

    Usage example:

    class mood(object):
        'describes typical reactions of a person in a specific mood'
        def how_are_you(self):
            'returns a typical answer to How are you? being in specific mood'
            raise NotImplementedError('No mood specified')

    class good_mood(mood):
        @_inherit_docstring(mood)
        def how_are_you(self):
            print 'Fine, thanks.'

    >>> help(good_mood.how_are_you)
    Help on method how_are_you in module __main__:

    how_are_you(self) unbound __main__.good_mood method
        returns a typical answer to How are you? being in specific mood

    '''
    def wrapper(method):
        funcname = method.__name__
        parent_doc = from_class.__dict__[funcname].__doc__
        if method.__doc__ is not None:
            method.__doc__ += '\n ' + parent_doc
        else:
            method.__doc__ = parent_doc
        return method
    return wrapper
[ "def", "_inherit_docstring", "(", "from_class", ")", ":", "def", "wrapper", "(", "method", ")", ":", "funcname", "=", "method", ".", "__name__", "parent_doc", "=", "from_class", ".", "__dict__", "[", "funcname", "]", ".", "__doc__", "if", "method", ".", "__doc__", "is", "not", "None", ":", "method", ".", "__doc__", "+=", "'\\n '", "+", "parent_doc", "else", ":", "method", ".", "__doc__", "=", "parent_doc", "return", "method", "return", "wrapper" ]
Private wrapper function to inherit docstrings from Base class Usage example: class mood(object): 'describes typical reactions of a person in a specific mood' def how_are_you(self): 'returns a typical answer to How are you? being in specific mood' raise NotImplementedError('No mood specified') class good_mood(mood): @_inherit_docstring(mood) def how_are_you(self): print 'Fine, thanks.' >>> help(good_mood.how_are_you) Help on method how_are_you in module __main__: how_are_you(self) unbound __main__.good_mood method returns a typical answer to How are you? being in specific mood
[ "Private", "wrapper", "function", "to", "inherit", "docstrings", "from", "Base", "class", "Usage", "example", ":", "class", "mood", "(", "object", ")", ":", "describes", "typical", "reactions", "of", "a", "person", "in", "a", "specific", "mood", "def", "how_are_you", "(", "self", ")", ":", "returns", "a", "typical", "answer", "to", "How", "are", "you?", "being", "in", "specific", "mood", "raise", "NotImplementedError", "(", "No", "mood", "specified", ")", "class", "good_mood", "(", "mood", ")", ":" ]
python
train
jleclanche/fireplace
fireplace/entity.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/entity.py#L79-L86
def get_damage(self, amount: int, target) -> int:
    """
    Override to modify the damage dealt to a target from the given amount.
    """
    if target.immune:
        self.log("%r is immune to %s for %i damage", target, self, amount)
        return 0
    return amount
[ "def", "get_damage", "(", "self", ",", "amount", ":", "int", ",", "target", ")", "->", "int", ":", "if", "target", ".", "immune", ":", "self", ".", "log", "(", "\"%r is immune to %s for %i damage\"", ",", "target", ",", "self", ",", "amount", ")", "return", "0", "return", "amount" ]
Override to modify the damage dealt to a target from the given amount.
[ "Override", "to", "modify", "the", "damage", "dealt", "to", "a", "target", "from", "the", "given", "amount", "." ]
python
train
tmontaigu/pylas
pylas/point/record.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L253-L282
def from_stream(cls, stream, point_format, count):
    """ Construct the point record by reading the points from the stream """
    points_dtype = point_format.dtype
    point_data_buffer = bytearray(stream.read(count * points_dtype.itemsize))

    try:
        data = np.frombuffer(point_data_buffer, dtype=points_dtype, count=count)
    except ValueError:
        expected_bytes_len = count * points_dtype.itemsize
        if len(point_data_buffer) % points_dtype.itemsize != 0:
            missing_bytes_len = expected_bytes_len - len(point_data_buffer)
            raise_not_enough_bytes_error(
                expected_bytes_len,
                missing_bytes_len,
                len(point_data_buffer),
                points_dtype,
            )
        else:
            actual_count = len(point_data_buffer) // points_dtype.itemsize
            logger.critical(
                "Expected {} points, there are {} ({} missing)".format(
                    count, actual_count, count - actual_count
                )
            )
            data = np.frombuffer(
                point_data_buffer, dtype=points_dtype, count=actual_count
            )

    return cls(data, point_format)
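The recovery branch can be reproduced in isolation: `np.frombuffer` raises `ValueError` when `count` asks for more bytes than the buffer holds. A standalone sketch of that behavior (not pylas itself):

    import numpy as np

    dtype = np.dtype([("x", "<i4"), ("y", "<i4")])
    buf = bytearray(np.zeros(3, dtype=dtype).tobytes())  # only 3 records available
    try:
        np.frombuffer(buf, dtype=dtype, count=5)          # ask for 5
    except ValueError:
        actual = len(buf) // dtype.itemsize               # 3 complete records
        data = np.frombuffer(buf, dtype=dtype, count=actual)
        print(len(data))  # 3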
[ "def", "from_stream", "(", "cls", ",", "stream", ",", "point_format", ",", "count", ")", ":", "points_dtype", "=", "point_format", ".", "dtype", "point_data_buffer", "=", "bytearray", "(", "stream", ".", "read", "(", "count", "*", "points_dtype", ".", "itemsize", ")", ")", "try", ":", "data", "=", "np", ".", "frombuffer", "(", "point_data_buffer", ",", "dtype", "=", "points_dtype", ",", "count", "=", "count", ")", "except", "ValueError", ":", "expected_bytes_len", "=", "count", "*", "points_dtype", ".", "itemsize", "if", "len", "(", "point_data_buffer", ")", "%", "points_dtype", ".", "itemsize", "!=", "0", ":", "missing_bytes_len", "=", "expected_bytes_len", "-", "len", "(", "point_data_buffer", ")", "raise_not_enough_bytes_error", "(", "expected_bytes_len", ",", "missing_bytes_len", ",", "len", "(", "point_data_buffer", ")", ",", "points_dtype", ",", ")", "else", ":", "actual_count", "=", "len", "(", "point_data_buffer", ")", "//", "points_dtype", ".", "itemsize", "logger", ".", "critical", "(", "\"Expected {} points, there are {} ({} missing)\"", ".", "format", "(", "count", ",", "actual_count", ",", "count", "-", "actual_count", ")", ")", "data", "=", "np", ".", "frombuffer", "(", "point_data_buffer", ",", "dtype", "=", "points_dtype", ",", "count", "=", "actual_count", ")", "return", "cls", "(", "data", ",", "point_format", ")" ]
Construct the point record by reading the points from the stream
[ "Construct", "the", "point", "record", "by", "reading", "the", "points", "from", "the", "stream" ]
python
test
DarkEnergySurvey/ugali
ugali/utils/logger.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/logger.py#L39-L45
def file_found(filename, force):
    """Check if a file exists"""
    if os.path.exists(filename) and not force:
        logger.info("Found %s; skipping..." % filename)
        return True
    else:
        return False
[ "def", "file_found", "(", "filename", ",", "force", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", "and", "not", "force", ":", "logger", ".", "info", "(", "\"Found %s; skipping...\"", "%", "filename", ")", "return", "True", "else", ":", "return", "False" ]
Check if a file exists
[ "Check", "if", "a", "file", "exists" ]
python
train
google/grr
grr/server/grr_response_server/hunt.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/hunt.py#L197-L207
def CreateHunt(hunt_obj):
  """Creates a hunt using a given hunt object."""
  data_store.REL_DB.WriteHuntObject(hunt_obj)

  if hunt_obj.HasField("output_plugins"):
    output_plugins_states = flow.GetOutputPluginStates(
        hunt_obj.output_plugins,
        source="hunts/%s" % hunt_obj.hunt_id,
        token=access_control.ACLToken(username=hunt_obj.creator))
    data_store.REL_DB.WriteHuntOutputPluginsStates(hunt_obj.hunt_id,
                                                   output_plugins_states)
[ "def", "CreateHunt", "(", "hunt_obj", ")", ":", "data_store", ".", "REL_DB", ".", "WriteHuntObject", "(", "hunt_obj", ")", "if", "hunt_obj", ".", "HasField", "(", "\"output_plugins\"", ")", ":", "output_plugins_states", "=", "flow", ".", "GetOutputPluginStates", "(", "hunt_obj", ".", "output_plugins", ",", "source", "=", "\"hunts/%s\"", "%", "hunt_obj", ".", "hunt_id", ",", "token", "=", "access_control", ".", "ACLToken", "(", "username", "=", "hunt_obj", ".", "creator", ")", ")", "data_store", ".", "REL_DB", ".", "WriteHuntOutputPluginsStates", "(", "hunt_obj", ".", "hunt_id", ",", "output_plugins_states", ")" ]
Creates a hunt using a given hunt object.
[ "Creates", "a", "hunt", "using", "a", "given", "hunt", "object", "." ]
python
train
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L7250-L7276
def plot(self, plot_cmd=None, tf=lambda y: y):
    """plot the data we have, return ``self``"""
    if not plot_cmd:
        plot_cmd = self.plot_cmd
    colors = 'bgrcmyk'
    pyplot.hold(False)
    res = self.res

    flatx, flatf = self.flattened()
    minf = np.inf
    for i in flatf:
        minf = min((minf, min(flatf[i])))
    addf = 1e-9 - minf if minf <= 1e-9 else 0
    for i in sorted(res.keys()):  # we plot not all values here
        if isinstance(i, int):
            color = colors[i % len(colors)]
            arx = sorted(res[i].keys())
            plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx],
                     color + '-')
            pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
            pyplot.hold(True)
            plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
    pyplot.ylabel('f + ' + str(addf))
    pyplot.draw()
    pyplot.ion()
    pyplot.show()
    # raw_input('press return')
    return self
[ "def", "plot", "(", "self", ",", "plot_cmd", "=", "None", ",", "tf", "=", "lambda", "y", ":", "y", ")", ":", "if", "not", "plot_cmd", ":", "plot_cmd", "=", "self", ".", "plot_cmd", "colors", "=", "'bgrcmyk'", "pyplot", ".", "hold", "(", "False", ")", "res", "=", "self", ".", "res", "flatx", ",", "flatf", "=", "self", ".", "flattened", "(", ")", "minf", "=", "np", ".", "inf", "for", "i", "in", "flatf", ":", "minf", "=", "min", "(", "(", "minf", ",", "min", "(", "flatf", "[", "i", "]", ")", ")", ")", "addf", "=", "1e-9", "-", "minf", "if", "minf", "<=", "1e-9", "else", "0", "for", "i", "in", "sorted", "(", "res", ".", "keys", "(", ")", ")", ":", "# we plot not all values here", "if", "isinstance", "(", "i", ",", "int", ")", ":", "color", "=", "colors", "[", "i", "%", "len", "(", "colors", ")", "]", "arx", "=", "sorted", "(", "res", "[", "i", "]", ".", "keys", "(", ")", ")", "plot_cmd", "(", "arx", ",", "[", "tf", "(", "np", ".", "median", "(", "res", "[", "i", "]", "[", "x", "]", ")", "+", "addf", ")", "for", "x", "in", "arx", "]", ",", "color", "+", "'-'", ")", "pyplot", ".", "text", "(", "arx", "[", "-", "1", "]", ",", "tf", "(", "np", ".", "median", "(", "res", "[", "i", "]", "[", "arx", "[", "-", "1", "]", "]", ")", ")", ",", "i", ")", "pyplot", ".", "hold", "(", "True", ")", "plot_cmd", "(", "flatx", "[", "i", "]", ",", "tf", "(", "np", ".", "array", "(", "flatf", "[", "i", "]", ")", "+", "addf", ")", ",", "color", "+", "'o'", ")", "pyplot", ".", "ylabel", "(", "'f + '", "+", "str", "(", "addf", ")", ")", "pyplot", ".", "draw", "(", ")", "pyplot", ".", "ion", "(", ")", "pyplot", ".", "show", "(", ")", "# raw_input('press return')", "return", "self" ]
plot the data we have, return ``self``
[ "plot", "the", "data", "we", "have", "return", "self" ]
python
train
istresearch/scrapy-cluster
crawler/crawling/pipelines.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/pipelines.py#L138-L153
def _clean_item(self, item):
    '''
    Cleans the item to be logged
    '''
    item_copy = dict(item)
    del item_copy['body']
    del item_copy['links']
    del item_copy['response_headers']
    del item_copy['request_headers']
    del item_copy['status_code']
    del item_copy['status_msg']
    item_copy['action'] = 'ack'
    item_copy['logger'] = self.logger.name

    return item_copy
[ "def", "_clean_item", "(", "self", ",", "item", ")", ":", "item_copy", "=", "dict", "(", "item", ")", "del", "item_copy", "[", "'body'", "]", "del", "item_copy", "[", "'links'", "]", "del", "item_copy", "[", "'response_headers'", "]", "del", "item_copy", "[", "'request_headers'", "]", "del", "item_copy", "[", "'status_code'", "]", "del", "item_copy", "[", "'status_msg'", "]", "item_copy", "[", "'action'", "]", "=", "'ack'", "item_copy", "[", "'logger'", "]", "=", "self", ".", "logger", ".", "name", "item_copy", "return", "item_copy" ]
Cleans the item to be logged
[ "Cleans", "the", "item", "to", "be", "logged" ]
python
train
tdegeus/GooseMPL
GooseMPL/__init__.py
https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1080-L1129
def hist(P, edges, **kwargs):
    r'''
    Plot histogram.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert -> list of Polygons
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p],
            [xl, p],
        ])
        poly.append(Polygon(coor))
    args = (poly)

    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]),
                       xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]),
                       ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
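A usage sketch, hedged: the import alias is an assumption, and the `(P, edges)` pair comes straight from `numpy.histogram`, whose return values match this signature.

    import numpy as np
    import matplotlib.pyplot as plt
    import GooseMPL as gplt  # assumed import name, per the record's path

    P, edges = np.histogram(np.random.randn(1000), bins=20, density=True)
    gplt.hist(P, edges, facecolor='c')
    plt.show()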
[ "def", "hist", "(", "P", ",", "edges", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", ".", "collections", "import", "PatchCollection", "from", "matplotlib", ".", "patches", "import", "Polygon", "# extract local options", "axis", "=", "kwargs", ".", "pop", "(", "'axis'", ",", "plt", ".", "gca", "(", ")", ")", "cindex", "=", "kwargs", ".", "pop", "(", "'cindex'", ",", "None", ")", "autoscale", "=", "kwargs", ".", "pop", "(", "'autoscale'", ",", "True", ")", "# set defaults", "kwargs", ".", "setdefault", "(", "'edgecolor'", ",", "'k'", ")", "# no color-index -> set transparent", "if", "cindex", "is", "None", ":", "kwargs", ".", "setdefault", "(", "'facecolor'", ",", "(", "0.", ",", "0.", ",", "0.", ",", "0.", ")", ")", "# convert -> list of Polygons", "poly", "=", "[", "]", "for", "p", ",", "xl", ",", "xu", "in", "zip", "(", "P", ",", "edges", "[", ":", "-", "1", "]", ",", "edges", "[", "1", ":", "]", ")", ":", "coor", "=", "np", ".", "array", "(", "[", "[", "xl", ",", "0.", "]", ",", "[", "xu", ",", "0.", "]", ",", "[", "xu", ",", "p", "]", ",", "[", "xl", ",", "p", "]", ",", "]", ")", "poly", ".", "append", "(", "Polygon", "(", "coor", ")", ")", "args", "=", "(", "poly", ")", "# convert patches -> matplotlib-objects", "p", "=", "PatchCollection", "(", "args", ",", "*", "*", "kwargs", ")", "# add colors to patches", "if", "cindex", "is", "not", "None", ":", "p", ".", "set_array", "(", "cindex", ")", "# add patches to axis", "axis", ".", "add_collection", "(", "p", ")", "# rescale the axes manually", "if", "autoscale", ":", "# - get limits", "xlim", "=", "[", "edges", "[", "0", "]", ",", "edges", "[", "-", "1", "]", "]", "ylim", "=", "[", "0", ",", "np", ".", "max", "(", "P", ")", "]", "# - set limits +/- 10% extra margin", "axis", ".", "set_xlim", "(", "[", "xlim", "[", "0", "]", "-", ".1", "*", "(", "xlim", "[", "1", "]", "-", "xlim", "[", "0", "]", ")", ",", "xlim", "[", "1", "]", "+", ".1", "*", "(", "xlim", "[", "1", "]", "-", "xlim", "[", "0", "]", ")", "]", ")", "axis", ".", "set_ylim", "(", "[", "ylim", "[", "0", "]", "-", ".1", "*", "(", "ylim", "[", "1", "]", "-", "ylim", "[", "0", "]", ")", ",", "ylim", "[", "1", "]", "+", ".1", "*", "(", "ylim", "[", "1", "]", "-", "ylim", "[", "0", "]", ")", "]", ")", "return", "p" ]
r''' Plot histogram.
[ "r", "Plot", "histogram", "." ]
python
train
deanmalmgren/textract
textract/parsers/html_parser.py
https://github.com/deanmalmgren/textract/blob/117ea191d93d80321e4bf01f23cc1ac54d69a075/textract/parsers/html_parser.py#L27-L34
def _visible(self, element):
    """Used to filter text elements that have invisible text on the page.
    """
    if element.name in self._disallowed_names:
        return False
    elif re.match(u'<!--.*-->', six.text_type(element.extract())):
        return False
    return True
[ "def", "_visible", "(", "self", ",", "element", ")", ":", "if", "element", ".", "name", "in", "self", ".", "_disallowed_names", ":", "return", "False", "elif", "re", ".", "match", "(", "u'<!--.*-->'", ",", "six", ".", "text_type", "(", "element", ".", "extract", "(", ")", ")", ")", ":", "return", "False", "return", "True" ]
Used to filter text elements that have invisible text on the page.
[ "Used", "to", "filter", "text", "elements", "that", "have", "invisible", "text", "on", "the", "page", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L548-L603
def conv_block(name, x, mid_channels, dilations=None, activation="relu",
               dropout=0.0):
  """2 layer conv block used in the affine coupling layer.

  Args:
    name: variable scope.
    x: 4-D or 5-D Tensor.
    mid_channels: Output channels of the second layer.
    dilations: Optional, list of integers.
    activation: relu or gatu.
      If relu, the second layer is relu(W*x)
      If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
    dropout: Dropout probability.
  Returns:
    x: 4-D Tensor: Output activations.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):

    x_shape = common_layers.shape_list(x)
    is_2d = len(x_shape) == 4
    num_steps = x_shape[1]
    if is_2d:
      first_filter = [3, 3]
      second_filter = [1, 1]
    else:
      # special case when number of steps equal 1 to avoid
      # padding.
      if num_steps == 1:
        first_filter = [1, 3, 3]
      else:
        first_filter = [2, 3, 3]
      second_filter = [1, 1, 1]

    # Edge Padding + conv2d + actnorm + relu:
    # [output: 512 channels]
    x = conv("1_1", x, output_channels=mid_channels,
             filter_size=first_filter, dilations=dilations)
    x = tf.nn.relu(x)
    x = get_dropout(x, rate=dropout)

    # Padding + conv2d + actnorm + activation.
    # [input, output: 512 channels]
    if activation == "relu":
      x = conv("1_2", x, output_channels=mid_channels,
               filter_size=second_filter, dilations=dilations)
      x = tf.nn.relu(x)
    elif activation == "gatu":
      # x = tanh(w1*x) * sigm(w2*x)
      x_tanh = conv("1_tanh", x, output_channels=mid_channels,
                    filter_size=second_filter, dilations=dilations)
      x_sigm = conv("1_sigm", x, output_channels=mid_channels,
                    filter_size=second_filter, dilations=dilations)
      x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)

    x = get_dropout(x, rate=dropout)
    return x
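A hedged call sketch (TF1 graph mode, matching the `tf.variable_scope` usage above; the shapes are hypothetical, and the function's own `conv`/`get_dropout` helpers must be in scope from the same module):

    import tensorflow as tf  # TF1-style API

    x = tf.random_normal([16, 32, 32, 64])      # 4-D input -> the is_2d branch
    y = conv_block("cb", x, mid_channels=512,
                   activation="gatu", dropout=0.1)
    # y keeps the spatial shape and has 512 output channels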
[ "def", "conv_block", "(", "name", ",", "x", ",", "mid_channels", ",", "dilations", "=", "None", ",", "activation", "=", "\"relu\"", ",", "dropout", "=", "0.0", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "is_2d", "=", "len", "(", "x_shape", ")", "==", "4", "num_steps", "=", "x_shape", "[", "1", "]", "if", "is_2d", ":", "first_filter", "=", "[", "3", ",", "3", "]", "second_filter", "=", "[", "1", ",", "1", "]", "else", ":", "# special case when number of steps equal 1 to avoid", "# padding.", "if", "num_steps", "==", "1", ":", "first_filter", "=", "[", "1", ",", "3", ",", "3", "]", "else", ":", "first_filter", "=", "[", "2", ",", "3", ",", "3", "]", "second_filter", "=", "[", "1", ",", "1", ",", "1", "]", "# Edge Padding + conv2d + actnorm + relu:", "# [output: 512 channels]", "x", "=", "conv", "(", "\"1_1\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "first_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "relu", "(", "x", ")", "x", "=", "get_dropout", "(", "x", ",", "rate", "=", "dropout", ")", "# Padding + conv2d + actnorm + activation.", "# [input, output: 512 channels]", "if", "activation", "==", "\"relu\"", ":", "x", "=", "conv", "(", "\"1_2\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "relu", "(", "x", ")", "elif", "activation", "==", "\"gatu\"", ":", "# x = tanh(w1*x) * sigm(w2*x)", "x_tanh", "=", "conv", "(", "\"1_tanh\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x_sigm", "=", "conv", "(", "\"1_sigm\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "tanh", "(", "x_tanh", ")", "*", "tf", ".", "nn", ".", "sigmoid", "(", "x_sigm", ")", "x", "=", "get_dropout", "(", "x", ",", "rate", "=", "dropout", ")", "return", "x" ]
2 layer conv block used in the affine coupling layer. Args: name: variable scope. x: 4-D or 5-D Tensor. mid_channels: Output channels of the second layer. dilations: Optional, list of integers. activation: relu or gatu. If relu, the second layer is relu(W*x) If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) dropout: Dropout probability. Returns: x: 4-D Tensor: Output activations.
[ "2", "layer", "conv", "block", "used", "in", "the", "affine", "coupling", "layer", "." ]
python
train
Robpol86/libnl
libnl/genl/family.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/family.py#L171-L184
def genl_family_add_grp(family, id_, name):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/family.c#L366.

    Positional arguments:
    family -- Generic Netlink family object (genl_family class instance).
    id_ -- new numeric identifier (integer).
    name -- new human readable name (string).

    Returns:
    0
    """
    grp = genl_family_grp(id_=id_, name=name)
    nl_list_add_tail(grp.list_, family.gf_mc_grps)
    return 0
[ "def", "genl_family_add_grp", "(", "family", ",", "id_", ",", "name", ")", ":", "grp", "=", "genl_family_grp", "(", "id_", "=", "id_", ",", "name", "=", "name", ")", "nl_list_add_tail", "(", "grp", ".", "list_", ",", "family", ".", "gf_mc_grps", ")", "return", "0" ]
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/family.c#L366. Positional arguments: family -- Generic Netlink family object (genl_family class instance). id_ -- new numeric identifier (integer). name -- new human readable name (string). Returns: 0
[ "https", ":", "//", "github", ".", "com", "/", "thom311", "/", "libnl", "/", "blob", "/", "libnl3_2_25", "/", "lib", "/", "genl", "/", "family", ".", "c#L366", "." ]
python
train
saltstack/salt
salt/utils/verify.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/verify.py#L120-L146
def verify_socket(interface, pub_port, ret_port):
    '''
    Attempt to bind to the sockets to verify that they are available
    '''
    addr_family = lookup_family(interface)
    for port in pub_port, ret_port:
        sock = socket.socket(addr_family, socket.SOCK_STREAM)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((interface, int(port)))
        except Exception as exc:
            msg = 'Unable to bind socket {0}:{1}'.format(interface, port)
            if exc.args:
                msg = '{0}, error: {1}'.format(msg, str(exc))
            else:
                msg = '{0}, this might not be a problem.'.format(msg)
            msg += '; Is there another salt-master running?'
            if is_console_configured():
                log.warning(msg)
            else:
                sys.stderr.write('WARNING: {0}\n'.format(msg))
            return False
        finally:
            sock.close()
    return True
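The bind test generalizes; a standalone sketch of the same idea using only the stdlib (IPv4 only, for brevity):

    import socket

    def port_free(interface, port):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((interface, int(port)))
        except OSError:
            return False
        finally:
            sock.close()
        return True

    print(port_free('127.0.0.1', 4505))  # 4505/4506 are salt's default ports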
[ "def", "verify_socket", "(", "interface", ",", "pub_port", ",", "ret_port", ")", ":", "addr_family", "=", "lookup_family", "(", "interface", ")", "for", "port", "in", "pub_port", ",", "ret_port", ":", "sock", "=", "socket", ".", "socket", "(", "addr_family", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "sock", ".", "bind", "(", "(", "interface", ",", "int", "(", "port", ")", ")", ")", "except", "Exception", "as", "exc", ":", "msg", "=", "'Unable to bind socket {0}:{1}'", ".", "format", "(", "interface", ",", "port", ")", "if", "exc", ".", "args", ":", "msg", "=", "'{0}, error: {1}'", ".", "format", "(", "msg", ",", "str", "(", "exc", ")", ")", "else", ":", "msg", "=", "'{0}, this might not be a problem.'", ".", "format", "(", "msg", ")", "msg", "+=", "'; Is there another salt-master running?'", "if", "is_console_configured", "(", ")", ":", "log", ".", "warning", "(", "msg", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "'WARNING: {0}\\n'", ".", "format", "(", "msg", ")", ")", "return", "False", "finally", ":", "sock", ".", "close", "(", ")", "return", "True" ]
Attempt to bind to the sockets to verify that they are available
[ "Attempt", "to", "bind", "to", "the", "sockets", "to", "verify", "that", "they", "are", "available" ]
python
train
fm4d/KickassAPI
KickassAPI.py
https://github.com/fm4d/KickassAPI/blob/6ecc6846dcec0d6f6e493bf776031aa92d55604f/KickassAPI.py#L322-L326
def all(self):
    """
    Yield torrents in range from current page to last page
    """
    return self.pages(self.url.page, self.url.max_page)
[ "def", "all", "(", "self", ")", ":", "return", "self", ".", "pages", "(", "self", ".", "url", ".", "page", ",", "self", ".", "url", ".", "max_page", ")" ]
Yield torrents in range from current page to last page
[ "Yield", "torrents", "in", "range", "from", "current", "page", "to", "last", "page" ]
python
train
bitesofcode/projexui
projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py#L152-L167
def groupIcon(cls, groupName, default=None):
    """
    Returns the icon for the input group name.

    :param      groupName | <str>
                default   | <str> || None

    :return     <str>
    """
    if cls._groupIcons is None:
        cls._groupIcons = {}

    if not default:
        default = projexui.resources.find('img/settings_32.png')

    return cls._groupIcons.get(nativestring(groupName), default)
[ "def", "groupIcon", "(", "cls", ",", "groupName", ",", "default", "=", "None", ")", ":", "if", "(", "cls", ".", "_groupIcons", "is", "None", ")", ":", "cls", ".", "_groupIcons", "=", "{", "}", "if", "(", "not", "default", ")", ":", "default", "=", "projexui", ".", "resources", ".", "find", "(", "'img/settings_32.png'", ")", "return", "cls", ".", "_groupIcons", ".", "get", "(", "nativestring", "(", "groupName", ")", ",", "default", ")" ]
Returns the icon for the inputed group name. :param groupName | <str> default | <str> || None :return <str>
[ "Returns", "the", "icon", "for", "the", "inputed", "group", "name", ".", ":", "param", "groupName", "|", "<str", ">", "default", "|", "<str", ">", "||", "None", ":", "return", "<str", ">" ]
python
train
quodlibet/mutagen
mutagen/_util.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_util.py#L942-L971
def encode_endian(text, encoding, errors="strict", le=True):
    """Like text.encode(encoding) but always returns little endian/big endian
    BOMs instead of the system one.

    Args:
        text (text)
        encoding (str)
        errors (str)
        le (boolean): if little endian
    Returns:
        bytes
    Raises:
        UnicodeEncodeError
        LookupError
    """

    encoding = codecs.lookup(encoding).name

    if encoding == "utf-16":
        if le:
            return codecs.BOM_UTF16_LE + text.encode("utf-16-le", errors)
        else:
            return codecs.BOM_UTF16_BE + text.encode("utf-16-be", errors)
    elif encoding == "utf-32":
        if le:
            return codecs.BOM_UTF32_LE + text.encode("utf-32-le", errors)
        else:
            return codecs.BOM_UTF32_BE + text.encode("utf-32-be", errors)
    else:
        return text.encode(encoding, errors)
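A usage sketch, hedged: `mutagen._util` is a private module, so the import path may change between releases.

    import codecs
    from mutagen._util import encode_endian

    assert encode_endian(u"abc", "utf-16", le=True).startswith(codecs.BOM_UTF16_LE)
    assert encode_endian(u"abc", "utf-16", le=False).startswith(codecs.BOM_UTF16_BE)
    assert encode_endian(u"abc", "latin-1") == b"abc"  # non-UTF-16/32 passes through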
[ "def", "encode_endian", "(", "text", ",", "encoding", ",", "errors", "=", "\"strict\"", ",", "le", "=", "True", ")", ":", "encoding", "=", "codecs", ".", "lookup", "(", "encoding", ")", ".", "name", "if", "encoding", "==", "\"utf-16\"", ":", "if", "le", ":", "return", "codecs", ".", "BOM_UTF16_LE", "+", "text", ".", "encode", "(", "\"utf-16-le\"", ",", "errors", ")", "else", ":", "return", "codecs", ".", "BOM_UTF16_BE", "+", "text", ".", "encode", "(", "\"utf-16-be\"", ",", "errors", ")", "elif", "encoding", "==", "\"utf-32\"", ":", "if", "le", ":", "return", "codecs", ".", "BOM_UTF32_LE", "+", "text", ".", "encode", "(", "\"utf-32-le\"", ",", "errors", ")", "else", ":", "return", "codecs", ".", "BOM_UTF32_BE", "+", "text", ".", "encode", "(", "\"utf-32-be\"", ",", "errors", ")", "else", ":", "return", "text", ".", "encode", "(", "encoding", ",", "errors", ")" ]
Like text.encode(encoding) but always returns little endian/big endian BOMs instead of the system one. Args: text (text) encoding (str) errors (str) le (boolean): if little endian Returns: bytes Raises: UnicodeEncodeError LookupError
[ "Like", "text", ".", "encode", "(", "encoding", ")", "but", "always", "returns", "little", "endian", "/", "big", "endian", "BOMs", "instead", "of", "the", "system", "one", "." ]
python
train
kstaniek/condoor
condoor/utils.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L49-L82
def is_reachable(host, port=23):
    """Check reachability for specified hostname/port.

    It tries to open TCP socket.
    It supports IPv6.

    :param host: hostname or ip address string
    :type host: str
    :param port: tcp port number
    :type port: number
    :return: True if host is reachable else False
    """
    try:
        addresses = socket.getaddrinfo(
            host, port, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
    except socket.gaierror:
        return False

    for family, _, _, _, sockaddr in addresses:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.settimeout(5)
        try:
            sock.connect(sockaddr)
        except IOError:
            continue

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        # Wait 2 sec for socket to shutdown
        time.sleep(2)
        break
    else:
        return False

    return True
[ "def", "is_reachable", "(", "host", ",", "port", "=", "23", ")", ":", "try", ":", "addresses", "=", "socket", ".", "getaddrinfo", "(", "host", ",", "port", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_STREAM", ")", "except", "socket", ".", "gaierror", ":", "return", "False", "for", "family", ",", "_", ",", "_", ",", "_", ",", "sockaddr", "in", "addresses", ":", "sock", "=", "socket", ".", "socket", "(", "family", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "settimeout", "(", "5", ")", "try", ":", "sock", ".", "connect", "(", "sockaddr", ")", "except", "IOError", ":", "continue", "sock", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "sock", ".", "close", "(", ")", "# Wait 2 sec for socket to shutdown", "time", ".", "sleep", "(", "2", ")", "break", "else", ":", "return", "False", "return", "True" ]
Check reachability for specified hostname/port. It tries to open TCP socket. It supports IPv6. :param host: hostname or ip address string :rtype: str :param port: tcp port number :rtype: number :return: True if host is reachable else false
[ "Check", "reachability", "for", "specified", "hostname", "/", "port", "." ]
python
train
wummel/patool
patoolib/programs/py_gzip.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/py_gzip.py#L24-L37
def extract_gzip(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a GZIP archive with the gzip Python module."""
    targetname = util.get_single_outfile(outdir, archive)
    try:
        with gzip.GzipFile(archive) as gzipfile:
            with open(targetname, 'wb') as targetfile:
                data = gzipfile.read(READ_SIZE_BYTES)
                while data:
                    targetfile.write(data)
                    data = gzipfile.read(READ_SIZE_BYTES)
    except Exception as err:
        msg = "error extracting %s to %s: %s" % (archive, targetname, err)
        raise util.PatoolError(msg)
    return None
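The same streaming copy can be written with the stdlib alone; a minimal sketch (the archive name is hypothetical):

    import gzip
    import shutil

    with gzip.GzipFile('example.gz') as src, open('example', 'wb') as dst:
        shutil.copyfileobj(src, dst, length=64 * 1024)  # chunked, like READ_SIZE_BYTES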
[ "def", "extract_gzip", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ",", "outdir", ")", ":", "targetname", "=", "util", ".", "get_single_outfile", "(", "outdir", ",", "archive", ")", "try", ":", "with", "gzip", ".", "GzipFile", "(", "archive", ")", "as", "gzipfile", ":", "with", "open", "(", "targetname", ",", "'wb'", ")", "as", "targetfile", ":", "data", "=", "gzipfile", ".", "read", "(", "READ_SIZE_BYTES", ")", "while", "data", ":", "targetfile", ".", "write", "(", "data", ")", "data", "=", "gzipfile", ".", "read", "(", "READ_SIZE_BYTES", ")", "except", "Exception", "as", "err", ":", "msg", "=", "\"error extracting %s to %s: %s\"", "%", "(", "archive", ",", "targetname", ",", "err", ")", "raise", "util", ".", "PatoolError", "(", "msg", ")", "return", "None" ]
Extract a GZIP archive with the gzip Python module.
[ "Extract", "a", "GZIP", "archive", "with", "the", "gzip", "Python", "module", "." ]
python
train
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L100-L120
def _eight_byte_real_to_float(value):
    """
    Convert a number from GDSII 8 byte real format to float.

    Parameters
    ----------
    value : string
        The GDSII binary string representation of the number.

    Returns
    -------
    out : float
        The number represented by ``value``.
    """
    short1, short2, long3 = struct.unpack('>HHL', value)
    exponent = (short1 & 0x7f00) // 256 - 64
    mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296
                + long3) / 72057594037927936.0
    if short1 & 0x8000:
        return -mantissa * 16.**exponent
    return mantissa * 16.**exponent
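A quick check with a value worked out from the format's definition: for 1.0, the excess-64 exponent field is 65 (0x41) and the mantissa is 1/16, giving the bytes below.

    import struct

    # 1.0 in GDSII 8-byte real: sign 0, exponent 65 (0x41), mantissa 0x100000... = 1/16
    value = struct.pack('>HHL', 0x4110, 0x0000, 0x00000000)
    assert _eight_byte_real_to_float(value) == 1.0  # assumes the function above is in scope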
[ "def", "_eight_byte_real_to_float", "(", "value", ")", ":", "short1", ",", "short2", ",", "long3", "=", "struct", ".", "unpack", "(", "'>HHL'", ",", "value", ")", "exponent", "=", "(", "short1", "&", "0x7f00", ")", "//", "256", "-", "64", "mantissa", "=", "(", "(", "(", "short1", "&", "0x00ff", ")", "*", "65536", "+", "short2", ")", "*", "4294967296", "+", "long3", ")", "/", "72057594037927936.0", "if", "short1", "&", "0x8000", ":", "return", "-", "mantissa", "*", "16.", "**", "exponent", "return", "mantissa", "*", "16.", "**", "exponent" ]
Convert a number from GDSII 8 byte real format to float. Parameters ---------- value : string The GDSII binary string representation of the number. Returns ------- out : float The number represented by ``value``.
[ "Convert", "a", "number", "from", "GDSII", "8", "byte", "real", "format", "to", "float", "." ]
python
train
dossier/dossier.web
dossier/web/label_folders.py
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L157-L185
def items(self, folder_id, subfolder_id, ann_id=None):
    '''Yields an unordered generator of items in a subfolder.

    The generator yields items, which are represented by a tuple
    of ``content_id`` and ``subtopic_id``. The format of these
    identifiers is unspecified.

    By default (with ``ann_id=None``), subfolders are shown for all
    anonymous users. Optionally, ``ann_id`` can be set to a username,
    which restricts the list to only subfolders owned by that user.

    :param str folder_id: Folder id
    :param str subfolder_id: Subfolder id
    :param str ann_id: Username
    :rtype: generator of ``(content_id, subtopic_id)``
    '''
    self.assert_valid_folder_id(folder_id)
    self.assert_valid_folder_id(subfolder_id)
    ann_id = self._annotator(ann_id)
    folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
    subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
    ident = (folder_cid, subfolder_sid)

    if self.store.get(folder_cid) is None:
        raise KeyError(folder_id)

    for lab in self.label_store.directly_connected(ident):
        cid = lab.other(folder_cid)
        subid = lab.subtopic_for(cid)
        yield (cid, subid)
[ "def", "items", "(", "self", ",", "folder_id", ",", "subfolder_id", ",", "ann_id", "=", "None", ")", ":", "self", ".", "assert_valid_folder_id", "(", "folder_id", ")", "self", ".", "assert_valid_folder_id", "(", "subfolder_id", ")", "ann_id", "=", "self", ".", "_annotator", "(", "ann_id", ")", "folder_cid", "=", "self", ".", "wrap_folder_content_id", "(", "ann_id", ",", "folder_id", ")", "subfolder_sid", "=", "self", ".", "wrap_subfolder_subtopic_id", "(", "subfolder_id", ")", "ident", "=", "(", "folder_cid", ",", "subfolder_sid", ")", "if", "self", ".", "store", ".", "get", "(", "folder_cid", ")", "is", "None", ":", "raise", "KeyError", "(", "folder_id", ")", "for", "lab", "in", "self", ".", "label_store", ".", "directly_connected", "(", "ident", ")", ":", "cid", "=", "lab", ".", "other", "(", "folder_cid", ")", "subid", "=", "lab", ".", "subtopic_for", "(", "cid", ")", "yield", "(", "cid", ",", "subid", ")" ]
Yields an unodered generator of items in a subfolder. The generator yields items, which are represented by a tuple of ``content_id`` and ``subtopic_id``. The format of these identifiers is unspecified. By default (with ``ann_id=None``), subfolders are shown for all anonymous users. Optionally, ``ann_id`` can be set to a username, which restricts the list to only subfolders owned by that user. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id)``
[ "Yields", "an", "unodered", "generator", "of", "items", "in", "a", "subfolder", "." ]
python
train
minio/minio-py
minio/api.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L840-L908
def list_objects(self, bucket_name, prefix='', recursive=False):
    """
    List objects in the given bucket.

    Examples:
        objects = minio.list_objects('foo')
        for current_object in objects:
            print(current_object)
        # hello
        # hello/
        # hello/
        # world/

        objects = minio.list_objects('foo', prefix='hello/')
        for current_object in objects:
            print(current_object)
        # hello/world/

        objects = minio.list_objects('foo', recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # world/world/2
        # ...

        objects = minio.list_objects('foo', prefix='hello/', recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # hello/world/2

    :param bucket_name: Bucket to list objects from
    :param prefix: String specifying objects returned must begin with
    :param recursive: If yes, returns all objects for a specified prefix
    :return: An iterator of objects in alphabetical order.
    """
    is_valid_bucket_name(bucket_name)

    # If someone explicitly set prefix to None convert it to empty string.
    if prefix is None:
        prefix = ''

    method = 'GET'

    # Initialize query parameters.
    query = {
        'max-keys': '1000',
        'prefix': prefix
    }
    # Delimited by default.
    if not recursive:
        query['delimiter'] = '/'

    marker = ''
    is_truncated = True
    while is_truncated:
        if marker:
            query['marker'] = marker
        headers = {}
        response = self._url_open(method,
                                  bucket_name=bucket_name,
                                  query=query,
                                  headers=headers)
        objects, is_truncated, marker = parse_list_objects(
            response.data, bucket_name=bucket_name)
        for obj in objects:
            yield obj
[ "def", "list_objects", "(", "self", ",", "bucket_name", ",", "prefix", "=", "''", ",", "recursive", "=", "False", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "# If someone explicitly set prefix to None convert it to empty string.", "if", "prefix", "is", "None", ":", "prefix", "=", "''", "method", "=", "'GET'", "# Initialize query parameters.", "query", "=", "{", "'max-keys'", ":", "'1000'", ",", "'prefix'", ":", "prefix", "}", "# Delimited by default.", "if", "not", "recursive", ":", "query", "[", "'delimiter'", "]", "=", "'/'", "marker", "=", "''", "is_truncated", "=", "True", "while", "is_truncated", ":", "if", "marker", ":", "query", "[", "'marker'", "]", "=", "marker", "headers", "=", "{", "}", "response", "=", "self", ".", "_url_open", "(", "method", ",", "bucket_name", "=", "bucket_name", ",", "query", "=", "query", ",", "headers", "=", "headers", ")", "objects", ",", "is_truncated", ",", "marker", "=", "parse_list_objects", "(", "response", ".", "data", ",", "bucket_name", "=", "bucket_name", ")", "for", "obj", "in", "objects", ":", "yield", "obj" ]
List objects in the given bucket. Examples: objects = minio.list_objects('foo') for current_object in objects: print(current_object) # hello # hello/ # hello/ # world/ objects = minio.list_objects('foo', prefix='hello/') for current_object in objects: print(current_object) # hello/world/ objects = minio.list_objects('foo', recursive=True) for current_object in objects: print(current_object) # hello/world/1 # world/world/2 # ... objects = minio.list_objects('foo', prefix='hello/', recursive=True) for current_object in objects: print(current_object) # hello/world/1 # hello/world/2 :param bucket_name: Bucket to list objects from :param prefix: String specifying objects returned must begin with :param recursive: If yes, returns all objects for a specified prefix :return: An iterator of objects in alphabetical order.
[ "List", "objects", "in", "the", "given", "bucket", "." ]
python
train
google/gin-config
gin/config.py
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L565-L602
def bind_parameter(binding_key, value):
  """Binds the parameter value specified by `binding_key` to `value`.

  The `binding_key` argument should either be a string of the form
  `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a
  list or tuple of `(scope, selector, parameter_name)`, where `selector`
  corresponds to `optional.module.names.configurable_name`. Once this function
  has been called, subsequent calls (in the specified scope) to the specified
  configurable function will have `value` supplied to their `parameter_name`
  parameter.

  Example:

      @configurable('fully_connected_network')
      def network_fn(num_layers=5, units_per_layer=1024):
        ...

      def main(_):
        config.bind_parameter('fully_connected_network.num_layers', 3)
        network_fn()  # Called with num_layers == 3, not the default of 5.

  Args:
    binding_key: The parameter whose value should be set. This can either be a
      string, or a tuple of the form `(scope, selector, parameter)`.
    value: The desired value.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      blacklisted or not in the function's whitelist (if present).
  """
  if config_is_locked():
    raise RuntimeError('Attempted to modify locked Gin config.')

  pbk = ParsedBindingKey(binding_key)
  fn_dict = _CONFIG.setdefault(pbk.config_key, {})
  fn_dict[pbk.arg_name] = value
[ "def", "bind_parameter", "(", "binding_key", ",", "value", ")", ":", "if", "config_is_locked", "(", ")", ":", "raise", "RuntimeError", "(", "'Attempted to modify locked Gin config.'", ")", "pbk", "=", "ParsedBindingKey", "(", "binding_key", ")", "fn_dict", "=", "_CONFIG", ".", "setdefault", "(", "pbk", ".", "config_key", ",", "{", "}", ")", "fn_dict", "[", "pbk", ".", "arg_name", "]", "=", "value" ]
Binds the parameter value specified by `binding_key` to `value`. The `binding_key` argument should either be a string of the form `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a list or tuple of `(scope, selector, parameter_name)`, where `selector` corresponds to `optional.module.names.configurable_name`. Once this function has been called, subsequent calls (in the specified scope) to the specified configurable function will have `value` supplied to their `parameter_name` parameter. Example: @configurable('fully_connected_network') def network_fn(num_layers=5, units_per_layer=1024): ... def main(_): config.bind_parameter('fully_connected_network.num_layers', 3) network_fn() # Called with num_layers == 3, not the default of 5. Args: binding_key: The parameter whose value should be set. This can either be a string, or a tuple of the form `(scope, selector, parameter)`. value: The desired value. Raises: RuntimeError: If the config is locked. ValueError: If no function can be found matching the configurable name specified by `binding_key`, or if the specified parameter name is blacklisted or not in the function's whitelist (if present).
[ "Binds", "the", "parameter", "value", "specified", "by", "binding_key", "to", "value", "." ]
python
test
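A minimal usage sketch for `bind_parameter` above, showing the tuple form of `binding_key` that the docstring describes but does not demonstrate. The configurable and parameter names are hypothetical, and the empty-string scope in the tuple is an assumption for the unscoped case:

import gin

@gin.configurable('fully_connected_network')
def network_fn(num_layers=5, units_per_layer=1024):
    return num_layers, units_per_layer

# String form, as in the docstring's own example.
gin.bind_parameter('fully_connected_network.num_layers', 3)
# Tuple form: (scope, selector, parameter_name); '' assumed for "no scope".
gin.bind_parameter(('', 'fully_connected_network', 'units_per_layer'), 512)
print(network_fn())  # (3, 512)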
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L249-L269
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir):
    """
    Calculate coverage at different completeness cutoffs
    for regions in the coverage option.
    """
    if bed_file and utils.file_exists(bed_file):
        ready_depth = tz.get_in(["depth", target_name], data)
        if ready_depth:
            cov_file = ready_depth["regions"]
            dist_file = ready_depth["dist"]
            thresholds_file = ready_depth.get("thresholds")
            out_cov_file = os.path.join(out_dir, os.path.basename(cov_file))
            out_dist_file = os.path.join(out_dir, os.path.basename(dist_file))
            out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \
                if thresholds_file and os.path.isfile(thresholds_file) else None
            if not utils.file_uptodate(out_cov_file, cov_file):
                utils.copy_plus(cov_file, out_cov_file)
                utils.copy_plus(dist_file, out_dist_file)
                utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None
            return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else [])
    return []
[ "def", "coverage_region_detailed_stats", "(", "target_name", ",", "bed_file", ",", "data", ",", "out_dir", ")", ":", "if", "bed_file", "and", "utils", ".", "file_exists", "(", "bed_file", ")", ":", "ready_depth", "=", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "target_name", "]", ",", "data", ")", "if", "ready_depth", ":", "cov_file", "=", "ready_depth", "[", "\"regions\"", "]", "dist_file", "=", "ready_depth", "[", "\"dist\"", "]", "thresholds_file", "=", "ready_depth", ".", "get", "(", "\"thresholds\"", ")", "out_cov_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "os", ".", "path", ".", "basename", "(", "cov_file", ")", ")", "out_dist_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "os", ".", "path", ".", "basename", "(", "dist_file", ")", ")", "out_thresholds_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "os", ".", "path", ".", "basename", "(", "thresholds_file", ")", ")", "if", "thresholds_file", "and", "os", ".", "path", ".", "isfile", "(", "thresholds_file", ")", "else", "None", "if", "not", "utils", ".", "file_uptodate", "(", "out_cov_file", ",", "cov_file", ")", ":", "utils", ".", "copy_plus", "(", "cov_file", ",", "out_cov_file", ")", "utils", ".", "copy_plus", "(", "dist_file", ",", "out_dist_file", ")", "utils", ".", "copy_plus", "(", "thresholds_file", ",", "out_thresholds_file", ")", "if", "out_thresholds_file", "else", "None", "return", "[", "out_cov_file", ",", "out_dist_file", "]", "+", "(", "[", "out_thresholds_file", "]", "if", "out_thresholds_file", "else", "[", "]", ")", "return", "[", "]" ]
Calculate coverage at different completeness cutoffs for regions in the coverage option.
[ "Calculate", "coverage", "at", "different", "completeness", "cutoff", "for", "region", "in", "coverage", "option", "." ]
python
train
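A sketch of the input layout this helper expects, inferred from the `tz.get_in(["depth", target_name], data)` lookup in the record above; all paths and the target name are hypothetical, and module-level imports (`utils`, `tz`, `os`) are assumed in scope:

data = {
    "depth": {
        "coverage": {   # keyed by target_name
            "regions": "/work/sample-coverage.regions.bed.gz",
            "dist": "/work/sample-coverage.dist.txt",
            "thresholds": "/work/sample-coverage.thresholds.bed.gz",  # optional key
        }
    }
}
out_files = coverage_region_detailed_stats("coverage", "/work/regions.bed", data, "/work/qc")
# copies the depth files into /work/qc and returns their new paths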
wilson-eft/wilson
wilson/util/wetutil.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/util/wetutil.py#L83-L105
def JMS_to_array(C, sectors=None):
    """For a dictionary with JMS Wilson coefficients, return a dictionary of arrays."""
    if sectors is None:
        wc_keys = wcxf.Basis['WET', 'JMS'].all_wcs
    else:
        try:
            wc_keys = [k for s in sectors for k in wcxf.Basis['WET', 'JMS'].sectors[s]]
        except KeyError:
            print(sectors)
            # re-raise: with an unknown sector, wc_keys would otherwise be left undefined below
            raise
    # fill in zeros for missing coefficients
    C_complete = {k: C.get(k, 0) for k in wc_keys}
    Ca = _scalar2array(C_complete)
    for k in Ca:
        if k in C_symm_keys[5]:
            Ca[k] = _symm_herm(Ca[k])
        if k in C_symm_keys[41]:
            Ca[k] = _symm_current(Ca[k])
        if k in C_symm_keys[4]:
            Ca[k] = _symm_herm(_symm_current(Ca[k]))
        if k in C_symm_keys[9]:
            Ca[k] = _antisymm_12(Ca[k])
    return Ca
[ "def", "JMS_to_array", "(", "C", ",", "sectors", "=", "None", ")", ":", "if", "sectors", "is", "None", ":", "wc_keys", "=", "wcxf", ".", "Basis", "[", "'WET'", ",", "'JMS'", "]", ".", "all_wcs", "else", ":", "try", ":", "wc_keys", "=", "[", "k", "for", "s", "in", "sectors", "for", "k", "in", "wcxf", ".", "Basis", "[", "'WET'", ",", "'JMS'", "]", ".", "sectors", "[", "s", "]", "]", "except", "KeyError", ":", "print", "(", "sectors", ")", "# fill in zeros for missing coefficients", "C_complete", "=", "{", "k", ":", "C", ".", "get", "(", "k", ",", "0", ")", "for", "k", "in", "wc_keys", "}", "Ca", "=", "_scalar2array", "(", "C_complete", ")", "for", "k", "in", "Ca", ":", "if", "k", "in", "C_symm_keys", "[", "5", "]", ":", "Ca", "[", "k", "]", "=", "_symm_herm", "(", "Ca", "[", "k", "]", ")", "if", "k", "in", "C_symm_keys", "[", "41", "]", ":", "Ca", "[", "k", "]", "=", "_symm_current", "(", "Ca", "[", "k", "]", ")", "if", "k", "in", "C_symm_keys", "[", "4", "]", ":", "Ca", "[", "k", "]", "=", "_symm_herm", "(", "_symm_current", "(", "Ca", "[", "k", "]", ")", ")", "if", "k", "in", "C_symm_keys", "[", "9", "]", ":", "Ca", "[", "k", "]", "=", "_antisymm_12", "(", "Ca", "[", "k", "]", ")", "return", "Ca" ]
For a dictionary with JMS Wilson coefficients, return a dictionary of arrays.
[ "For", "a", "dictionary", "with", "JMS", "Wilson", "coefficients", "return", "a", "dictionary", "of", "arrays", "." ]
python
train
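A hedged usage sketch for `JMS_to_array` above. The coefficient key and sector name below are illustrative placeholders only and are not checked against the WCxf 'WET'/'JMS' basis definition:

from wilson.util.wetutil import JMS_to_array

# Hypothetical JMS coefficient name; any basis key absent from C is filled with zero.
C = {'VnueLL_1112': 0.1 + 0.05j}
Ca = JMS_to_array(C)                       # dict of arrays with symmetry relations applied
Ca_bs = JMS_to_array(C, sectors=['sbsb'])  # restrict to one sector ('sbsb' assumed valid)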
paydunya/paydunya-python
paydunya/opr.py
https://github.com/paydunya/paydunya-python/blob/bb55791e2814788aec74162d9d78970815f37c30/paydunya/opr.py#L38-L45
def charge(self, data): """Second stage of an OPR request""" token = data.get("token", self._response["token"]) data = { "token": token, "confirm_token": data.get("confirm_token") } return self._process('opr/charge', data)
[ "def", "charge", "(", "self", ",", "data", ")", ":", "token", "=", "data", ".", "get", "(", "\"token\"", ",", "self", ".", "_response", "[", "\"token\"", "]", ")", "data", "=", "{", "\"token\"", ":", "token", ",", "\"confirm_token\"", ":", "data", ".", "get", "(", "\"confirm_token\"", ")", "}", "return", "self", ".", "_process", "(", "'opr/charge'", ",", "data", ")" ]
Second stage of an OPR request
[ "Second", "stage", "of", "an", "OPR", "request" ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/dirichlet_multinomial.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet_multinomial.py#L340-L350
def _maybe_assert_valid_sample(self, counts): """Check counts for proper shape, values, then return tensor version.""" if not self.validate_args: return counts counts = distribution_util.embed_check_nonnegative_integer_form(counts) return distribution_util.with_dependencies([ assert_util.assert_equal( self.total_count, tf.reduce_sum(input_tensor=counts, axis=-1), message="counts last-dimension must sum to `self.total_count`"), ], counts)
[ "def", "_maybe_assert_valid_sample", "(", "self", ",", "counts", ")", ":", "if", "not", "self", ".", "validate_args", ":", "return", "counts", "counts", "=", "distribution_util", ".", "embed_check_nonnegative_integer_form", "(", "counts", ")", "return", "distribution_util", ".", "with_dependencies", "(", "[", "assert_util", ".", "assert_equal", "(", "self", ".", "total_count", ",", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "counts", ",", "axis", "=", "-", "1", ")", ",", "message", "=", "\"counts last-dimension must sum to `self.total_count`\"", ")", ",", "]", ",", "counts", ")" ]
Check counts for proper shape, values, then return tensor version.
[ "Check", "counts", "for", "proper", "shape", "values", "then", "return", "tensor", "version", "." ]
python
test
array-split/array_split
array_split/split.py
https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1588-L1615
def array_split( ary, indices_or_sections=None, axis=None, tile_shape=None, max_tile_bytes=None, max_tile_shape=None, sub_tile_shape=None, halo=None ): "To be replaced." return [ ary[slyce] for slyce in shape_split( array_shape=ary.shape, indices_or_sections=indices_or_sections, axis=axis, array_start=None, array_itemsize=ary.itemsize, tile_shape=tile_shape, max_tile_bytes=max_tile_bytes, max_tile_shape=max_tile_shape, sub_tile_shape=sub_tile_shape, halo=halo, tile_bounds_policy=ARRAY_BOUNDS ).flatten() ]
[ "def", "array_split", "(", "ary", ",", "indices_or_sections", "=", "None", ",", "axis", "=", "None", ",", "tile_shape", "=", "None", ",", "max_tile_bytes", "=", "None", ",", "max_tile_shape", "=", "None", ",", "sub_tile_shape", "=", "None", ",", "halo", "=", "None", ")", ":", "return", "[", "ary", "[", "slyce", "]", "for", "slyce", "in", "shape_split", "(", "array_shape", "=", "ary", ".", "shape", ",", "indices_or_sections", "=", "indices_or_sections", ",", "axis", "=", "axis", ",", "array_start", "=", "None", ",", "array_itemsize", "=", "ary", ".", "itemsize", ",", "tile_shape", "=", "tile_shape", ",", "max_tile_bytes", "=", "max_tile_bytes", ",", "max_tile_shape", "=", "max_tile_shape", ",", "sub_tile_shape", "=", "sub_tile_shape", ",", "halo", "=", "halo", ",", "tile_bounds_policy", "=", "ARRAY_BOUNDS", ")", ".", "flatten", "(", ")", "]" ]
To be replaced.
[ "To", "be", "replaced", "." ]
python
train
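The docstring above is a placeholder ("To be replaced."), so as a minimal sketch: with only `indices_or_sections` given, the call is expected to mirror `numpy.array_split` semantics, delegating to `shape_split` internally:

import numpy as np
from array_split import array_split

ary = np.arange(10)
parts = array_split(ary, indices_or_sections=3)   # three roughly equal tiles
print([p.tolist() for p in parts])
# expected: [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]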
ARMmbed/icetea
icetea_lib/Result.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Result.py#L232-L241
def build_data(self): """ get build data. :return: build data or None if not found """ # pylint: disable=len-as-condition if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None): return self.dutinformation.get(0).build.get_data() return None
[ "def", "build_data", "(", "self", ")", ":", "# pylint: disable=len-as-condition", "if", "len", "(", "self", ".", "dutinformation", ")", ">", "0", "and", "(", "self", ".", "dutinformation", ".", "get", "(", "0", ")", ".", "build", "is", "not", "None", ")", ":", "return", "self", ".", "dutinformation", ".", "get", "(", "0", ")", ".", "build", ".", "get_data", "(", ")", "return", "None" ]
get build data. :return: build data or None if not found
[ "get", "build", "data", "." ]
python
train
klen/django-netauth
netauth/utils.py
https://github.com/klen/django-netauth/blob/228e4297fda98d5f9df35f86a01f87c6fb05ab1d/netauth/utils.py#L12-L21
def parse_template(template_path, **kwargs): """ Load and render template. First line of template should contain the subject of email. Return tuple with subject and content. """ template = get_template(template_path) context = Context(kwargs) data = template.render(context).strip() subject, content = re.split(r'\r?\n', data, 1) return (subject.strip(), content.strip())
[ "def", "parse_template", "(", "template_path", ",", "*", "*", "kwargs", ")", ":", "template", "=", "get_template", "(", "template_path", ")", "context", "=", "Context", "(", "kwargs", ")", "data", "=", "template", ".", "render", "(", "context", ")", ".", "strip", "(", ")", "subject", ",", "content", "=", "re", ".", "split", "(", "r'\\r?\\n'", ",", "data", ",", "1", ")", "return", "(", "subject", ".", "strip", "(", ")", ",", "content", ".", "strip", "(", ")", ")" ]
Load and render template. First line of template should contain the subject of email. Return tuple with subject and content.
[ "Load", "and", "render", "template", ".", "First", "line", "of", "template", "should", "contain", "the", "subject", "of", "email", ".", "Return", "tuple", "with", "subject", "and", "content", "." ]
python
train
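A sketch of the template convention `parse_template` above describes; the template path, its contents, and the context variable are hypothetical:

# templates/netauth/welcome.txt, where the first line becomes the subject:
#   Welcome to Example.com, {{ username }}!
#   Hi {{ username }},
#   Thanks for registering.
subject, content = parse_template('netauth/welcome.txt', username='alice')
# subject == 'Welcome to Example.com, alice!'
# content == the rendered remainder of the template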
numenta/htmresearch
projects/union_path_integration/convergence_simulation.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/union_path_integration/convergence_simulation.py#L256-L281
def runMultiprocessNoiseExperiment(resultName, repeat, numWorkers, appendResults, **kwargs): """ :param kwargs: Pass lists to distribute as lists, lists that should be passed intact as tuples. :return: results, in the format [(arguments, results)]. Also saved to json at resultName, in the same format. """ experiments = [{}] for key, values in kwargs.items(): if type(values) is list: newExperiments = [] for experiment in experiments: for val in values: newExperiment = copy(experiment) newExperiment[key] = val newExperiments.append(newExperiment) experiments = newExperiments else: [a.__setitem__(key, values) for a in experiments] newExperiments = [] for experiment in experiments: for _ in xrange(repeat): newExperiments.append(copy(experiment)) experiments = newExperiments return runExperiments(experiments, resultName, numWorkers, appendResults)
[ "def", "runMultiprocessNoiseExperiment", "(", "resultName", ",", "repeat", ",", "numWorkers", ",", "appendResults", ",", "*", "*", "kwargs", ")", ":", "experiments", "=", "[", "{", "}", "]", "for", "key", ",", "values", "in", "kwargs", ".", "items", "(", ")", ":", "if", "type", "(", "values", ")", "is", "list", ":", "newExperiments", "=", "[", "]", "for", "experiment", "in", "experiments", ":", "for", "val", "in", "values", ":", "newExperiment", "=", "copy", "(", "experiment", ")", "newExperiment", "[", "key", "]", "=", "val", "newExperiments", ".", "append", "(", "newExperiment", ")", "experiments", "=", "newExperiments", "else", ":", "[", "a", ".", "__setitem__", "(", "key", ",", "values", ")", "for", "a", "in", "experiments", "]", "newExperiments", "=", "[", "]", "for", "experiment", "in", "experiments", ":", "for", "_", "in", "xrange", "(", "repeat", ")", ":", "newExperiments", ".", "append", "(", "copy", "(", "experiment", ")", ")", "experiments", "=", "newExperiments", "return", "runExperiments", "(", "experiments", ",", "resultName", ",", "numWorkers", ",", "appendResults", ")" ]
:param kwargs: Pass lists to distribute as lists, lists that should be passed intact as tuples. :return: results, in the format [(arguments, results)]. Also saved to json at resultName, in the same format.
[ ":", "param", "kwargs", ":", "Pass", "lists", "to", "distribute", "as", "lists", "lists", "that", "should", "be", "passed", "intact", "as", "tuples", ".", ":", "return", ":", "results", "in", "the", "format", "[", "(", "arguments", "results", ")", "]", ".", "Also", "saved", "to", "json", "at", "resultName", "in", "the", "same", "format", "." ]
python
train
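A sketch of the list-vs-scalar convention in `**kwargs` above: list values are swept as a grid, while non-list values are passed through unchanged to every experiment. The argument names `noiseLevel` and `numObjects` are hypothetical:

results = runMultiprocessNoiseExperiment(
    'noise_results.json',          # resultName: results are also saved here as JSON
    repeat=2,                      # every parameter combination is run twice
    numWorkers=4,
    appendResults=False,
    noiseLevel=[0.0, 0.1, 0.2],    # list -> swept: three variants
    numObjects=10,                 # scalar -> shared by all 6 (3 x 2) runs
)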
ebroecker/canmatrix
src/canmatrix/copy.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/copy.py#L131-L200
def copy_frame(frame_id, source_db, target_db):
    # type: (cm.ArbitrationId, cm.CanMatrix, cm.CanMatrix) -> bool
    """
    Copy a Frame identified by ArbitrationId from source CAN matrix to target CAN matrix.
    This function additionally copies all relevant ECUs and Defines.

    :param frame_id: Frame arbitration id
    :param source_db: Source CAN matrix
    :param target_db: Destination CAN matrix
    """
    frame_list = [source_db.frame_by_id(frame_id)]
    for frame in frame_list:
        logger.info("Copying Frame " + frame.name)

        if target_db.frame_by_id(frame.arbitration_id) is not None:
            # frame already in target_db...
            return False

        # copy Frame-Object:
        target_db.add_frame(copy.deepcopy(frame))

        # ECUs:
        # each transmitter of Frame could be ECU that is not listed already
        for transmitter in frame.transmitters:
            target_ecu = target_db.ecu_by_name(transmitter)
            source_ecu = source_db.ecu_by_name(transmitter)
            if source_ecu is not None and target_ecu is None:
                copy_ecu(source_ecu, source_db, target_db)

        # trigger all signals of Frame
        for sig in frame.signals:
            # each receiver of Signal could be ECU that is not listed already
            for receiver in sig.receivers:
                target_ecu = target_db.ecu_by_name(receiver)
                source_ecu = source_db.ecu_by_name(receiver)
                if source_ecu is not None and target_ecu is None:
                    copy_ecu(source_ecu, source_db, target_db)

        # copy all frame-defines
        attributes = frame.attributes
        for attribute in attributes:
            if attribute not in target_db.frame_defines:
                target_db.add_frame_defines(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.frame_defines[attribute].definition))
                target_db.add_define_default(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.frame_defines[attribute].defaultValue))
            # update enum data types if needed:
            if source_db.frame_defines[attribute].type == 'ENUM':
                temp_attr = frame.attribute(attribute, db=source_db)
                if temp_attr not in target_db.frame_defines[attribute].values:
                    target_db.frame_defines[attribute].values.append(copy.deepcopy(temp_attr))
                    target_db.frame_defines[attribute].update()

        # trigger all signals of Frame
        for sig in frame.signals:
            # delete all 'unknown' attributes
            for attribute in sig.attributes:
                target_db.add_signal_defines(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.signal_defines[attribute].definition))
                target_db.add_define_default(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.signal_defines[attribute].defaultValue))
                # update enum data types if needed:
                if source_db.signal_defines[attribute].type == 'ENUM':
                    temp_attr = sig.attribute(attribute, db=source_db)
                    if temp_attr not in target_db.signal_defines[attribute].values:
                        target_db.signal_defines[attribute].values.append(copy.deepcopy(temp_attr))
                        target_db.signal_defines[attribute].update()

    return True
[ "def", "copy_frame", "(", "frame_id", ",", "source_db", ",", "target_db", ")", ":", "# type: (cm.ArbitrationId, cm.CanMatrix, cm.CanMatrix) -> bool", "frame_list", "=", "[", "source_db", ".", "frame_by_id", "(", "frame_id", ")", "]", "for", "frame", "in", "frame_list", ":", "logger", ".", "info", "(", "\"Copying Frame \"", "+", "frame", ".", "name", ")", "if", "target_db", ".", "frame_by_id", "(", "frame", ".", "arbitration_id", ")", "is", "not", "None", ":", "# frame already in target_db...", "return", "False", "# copy Frame-Object:", "target_db", ".", "add_frame", "(", "copy", ".", "deepcopy", "(", "frame", ")", ")", "# ECUs:", "# each transmitter of Frame could be ECU that is not listed already", "for", "transmitter", "in", "frame", ".", "transmitters", ":", "target_ecu", "=", "target_db", ".", "ecu_by_name", "(", "transmitter", ")", "source_ecu", "=", "source_db", ".", "ecu_by_name", "(", "transmitter", ")", "if", "source_ecu", "is", "not", "None", "and", "target_ecu", "is", "None", ":", "copy_ecu", "(", "source_ecu", ",", "source_db", ",", "target_db", ")", "# trigger all signals of Frame", "for", "sig", "in", "frame", ".", "signals", ":", "# each receiver of Signal could be ECU that is not listed already", "for", "receiver", "in", "sig", ".", "receivers", ":", "target_ecu", "=", "target_db", ".", "ecu_by_name", "(", "receiver", ")", "source_ecu", "=", "source_db", ".", "ecu_by_name", "(", "receiver", ")", "if", "source_ecu", "is", "not", "None", "and", "target_ecu", "is", "None", ":", "copy_ecu", "(", "source_ecu", ",", "source_db", ",", "target_db", ")", "# copy all frame-defines", "attributes", "=", "frame", ".", "attributes", "for", "attribute", "in", "attributes", ":", "if", "attribute", "not", "in", "target_db", ".", "frame_defines", ":", "target_db", ".", "add_frame_defines", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "frame_defines", "[", "attribute", "]", ".", "definition", ")", ")", "target_db", ".", "add_define_default", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "frame_defines", "[", "attribute", "]", ".", "defaultValue", ")", ")", "# update enum data types if needed:", "if", "source_db", ".", "frame_defines", "[", "attribute", "]", ".", "type", "==", "'ENUM'", ":", "temp_attr", "=", "frame", ".", "attribute", "(", "attribute", ",", "db", "=", "source_db", ")", "if", "temp_attr", "not", "in", "target_db", ".", "frame_defines", "[", "attribute", "]", ".", "values", ":", "target_db", ".", "frame_defines", "[", "attribute", "]", ".", "values", ".", "append", "(", "copy", ".", "deepcopy", "(", "temp_attr", ")", ")", "target_db", ".", "frame_defines", "[", "attribute", "]", ".", "update", "(", ")", "# trigger all signals of Frame", "for", "sig", "in", "frame", ".", "signals", ":", "# delete all 'unknown' attributes", "for", "attribute", "in", "sig", ".", "attributes", ":", "target_db", ".", "add_signal_defines", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "signal_defines", "[", "attribute", "]", ".", "definition", ")", ")", "target_db", ".", "add_define_default", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "signal_defines", "[", "attribute", "]", ".", "defaultValue", ")", ")", "# update enum data types if needed:", "if", "source_db", ".", "signal_defines", "[", "attribute", "]", ".", "type", "==", "'ENUM'", ":", "temp_attr", "=", 
"sig", ".", "attribute", "(", "attribute", ",", "db", "=", "source_db", ")", "if", "temp_attr", "not", "in", "target_db", ".", "signal_defines", "[", "attribute", "]", ".", "values", ":", "target_db", ".", "signal_defines", "[", "attribute", "]", ".", "values", ".", "append", "(", "copy", ".", "deepcopy", "(", "temp_attr", ")", ")", "target_db", ".", "signal_defines", "[", "attribute", "]", ".", "update", "(", ")", "return", "True" ]
Copy a Frame identified by ArbitrationId from source CAN matrix to target CAN matrix.
This function additionally copies all relevant ECUs and Defines.

:param frame_id: Frame arbitration id
:param source_db: Source CAN matrix
:param target_db: Destination CAN matrix
[ "Copy", "a", "Frame", "identified", "by", "ArbitrationId", "from", "source", "CAN", "matrix", "to", "target", "CAN", "matrix", ".", "This", "function", "additionally", "copy", "all", "relevant", "ECUs", "and", "Defines", "." ]
python
train
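A minimal sketch for `copy_frame` above, assuming `source_db` and `target_db` are already-loaded `CanMatrix` objects (how they are loaded is omitted here):

from canmatrix.copy import copy_frame

# Copy every frame of source_db that target_db does not yet contain;
# copy_frame returns False when the arbitration id already exists in target_db.
for frame in source_db.frames:
    copied = copy_frame(frame.arbitration_id, source_db, target_db)
    if not copied:
        print("skipped existing frame:", frame.name)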
nitely/django-hooks
hooks/signalhook.py
https://github.com/nitely/django-hooks/blob/26ea2150c9be110e90b9ee60fbfd1065ac30ab1d/hooks/signalhook.py#L72-L88
def send(self, name, sender=None, **kwargs): """ Sends the signal. Return every function response\ that was hooked to hook-name as a list: [(func, response), ] :param str name: The hook name :param class sender: Optional sender __class__ to which\ registered callback should match (see :py:func:`.connect` method) :return: Signal responses as a sequence of tuples (func, response) :rtype: list """ try: signal = self._registry[name] except KeyError: return [] return signal.send(sender=sender, **kwargs)
[ "def", "send", "(", "self", ",", "name", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "signal", "=", "self", ".", "_registry", "[", "name", "]", "except", "KeyError", ":", "return", "[", "]", "return", "signal", ".", "send", "(", "sender", "=", "sender", ",", "*", "*", "kwargs", ")" ]
Sends the signal. Return every function response\ that was hooked to hook-name as a list: [(func, response), ] :param str name: The hook name :param class sender: Optional sender __class__ to which\ registered callback should match (see :py:func:`.connect` method) :return: Signal responses as a sequence of tuples (func, response) :rtype: list
[ "Sends", "the", "signal", ".", "Return", "every", "function", "response", "\\", "that", "was", "hooked", "to", "hook", "-", "name", "as", "a", "list", ":", "[", "(", "func", "response", ")", "]" ]
python
train
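A usage sketch pairing `send` above with a registered callback. The module-level `hook` instance follows the library's documented import; the exact `connect` signature is an assumption here, and the callback is hypothetical:

from hooks.signalhook import hook  # module-level Hook instance

def on_comment_posted(sender, comment=None, **kwargs):
    return 'seen: %s' % comment

# Registration shown as an assumption; see the library's connect() docs.
hook.connect(on_comment_posted, 'comment-posted')

responses = hook.send('comment-posted', comment='hello')  # [(func, response), ...]
unknown = hook.send('no-such-hook')                       # [] for unregistered names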
tjvr/kurt
kurt/scratch14/objtable.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/scratch14/objtable.py#L383-L443
def encode_network(root): """Yield ref-containing obj table entries from object network""" def fix_values(obj): if isinstance(obj, Container): obj.update((k, get_ref(v)) for (k, v) in obj.items() if k != 'class_name') fixed_obj = obj elif isinstance(obj, Dictionary): fixed_obj = obj.__class__(dict( (get_ref(field), get_ref(value)) for (field, value) in obj.value.items() )) elif isinstance(obj, dict): fixed_obj = dict( (get_ref(field), get_ref(value)) for (field, value) in obj.items() ) elif isinstance(obj, list): fixed_obj = [get_ref(field) for field in obj] elif isinstance(obj, Form): fixed_obj = obj.__class__(**dict( (field, get_ref(value)) for (field, value) in obj.value.items() )) elif isinstance(obj, ContainsRefs): fixed_obj = obj.__class__([get_ref(field) for field in obj.value]) else: return obj fixed_obj._made_from = obj return fixed_obj objects = [] def get_ref(obj, objects=objects): obj = PythonicAdapter(Pass)._encode(obj, None) if isinstance(obj, (FixedObject, Container)): if getattr(obj, '_index', None): index = obj._index else: objects.append(None) obj._index = index = len(objects) objects[index - 1] = fix_values(obj) return Ref(index) else: return obj # Inline value get_ref(root) for obj in objects: if getattr(obj, '_index', None): del obj._index return objects
[ "def", "encode_network", "(", "root", ")", ":", "def", "fix_values", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Container", ")", ":", "obj", ".", "update", "(", "(", "k", ",", "get_ref", "(", "v", ")", ")", "for", "(", "k", ",", "v", ")", "in", "obj", ".", "items", "(", ")", "if", "k", "!=", "'class_name'", ")", "fixed_obj", "=", "obj", "elif", "isinstance", "(", "obj", ",", "Dictionary", ")", ":", "fixed_obj", "=", "obj", ".", "__class__", "(", "dict", "(", "(", "get_ref", "(", "field", ")", ",", "get_ref", "(", "value", ")", ")", "for", "(", "field", ",", "value", ")", "in", "obj", ".", "value", ".", "items", "(", ")", ")", ")", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "fixed_obj", "=", "dict", "(", "(", "get_ref", "(", "field", ")", ",", "get_ref", "(", "value", ")", ")", "for", "(", "field", ",", "value", ")", "in", "obj", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "fixed_obj", "=", "[", "get_ref", "(", "field", ")", "for", "field", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "Form", ")", ":", "fixed_obj", "=", "obj", ".", "__class__", "(", "*", "*", "dict", "(", "(", "field", ",", "get_ref", "(", "value", ")", ")", "for", "(", "field", ",", "value", ")", "in", "obj", ".", "value", ".", "items", "(", ")", ")", ")", "elif", "isinstance", "(", "obj", ",", "ContainsRefs", ")", ":", "fixed_obj", "=", "obj", ".", "__class__", "(", "[", "get_ref", "(", "field", ")", "for", "field", "in", "obj", ".", "value", "]", ")", "else", ":", "return", "obj", "fixed_obj", ".", "_made_from", "=", "obj", "return", "fixed_obj", "objects", "=", "[", "]", "def", "get_ref", "(", "obj", ",", "objects", "=", "objects", ")", ":", "obj", "=", "PythonicAdapter", "(", "Pass", ")", ".", "_encode", "(", "obj", ",", "None", ")", "if", "isinstance", "(", "obj", ",", "(", "FixedObject", ",", "Container", ")", ")", ":", "if", "getattr", "(", "obj", ",", "'_index'", ",", "None", ")", ":", "index", "=", "obj", ".", "_index", "else", ":", "objects", ".", "append", "(", "None", ")", "obj", ".", "_index", "=", "index", "=", "len", "(", "objects", ")", "objects", "[", "index", "-", "1", "]", "=", "fix_values", "(", "obj", ")", "return", "Ref", "(", "index", ")", "else", ":", "return", "obj", "# Inline value", "get_ref", "(", "root", ")", "for", "obj", "in", "objects", ":", "if", "getattr", "(", "obj", ",", "'_index'", ",", "None", ")", ":", "del", "obj", ".", "_index", "return", "objects" ]
Yield ref-containing obj table entries from object network
[ "Yield", "ref", "-", "containing", "obj", "table", "entries", "from", "object", "network" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/terminal/console/interactiveshell.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/terminal/console/interactiveshell.py#L220-L233
def wait_for_kernel(self, timeout=None): """method to wait for a kernel to be ready""" tic = time.time() self.km.hb_channel.unpause() while True: self.run_cell('1', False) if self.km.hb_channel.is_beating(): # heart failure was not the reason this returned break else: # heart failed if timeout is not None and (time.time() - tic) > timeout: return False return True
[ "def", "wait_for_kernel", "(", "self", ",", "timeout", "=", "None", ")", ":", "tic", "=", "time", ".", "time", "(", ")", "self", ".", "km", ".", "hb_channel", ".", "unpause", "(", ")", "while", "True", ":", "self", ".", "run_cell", "(", "'1'", ",", "False", ")", "if", "self", ".", "km", ".", "hb_channel", ".", "is_beating", "(", ")", ":", "# heart failure was not the reason this returned", "break", "else", ":", "# heart failed", "if", "timeout", "is", "not", "None", "and", "(", "time", ".", "time", "(", ")", "-", "tic", ")", ">", "timeout", ":", "return", "False", "return", "True" ]
method to wait for a kernel to be ready
[ "method", "to", "wait", "for", "a", "kernel", "to", "be", "ready" ]
python
test
LLNL/certipy
certipy/certipy.py
https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L393-L412
def remove_record(self, common_name): """Delete the record associated with this common name""" bundle = self.get_files(common_name) num_signees = len(Counter(bundle.record['signees'])) if bundle.is_ca() and num_signees > 0: raise CertificateAuthorityInUseError( "Authority {name} has signed {x} certificates" .format(name=common_name, x=num_signees) ) try: ca_name = bundle.record['parent_ca'] ca_record = self.get_record(ca_name) self.remove_sign_link(ca_name, common_name) except CertNotFoundError: pass record_copy = dict(self.store[common_name]) del self.store[common_name] self.save() return record_copy
[ "def", "remove_record", "(", "self", ",", "common_name", ")", ":", "bundle", "=", "self", ".", "get_files", "(", "common_name", ")", "num_signees", "=", "len", "(", "Counter", "(", "bundle", ".", "record", "[", "'signees'", "]", ")", ")", "if", "bundle", ".", "is_ca", "(", ")", "and", "num_signees", ">", "0", ":", "raise", "CertificateAuthorityInUseError", "(", "\"Authority {name} has signed {x} certificates\"", ".", "format", "(", "name", "=", "common_name", ",", "x", "=", "num_signees", ")", ")", "try", ":", "ca_name", "=", "bundle", ".", "record", "[", "'parent_ca'", "]", "ca_record", "=", "self", ".", "get_record", "(", "ca_name", ")", "self", ".", "remove_sign_link", "(", "ca_name", ",", "common_name", ")", "except", "CertNotFoundError", ":", "pass", "record_copy", "=", "dict", "(", "self", ".", "store", "[", "common_name", "]", ")", "del", "self", ".", "store", "[", "common_name", "]", "self", ".", "save", "(", ")", "return", "record_copy" ]
Delete the record associated with this common name
[ "Delete", "the", "record", "associated", "with", "this", "common", "name" ]
python
train
inasafe/inasafe
safe/gui/tools/batch/batch_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L187-L192
def save_state(self): """Save current state of GUI to configuration file.""" set_setting('lastSourceDir', self.source_directory.text()) set_setting('lastOutputDir', self.output_directory.text()) set_setting( 'useDefaultOutputDir', self.scenario_directory_radio.isChecked())
[ "def", "save_state", "(", "self", ")", ":", "set_setting", "(", "'lastSourceDir'", ",", "self", ".", "source_directory", ".", "text", "(", ")", ")", "set_setting", "(", "'lastOutputDir'", ",", "self", ".", "output_directory", ".", "text", "(", ")", ")", "set_setting", "(", "'useDefaultOutputDir'", ",", "self", ".", "scenario_directory_radio", ".", "isChecked", "(", ")", ")" ]
Save current state of GUI to configuration file.
[ "Save", "current", "state", "of", "GUI", "to", "configuration", "file", "." ]
python
train
daknuett/py_register_machine2
core/processor.py
https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/core/processor.py#L218-L226
def interrupt(self, address):
    """
    Interrupts the Processor and forces it to jump to ``address``.
    If ``push_pc`` is enabled this will push the PC to the stack.
    """
    if(self.push_pc):
        self.memory_bus.write_word(self.sp, self.pc)
        self._set_sp(self.sp - 1)
    self._set_pc(address)
[ "def", "interrupt", "(", "self", ",", "address", ")", ":", "if", "(", "self", ".", "push_pc", ")", ":", "self", ".", "memory_bus", ".", "write_word", "(", "self", ".", "sp", ",", "self", ".", "pc", ")", "self", ".", "_set_sp", "(", "self", ".", "sp", "-", "1", ")", "self", ".", "_set_pc", "(", "address", ")" ]
Interrupts the Processor and forces it to jump to ``address``.
If ``push_pc`` is enabled this will push the PC to the stack.
[ "Interrupts", "the", "Processor", "and", "forces", "him", "to", "jump", "to", "address", ".", "If", "push_pc", "is", "enabled", "this", "will", "push", "the", "PC", "to", "the", "stack", "." ]
python
train
KnowledgeLinks/rdfframework
rdfframework/rdfclass/esconversion.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/esconversion.py#L67-L83
def get_idx_types(rng_def, ranges):
    """ Returns the elasticsearch index types for the obj

    args:
        rng_def: the range definition dictionary
        ranges: rdfproperty ranges
    """
    idx_types = rng_def.get('kds_esIndexType', []).copy()
    if not idx_types:
        nested = False
        for rng in ranges:
            if range_is_obj(rng, __MODULE__.rdfclass):
                nested = True
        if nested:
            idx_types.append('es_Nested')
    return idx_types
[ "def", "get_idx_types", "(", "rng_def", ",", "ranges", ")", ":", "idx_types", "=", "rng_def", ".", "get", "(", "'kds_esIndexType'", ",", "[", "]", ")", ".", "copy", "(", ")", "if", "not", "idx_types", ":", "nested", "=", "False", "for", "rng", "in", "ranges", ":", "if", "range_is_obj", "(", "rng", ",", "__MODULE__", ".", "rdfclass", ")", ":", "nested", "=", "True", "if", "nested", ":", "idx_types", ".", "append", "(", "'es_Nested'", ")", "return", "idx_types" ]
Returns the elasticsearch index types for the obj

args:
    rng_def: the range definition dictionary
    ranges: rdfproperty ranges
[ "Returns", "the", "elasticsearch", "index", "types", "for", "the", "obj" ]
python
train
studionow/pybrightcove
pybrightcove/video.py
https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/video.py#L537-L557
def save(self, create_multiple_renditions=True, preserve_source_rendition=True, encode_to=enums.EncodeToEnum.FLV): """ Creates or updates the video """ if is_ftp_connection(self.connection) and len(self.assets) > 0: self.connection.post(xml=self.to_xml(), assets=self.assets) elif not self.id and self._filename: self.id = self.connection.post('create_video', self._filename, create_multiple_renditions=create_multiple_renditions, preserve_source_rendition=preserve_source_rendition, encode_to=encode_to, video=self._to_dict()) elif not self.id and len(self.renditions) > 0: self.id = self.connection.post('create_video', video=self._to_dict()) elif self.id: data = self.connection.post('update_video', video=self._to_dict()) if data: self._load(data)
[ "def", "save", "(", "self", ",", "create_multiple_renditions", "=", "True", ",", "preserve_source_rendition", "=", "True", ",", "encode_to", "=", "enums", ".", "EncodeToEnum", ".", "FLV", ")", ":", "if", "is_ftp_connection", "(", "self", ".", "connection", ")", "and", "len", "(", "self", ".", "assets", ")", ">", "0", ":", "self", ".", "connection", ".", "post", "(", "xml", "=", "self", ".", "to_xml", "(", ")", ",", "assets", "=", "self", ".", "assets", ")", "elif", "not", "self", ".", "id", "and", "self", ".", "_filename", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "self", ".", "_filename", ",", "create_multiple_renditions", "=", "create_multiple_renditions", ",", "preserve_source_rendition", "=", "preserve_source_rendition", ",", "encode_to", "=", "encode_to", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "not", "self", ".", "id", "and", "len", "(", "self", ".", "renditions", ")", ">", "0", ":", "self", ".", "id", "=", "self", ".", "connection", ".", "post", "(", "'create_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "elif", "self", ".", "id", ":", "data", "=", "self", ".", "connection", ".", "post", "(", "'update_video'", ",", "video", "=", "self", ".", "_to_dict", "(", ")", ")", "if", "data", ":", "self", ".", "_load", "(", "data", ")" ]
Creates or updates the video
[ "Creates", "or", "updates", "the", "video" ]
python
train
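A sketch of the create path through `save` above. The constructor arguments are indicative of the library's typical Video usage, the file path is a placeholder, and connection configuration is assumed to come from pybrightcove's own settings:

import pybrightcove

video = pybrightcove.video.Video(
    name='Fireworks',
    short_description='A fireworks display',
    filename='/tmp/fireworks.mov')   # placeholder path
video.save()     # no id yet + a filename -> POSTs 'create_video' with the file
print(video.id)  # populated from the API response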
mixmastamyk/console
console/detection.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/detection.py#L452-L525
def get_color(name, number=None): ''' Query the default terminal, for colors, etc. Direct queries supported on xterm, iTerm, perhaps others. Arguments: str: name, one of ('foreground', 'fg', 'background', 'bg', or 'index') # index grabs a palette index int: or a "dynamic color number of (4, 10-19)," see links below. str: number - if name is index, number should be an int from 0…255 Queries terminal using ``OSC # ? BEL`` sequence, call responds with a color in this X Window format syntax: - ``rgb:DEAD/BEEF/CAFE`` - `Control sequences <http://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Operating-System-Commands>`_ - `X11 colors <https://www.x.org/releases/X11R7.7/doc/libX11/libX11/libX11.html#RGB_Device_String_Specification>`_ Returns: tuple[int]:  A tuple of four-digit hex strings after parsing, the last two digits are the least significant and can be chopped if needed: ``('DEAD', 'BEEF', 'CAFE')`` If an error occurs during retrieval or parsing, the tuple will be empty. Examples: >>> get_color('bg') ('0000', '0000', '0000') >>> get_color('index', 2) # second color in indexed ('4e4d', '9a9a', '0605') # palette, 2 aka 32 in basic Note: Blocks if terminal does not support the function. Checks is_a_tty() first, since function would also block if i/o were redirected through a pipe. On Windows, only able to find palette defaults, which may be different if they were customized. To find the palette index instead, see ``windows.get_color``. ''' colors = () if is_a_tty() and not env.SSH_CLIENT: if not 'index' in _color_code_map: _color_code_map['index'] = '4;' + str(number or '') if os_name == 'nt': from .windows import get_color color_id = get_color(name) if sys.getwindowsversion()[2] > 16299: # Win10 FCU, new palette basic_palette = color_tables.cmd1709_palette4 else: basic_palette = color_tables.cmd_palette4 colors = (f'{i:02x}' for i in basic_palette[color_id]) # compat elif sys.platform == 'darwin': if env.TERM_PROGRAM == 'iTerm.app': # supports, though returns two chars per colors = _get_color_xterm(name, number) elif os_name == 'posix': if sys.platform.startswith('freebsd'): pass elif env.TERM and env.TERM.startswith('xterm'): colors = _get_color_xterm(name, number) return tuple(colors)
[ "def", "get_color", "(", "name", ",", "number", "=", "None", ")", ":", "colors", "=", "(", ")", "if", "is_a_tty", "(", ")", "and", "not", "env", ".", "SSH_CLIENT", ":", "if", "not", "'index'", "in", "_color_code_map", ":", "_color_code_map", "[", "'index'", "]", "=", "'4;'", "+", "str", "(", "number", "or", "''", ")", "if", "os_name", "==", "'nt'", ":", "from", ".", "windows", "import", "get_color", "color_id", "=", "get_color", "(", "name", ")", "if", "sys", ".", "getwindowsversion", "(", ")", "[", "2", "]", ">", "16299", ":", "# Win10 FCU, new palette", "basic_palette", "=", "color_tables", ".", "cmd1709_palette4", "else", ":", "basic_palette", "=", "color_tables", ".", "cmd_palette4", "colors", "=", "(", "f'{i:02x}'", "for", "i", "in", "basic_palette", "[", "color_id", "]", ")", "# compat", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "if", "env", ".", "TERM_PROGRAM", "==", "'iTerm.app'", ":", "# supports, though returns two chars per", "colors", "=", "_get_color_xterm", "(", "name", ",", "number", ")", "elif", "os_name", "==", "'posix'", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'freebsd'", ")", ":", "pass", "elif", "env", ".", "TERM", "and", "env", ".", "TERM", ".", "startswith", "(", "'xterm'", ")", ":", "colors", "=", "_get_color_xterm", "(", "name", ",", "number", ")", "return", "tuple", "(", "colors", ")" ]
Query the default terminal, for colors, etc. Direct queries supported on xterm, iTerm, perhaps others. Arguments: str: name, one of ('foreground', 'fg', 'background', 'bg', or 'index') # index grabs a palette index int: or a "dynamic color number of (4, 10-19)," see links below. str: number - if name is index, number should be an int from 0…255 Queries terminal using ``OSC # ? BEL`` sequence, call responds with a color in this X Window format syntax: - ``rgb:DEAD/BEEF/CAFE`` - `Control sequences <http://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Operating-System-Commands>`_ - `X11 colors <https://www.x.org/releases/X11R7.7/doc/libX11/libX11/libX11.html#RGB_Device_String_Specification>`_ Returns: tuple[int]:  A tuple of four-digit hex strings after parsing, the last two digits are the least significant and can be chopped if needed: ``('DEAD', 'BEEF', 'CAFE')`` If an error occurs during retrieval or parsing, the tuple will be empty. Examples: >>> get_color('bg') ('0000', '0000', '0000') >>> get_color('index', 2) # second color in indexed ('4e4d', '9a9a', '0605') # palette, 2 aka 32 in basic Note: Blocks if terminal does not support the function. Checks is_a_tty() first, since function would also block if i/o were redirected through a pipe. On Windows, only able to find palette defaults, which may be different if they were customized. To find the palette index instead, see ``windows.get_color``.
[ "Query", "the", "default", "terminal", "for", "colors", "etc", "." ]
python
train
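A small follow-on sketch for `get_color` above: the four-digit hex components can be reduced to familiar 8-bit values by chopping the two least-significant digits, as the docstring notes:

from console.detection import get_color

rgb16 = get_color('background')   # e.g. ('DEAD', 'BEEF', 'CAFE'), or () on failure
if rgb16:                          # empty tuple when unsupported or redirected
    r, g, b = (int(component[:2], 16) for component in rgb16)
    print('background ~ #%02x%02x%02x' % (r, g, b))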
PyFilesystem/pyfilesystem2
fs/wildcard.py
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/wildcard.py#L61-L78
def match_any(patterns, name): # type: (Iterable[Text], Text) -> bool """Test if a name matches any of a list of patterns. Will return `True` if ``patterns`` is an empty list. Arguments: patterns (list): A list of wildcard pattern, e.g ``["*.py", "*.pyc"]`` name (str): A filename. Returns: bool: `True` if the name matches at least one of the patterns. """ if not patterns: return True return any(match(pattern, name) for pattern in patterns)
[ "def", "match_any", "(", "patterns", ",", "name", ")", ":", "# type: (Iterable[Text], Text) -> bool", "if", "not", "patterns", ":", "return", "True", "return", "any", "(", "match", "(", "pattern", ",", "name", ")", "for", "pattern", "in", "patterns", ")" ]
Test if a name matches any of a list of patterns. Will return `True` if ``patterns`` is an empty list. Arguments: patterns (list): A list of wildcard pattern, e.g ``["*.py", "*.pyc"]`` name (str): A filename. Returns: bool: `True` if the name matches at least one of the patterns.
[ "Test", "if", "a", "name", "matches", "any", "of", "a", "list", "of", "patterns", "." ]
python
train
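A quick sketch of the two behaviors called out in the `match_any` docstring above:

from fs.wildcard import match_any

match_any(['*.py', '*.pyc'], 'module.py')   # True: first pattern matches
match_any(['*.py', '*.pyc'], 'notes.txt')   # False: no pattern matches
match_any([], 'anything.txt')               # True: empty pattern list matches all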
FujiMakoto/IPS-Vagrant
ips_vagrant/common/progress.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/common/progress.py#L88-L97
def update(self, pbar): """ Handle progress bar updates @type pbar: ProgressBar @rtype: str """ if pbar.label != self._label: self.label = pbar.label return self.label
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "label", "!=", "self", ".", "_label", ":", "self", ".", "label", "=", "pbar", ".", "label", "return", "self", ".", "label" ]
Handle progress bar updates @type pbar: ProgressBar @rtype: str
[ "Handle", "progress", "bar", "updates" ]
python
train
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L1278-L1289
def with_stmt__26(self, with_loc, context, with_var, colon_loc, body): """(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite""" if with_var: as_loc, optional_vars = with_var item = ast.withitem(context_expr=context, optional_vars=optional_vars, as_loc=as_loc, loc=context.loc.join(optional_vars.loc)) else: item = ast.withitem(context_expr=context, optional_vars=None, as_loc=None, loc=context.loc) return ast.With(items=[item], body=body, keyword_loc=with_loc, colon_loc=colon_loc, loc=with_loc.join(body[-1].loc))
[ "def", "with_stmt__26", "(", "self", ",", "with_loc", ",", "context", ",", "with_var", ",", "colon_loc", ",", "body", ")", ":", "if", "with_var", ":", "as_loc", ",", "optional_vars", "=", "with_var", "item", "=", "ast", ".", "withitem", "(", "context_expr", "=", "context", ",", "optional_vars", "=", "optional_vars", ",", "as_loc", "=", "as_loc", ",", "loc", "=", "context", ".", "loc", ".", "join", "(", "optional_vars", ".", "loc", ")", ")", "else", ":", "item", "=", "ast", ".", "withitem", "(", "context_expr", "=", "context", ",", "optional_vars", "=", "None", ",", "as_loc", "=", "None", ",", "loc", "=", "context", ".", "loc", ")", "return", "ast", ".", "With", "(", "items", "=", "[", "item", "]", ",", "body", "=", "body", ",", "keyword_loc", "=", "with_loc", ",", "colon_loc", "=", "colon_loc", ",", "loc", "=", "with_loc", ".", "join", "(", "body", "[", "-", "1", "]", ".", "loc", ")", ")" ]
(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite
[ "(", "2", ".", "6", "3", ".", "0", ")", "with_stmt", ":", "with", "test", "[", "with_var", "]", ":", "suite" ]
python
valid
StaticCube/python-synology
SynologyDSM/SynologyDSM.py
https://github.com/StaticCube/python-synology/blob/a5446a052fc91a38f7589803dc7a654180db2566/SynologyDSM/SynologyDSM.py#L280-L296
def volume_disk_temp_avg(self, volume): """Average temperature of all disks making up the volume""" volume = self._get_volume(volume) if volume is not None: vol_disks = volume["disks"] if vol_disks is not None: total_temp = 0 total_disks = 0 for vol_disk in vol_disks: disk_temp = self.disk_temp(vol_disk) if disk_temp is not None: total_disks += 1 total_temp += disk_temp if total_temp > 0 and total_disks > 0: return round(total_temp / total_disks, 0)
[ "def", "volume_disk_temp_avg", "(", "self", ",", "volume", ")", ":", "volume", "=", "self", ".", "_get_volume", "(", "volume", ")", "if", "volume", "is", "not", "None", ":", "vol_disks", "=", "volume", "[", "\"disks\"", "]", "if", "vol_disks", "is", "not", "None", ":", "total_temp", "=", "0", "total_disks", "=", "0", "for", "vol_disk", "in", "vol_disks", ":", "disk_temp", "=", "self", ".", "disk_temp", "(", "vol_disk", ")", "if", "disk_temp", "is", "not", "None", ":", "total_disks", "+=", "1", "total_temp", "+=", "disk_temp", "if", "total_temp", ">", "0", "and", "total_disks", ">", "0", ":", "return", "round", "(", "total_temp", "/", "total_disks", ",", "0", ")" ]
Average temperature of all disks making up the volume
[ "Average", "temperature", "of", "all", "disks", "making", "up", "the", "volume" ]
python
test
devassistant/devassistant
devassistant/command_runners.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/command_runners.py#L42-L61
def register_command_runner(arg): """Decorator that registers a command runner. Accepts either: - CommandRunner directly or - String prefix to register a command runner under (returning a decorator) """ if isinstance(arg, str): def inner(command_runner): command_runners.setdefault(arg, []) command_runners[arg].append(command_runner) return command_runner return inner elif issubclass(arg, CommandRunner): command_runners.setdefault('', []) command_runners[''].append(arg) return arg else: msg = 'register_command_runner expects str or CommandRunner as argument, got: {0}'.\ format(arg) raise ValueError(msg)
[ "def", "register_command_runner", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "def", "inner", "(", "command_runner", ")", ":", "command_runners", ".", "setdefault", "(", "arg", ",", "[", "]", ")", "command_runners", "[", "arg", "]", ".", "append", "(", "command_runner", ")", "return", "command_runner", "return", "inner", "elif", "issubclass", "(", "arg", ",", "CommandRunner", ")", ":", "command_runners", ".", "setdefault", "(", "''", ",", "[", "]", ")", "command_runners", "[", "''", "]", ".", "append", "(", "arg", ")", "return", "arg", "else", ":", "msg", "=", "'register_command_runner expects str or CommandRunner as argument, got: {0}'", ".", "format", "(", "arg", ")", "raise", "ValueError", "(", "msg", ")" ]
Decorator that registers a command runner. Accepts either: - CommandRunner directly or - String prefix to register a command runner under (returning a decorator)
[ "Decorator", "that", "registers", "a", "command", "runner", ".", "Accepts", "either", ":" ]
python
train
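A sketch of both registration forms the `register_command_runner` docstring lists; the runner classes themselves are hypothetical:

from devassistant.command_runners import CommandRunner, register_command_runner

@register_command_runner                    # class form: registered under the '' prefix
class EchoRunner(CommandRunner):
    pass

@register_command_runner('docker')          # string form: returns a decorator
class DockerRunner(CommandRunner):
    pass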
python-diamond/Diamond
src/diamond/handler/null.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/null.py#L21-L30
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(NullHandler, self).get_default_config_help() config.update({ }) return config
[ "def", "get_default_config_help", "(", "self", ")", ":", "config", "=", "super", "(", "NullHandler", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config", ".", "update", "(", "{", "}", ")", "return", "config" ]
Returns the help text for the configuration options for this handler
[ "Returns", "the", "help", "text", "for", "the", "configuration", "options", "for", "this", "handler" ]
python
train
someones/jaweson
jaweson/serialisable.py
https://github.com/someones/jaweson/blob/744c3ca0f3af86c48738e2d89ea69646f48cc013/jaweson/serialisable.py#L76-L90
def from_dict(cls, jobj):
    '''Deserialises the object. Automatically inspects the
    object's __init__ function and extracts the parameters.
    Can be trivially over-written.
    '''
    try:
        obj = cls.__new__(cls)
        blacklist = set(['__class__', '__type__'] + cls.__blacklist)
        for k in set(jobj.keys()) - blacklist:
            setattr(obj, k, jobj[k])
        return obj
    except Exception as e:
        raise TypeError('Failed to deserialise {}: {} - args: {}'.format(cls.__name__, str(e), jobj))
[ "def", "from_dict", "(", "cls", ",", "jobj", ")", ":", "try", ":", "obj", "=", "cls", ".", "__new__", "(", "cls", ")", "blacklist", "=", "set", "(", "[", "'__class__'", ",", "'__type__'", "]", "+", "cls", ".", "__blacklist", ")", "for", "k", "in", "set", "(", "jobj", ".", "keys", "(", ")", ")", "-", "blacklist", ":", "setattr", "(", "obj", ",", "k", ",", "jobj", "[", "k", "]", ")", "return", "obj", "except", "Exception", "as", "e", ":", "raise", "TypeError", "(", "'Failed to deserialise {}: {} - args: {}'", ".", "format", "(", "cls", ".", "__name__", ",", "str", "(", "e", ")", ",", "kwargs", ")", ")" ]
Deserialises the object. Automatically inspects the object's __init__ function and extracts the parameters. Can be trivially over-written.
[ "Deserialises", "the", "object", ".", "Automatically", "inspects", "the", "object", "s", "__init__", "function", "and", "extracts", "the", "parameters", ".", "Can", "be", "trivially", "over", "-", "written", "." ]
python
train
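A minimal sketch for `from_dict` above, assuming a `Serialisable` base class in this module; the subclass name and fields are illustrative:

from jaweson import serialisable

class Point(serialisable.Serialisable):
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

# The '__class__'/'__type__' markers are skipped; every other key becomes an attribute.
p = Point.from_dict({'__class__': 'Point', 'x': 1, 'y': 2})
print(p.x, p.y)  # 1 2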
berkeley-cocosci/Wallace
examples/rogers/experiment.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L369-L376
def step(self): """Prompt the environment to change.""" current_state = max(self.infos(type=State), key=attrgetter('creation_time')) current_contents = float(current_state.contents) new_contents = 1 - current_contents info_out = State(origin=self, contents=new_contents) transformations.Mutation(info_in=current_state, info_out=info_out)
[ "def", "step", "(", "self", ")", ":", "current_state", "=", "max", "(", "self", ".", "infos", "(", "type", "=", "State", ")", ",", "key", "=", "attrgetter", "(", "'creation_time'", ")", ")", "current_contents", "=", "float", "(", "current_state", ".", "contents", ")", "new_contents", "=", "1", "-", "current_contents", "info_out", "=", "State", "(", "origin", "=", "self", ",", "contents", "=", "new_contents", ")", "transformations", ".", "Mutation", "(", "info_in", "=", "current_state", ",", "info_out", "=", "info_out", ")" ]
Prompt the environment to change.
[ "Prompt", "the", "environment", "to", "change", "." ]
python
train
flask-restful/flask-restful
flask_restful/__init__.py
https://github.com/flask-restful/flask-restful/blob/25544d697c1f82bafbd1320960df459f58a58e03/flask_restful/__init__.py#L239-L247
def _has_fr_route(self): """Encapsulating the rules for whether the request was to a Flask endpoint""" # 404's, 405's, which might not have a url_rule if self._should_use_fr_error_handler(): return True # for all other errors, just check if FR dispatched the route if not request.url_rule: return False return self.owns_endpoint(request.url_rule.endpoint)
[ "def", "_has_fr_route", "(", "self", ")", ":", "# 404's, 405's, which might not have a url_rule", "if", "self", ".", "_should_use_fr_error_handler", "(", ")", ":", "return", "True", "# for all other errors, just check if FR dispatched the route", "if", "not", "request", ".", "url_rule", ":", "return", "False", "return", "self", ".", "owns_endpoint", "(", "request", ".", "url_rule", ".", "endpoint", ")" ]
Encapsulating the rules for whether the request was to a Flask endpoint
[ "Encapsulating", "the", "rules", "for", "whether", "the", "request", "was", "to", "a", "Flask", "endpoint" ]
python
train
foremast/foremast
src/foremast/pipeline/create_pipeline.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/create_pipeline.py#L161-L191
def compare_with_existing(self, region='us-east-1', onetime=False):
    """Compare desired pipeline with existing pipelines.

    Args:
        region (str): Region of desired pipeline.
        onetime (bool): Looks for different pipeline if Onetime

    Returns:
        str: pipeline_id if existing, None if not.

    """
    pipelines = self.get_existing_pipelines()
    pipeline_id = None
    found = False
    for pipeline in pipelines:
        correct_app_and_region = (pipeline['application'] == self.app_name) and (region in pipeline['name'])
        if onetime:
            onetime_str = "(onetime-{})".format(self.environments[0])
            if correct_app_and_region and onetime_str in pipeline['name']:
                found = True
        elif correct_app_and_region:
            found = True

        if found:
            self.log.info('Existing pipeline found - %s', pipeline['name'])
            pipeline_id = pipeline['id']
            break
    else:
        self.log.info('No existing pipeline found')

    return pipeline_id
[ "def", "compare_with_existing", "(", "self", ",", "region", "=", "'us-east-1'", ",", "onetime", "=", "False", ")", ":", "pipelines", "=", "self", ".", "get_existing_pipelines", "(", ")", "pipeline_id", "=", "None", "found", "=", "False", "for", "pipeline", "in", "pipelines", ":", "correct_app_and_region", "=", "(", "pipeline", "[", "'application'", "]", "==", "self", ".", "app_name", ")", "and", "(", "region", "in", "pipeline", "[", "'name'", "]", ")", "if", "onetime", ":", "onetime_str", "=", "\"(onetime-{})\"", ".", "format", "(", "self", ".", "environments", "[", "0", "]", ")", "if", "correct_app_and_region", "and", "onetime_str", "in", "pipeline", "[", "'name'", "]", ":", "found", "=", "True", "elif", "correct_app_and_region", ":", "found", "=", "True", "if", "found", ":", "self", ".", "log", ".", "info", "(", "'Existing pipeline found - %s'", ",", "pipeline", "[", "'name'", "]", ")", "pipeline_id", "=", "pipeline", "[", "'id'", "]", "break", "else", ":", "self", ".", "log", ".", "info", "(", "'No existing pipeline found'", ")", "return", "pipeline_id" ]
Compare desired pipeline with existing pipelines.

Args:
    region (str): Region of desired pipeline.
    onetime (bool): Looks for different pipeline if Onetime

Returns:
    str: pipeline_id if existing, None if not.
[ "Compare", "desired", "pipeline", "with", "existing", "pipelines", "." ]
python
train
scikit-hep/root_numpy
root_numpy/_matrix.py
https://github.com/scikit-hep/root_numpy/blob/3a9bfbcf89f90dc20ca6869480a63a85e1ceebb8/root_numpy/_matrix.py#L9-L42
def matrix(mat): """Convert a ROOT TMatrix into a NumPy matrix. Parameters ---------- mat : ROOT TMatrixT A ROOT TMatrixD or TMatrixF Returns ------- mat : numpy.matrix A NumPy matrix Examples -------- >>> from root_numpy import matrix >>> from ROOT import TMatrixD >>> a = TMatrixD(4, 4) >>> a[1][2] = 2 >>> matrix(a) matrix([[ 0., 0., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]]) """ import ROOT if isinstance(mat, (ROOT.TMatrixD, ROOT.TMatrixDSym)): return _librootnumpy.matrix_d(ROOT.AsCObject(mat)) elif isinstance(mat, (ROOT.TMatrixF, ROOT.TMatrixFSym)): return _librootnumpy.matrix_f(ROOT.AsCObject(mat)) raise TypeError( "unable to convert object of type {0} " "into a numpy matrix".format(type(mat)))
[ "def", "matrix", "(", "mat", ")", ":", "import", "ROOT", "if", "isinstance", "(", "mat", ",", "(", "ROOT", ".", "TMatrixD", ",", "ROOT", ".", "TMatrixDSym", ")", ")", ":", "return", "_librootnumpy", ".", "matrix_d", "(", "ROOT", ".", "AsCObject", "(", "mat", ")", ")", "elif", "isinstance", "(", "mat", ",", "(", "ROOT", ".", "TMatrixF", ",", "ROOT", ".", "TMatrixFSym", ")", ")", ":", "return", "_librootnumpy", ".", "matrix_f", "(", "ROOT", ".", "AsCObject", "(", "mat", ")", ")", "raise", "TypeError", "(", "\"unable to convert object of type {0} \"", "\"into a numpy matrix\"", ".", "format", "(", "type", "(", "mat", ")", ")", ")" ]
Convert a ROOT TMatrix into a NumPy matrix. Parameters ---------- mat : ROOT TMatrixT A ROOT TMatrixD or TMatrixF Returns ------- mat : numpy.matrix A NumPy matrix Examples -------- >>> from root_numpy import matrix >>> from ROOT import TMatrixD >>> a = TMatrixD(4, 4) >>> a[1][2] = 2 >>> matrix(a) matrix([[ 0., 0., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]])
[ "Convert", "a", "ROOT", "TMatrix", "into", "a", "NumPy", "matrix", "." ]
python
train
saltstack/salt
salt/utils/hashutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/hashutils.py#L152-L171
def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
            ``get_sum`` cannot really be trusted since it is vulnerable to
            collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
    '''
    hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
    if hash_type is None:
        raise ValueError('Invalid hash type: {0}'.format(form))

    with salt.utils.files.fopen(path, 'rb') as ifile:
        hash_obj = hash_type()
        # read the file in chunks, not the entire file
        for chunk in iter(lambda: ifile.read(chunk_size), b''):
            hash_obj.update(chunk)
        return hash_obj.hexdigest()
[ "def", "get_hash", "(", "path", ",", "form", "=", "'sha256'", ",", "chunk_size", "=", "65536", ")", ":", "hash_type", "=", "hasattr", "(", "hashlib", ",", "form", ")", "and", "getattr", "(", "hashlib", ",", "form", ")", "or", "None", "if", "hash_type", "is", "None", ":", "raise", "ValueError", "(", "'Invalid hash type: {0}'", ".", "format", "(", "form", ")", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'rb'", ")", "as", "ifile", ":", "hash_obj", "=", "hash_type", "(", ")", "# read the file in in chunks, not the entire file", "for", "chunk", "in", "iter", "(", "lambda", ":", "ifile", ".", "read", "(", "chunk_size", ")", ",", "b''", ")", ":", "hash_obj", ".", "update", "(", "chunk", ")", "return", "hash_obj", ".", "hexdigest", "(", ")" ]
Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
[ "Get", "the", "hash", "sum", "of", "a", "file" ]
python
train
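The chunked-hashing pattern in `get_hash` works with nothing but the standard library; `salt.utils.files.fopen` behaves like a plain `open()` for this purpose. A self-contained sketch:

import hashlib

def get_hash_stdlib(path, form='sha256', chunk_size=65536):
    hash_type = getattr(hashlib, form, None)
    if hash_type is None:
        raise ValueError('Invalid hash type: {0}'.format(form))
    hash_obj = hash_type()
    with open(path, 'rb') as ifile:
        # iter() with a b'' sentinel yields fixed-size chunks until EOF,
        # so the whole file is never held in memory at once
        for chunk in iter(lambda: ifile.read(chunk_size), b''):
            hash_obj.update(chunk)
    return hash_obj.hexdigest()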
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L110-L116
def wrap(self, video_mode): """ Wraps a nested python sequence. """ size, bits, self.refresh_rate = video_mode self.width, self.height = size self.red_bits, self.green_bits, self.blue_bits = bits
[ "def", "wrap", "(", "self", ",", "video_mode", ")", ":", "size", ",", "bits", ",", "self", ".", "refresh_rate", "=", "video_mode", "self", ".", "width", ",", "self", ".", "height", "=", "size", "self", ".", "red_bits", ",", "self", ".", "green_bits", ",", "self", ".", "blue_bits", "=", "bits" ]
Wraps a nested python sequence.
[ "Wraps", "a", "nested", "python", "sequence", "." ]
python
train
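`wrap` is plain tuple unpacking of GLFW's nested video-mode structure; the values below are made up for illustration:

# Hypothetical video mode: ((width, height), (red, green, blue), refresh_rate)
video_mode = ((1920, 1080), (8, 8, 8), 60)
(width, height), (red_bits, green_bits, blue_bits), refresh_rate = video_mode
print(width, height, refresh_rate)  # 1920 1080 60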
inveniosoftware/invenio-deposit
invenio_deposit/api.py
https://github.com/inveniosoftware/invenio-deposit/blob/f243ea1d01ab0a3bc92ade3262d1abdd2bc32447/invenio_deposit/api.py#L303-L348
def publish(self, pid=None, id_=None):
        """Publish a deposit.

        If it's the first time:

        * it calls the minter and sets the following meta information inside
            the deposit:

        .. code-block:: python

            deposit['_deposit'] = {
                'type': pid_type,
                'value': pid_value,
                'revision_id': 0,
            }

        * A dump of all information inside the deposit is done.

        * A snapshot of the files is done.

        Otherwise, it publishes the new edited version.
        In this case, if in the meanwhile someone already published a new
        version, it'll try to merge the changes with the latest version.

        .. note:: no need for indexing as it calls `self.commit()`.

        Status required: ``'draft'``.

        :param pid: Force the new pid value. (Default: ``None``)
        :param id_: Force the new uuid value as deposit id. (Default: ``None``)
        :returns: Returns itself.
        """
        pid = pid or self.pid

        if not pid.is_registered():
            raise PIDInvalidAction()

        self['_deposit']['status'] = 'published'

        if self['_deposit'].get('pid') is None:
            # First publishing
            self._publish_new(id_=id_)
        else:
            # Update after edit
            record = self._publish_edited()
            record.commit()
        self.commit()
        return self
[ "def", "publish", "(", "self", ",", "pid", "=", "None", ",", "id_", "=", "None", ")", ":", "pid", "=", "pid", "or", "self", ".", "pid", "if", "not", "pid", ".", "is_registered", "(", ")", ":", "raise", "PIDInvalidAction", "(", ")", "self", "[", "'_deposit'", "]", "[", "'status'", "]", "=", "'published'", "if", "self", "[", "'_deposit'", "]", ".", "get", "(", "'pid'", ")", "is", "None", ":", "# First publishing", "self", ".", "_publish_new", "(", "id_", "=", "id_", ")", "else", ":", "# Update after edit", "record", "=", "self", ".", "_publish_edited", "(", ")", "record", ".", "commit", "(", ")", "self", ".", "commit", "(", ")", "return", "self" ]
Publish a deposit.

If it's the first time:

* it calls the minter and sets the following meta information inside
    the deposit:

.. code-block:: python

    deposit['_deposit'] = {
        'type': pid_type,
        'value': pid_value,
        'revision_id': 0,
    }

* A dump of all information inside the deposit is done.

* A snapshot of the files is done.

Otherwise, it publishes the new edited version.
In this case, if in the meanwhile someone already published a new
version, it'll try to merge the changes with the latest version.

.. note:: no need for indexing as it calls `self.commit()`.

Status required: ``'draft'``.

:param pid: Force the new pid value. (Default: ``None``)
:param id_: Force the new uuid value as deposit id. (Default: ``None``)
:returns: Returns itself.
[ "Publish", "a", "deposit", "." ]
python
valid
xolox/python-rotate-backups
rotate_backups/__init__.py
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L603-L649
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup): """ Apply the user defined rotation scheme to the result of :func:`group_backups()`. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()`. :param most_recent_backup: The :class:`~datetime.datetime` of the most recent backup. :raises: :exc:`~exceptions.ValueError` when the rotation scheme dictionary is empty (this would cause all backups to be deleted). .. note:: This method mutates the given data structure by removing all backups that should be removed to apply the user defined rotation scheme. """ if not self.rotation_scheme: raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)") for frequency, backups in backups_by_frequency.items(): # Ignore frequencies not specified by the user. if frequency not in self.rotation_scheme: backups.clear() else: # Reduce the number of backups in each time slot of this # rotation frequency to a single backup (the oldest one or the # newest one). for period, backups_in_period in backups.items(): index = -1 if self.prefer_recent else 0 selected_backup = sorted(backups_in_period)[index] backups[period] = [selected_backup] # Check if we need to rotate away backups in old periods. retention_period = self.rotation_scheme[frequency] if retention_period != 'always': # Remove backups created before the minimum date of this # rotation frequency? (relative to the most recent backup) if self.strict: minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period for period, backups_in_period in list(backups.items()): for backup in backups_in_period: if backup.timestamp < minimum_date: backups_in_period.remove(backup) if not backups_in_period: backups.pop(period) # If there are more periods remaining than the user # requested to be preserved we delete the oldest one(s). items_to_preserve = sorted(backups.items())[-retention_period:] backups_by_frequency[frequency] = dict(items_to_preserve)
[ "def", "apply_rotation_scheme", "(", "self", ",", "backups_by_frequency", ",", "most_recent_backup", ")", ":", "if", "not", "self", ".", "rotation_scheme", ":", "raise", "ValueError", "(", "\"Refusing to use empty rotation scheme! (all backups would be deleted)\"", ")", "for", "frequency", ",", "backups", "in", "backups_by_frequency", ".", "items", "(", ")", ":", "# Ignore frequencies not specified by the user.", "if", "frequency", "not", "in", "self", ".", "rotation_scheme", ":", "backups", ".", "clear", "(", ")", "else", ":", "# Reduce the number of backups in each time slot of this", "# rotation frequency to a single backup (the oldest one or the", "# newest one).", "for", "period", ",", "backups_in_period", "in", "backups", ".", "items", "(", ")", ":", "index", "=", "-", "1", "if", "self", ".", "prefer_recent", "else", "0", "selected_backup", "=", "sorted", "(", "backups_in_period", ")", "[", "index", "]", "backups", "[", "period", "]", "=", "[", "selected_backup", "]", "# Check if we need to rotate away backups in old periods.", "retention_period", "=", "self", ".", "rotation_scheme", "[", "frequency", "]", "if", "retention_period", "!=", "'always'", ":", "# Remove backups created before the minimum date of this", "# rotation frequency? (relative to the most recent backup)", "if", "self", ".", "strict", ":", "minimum_date", "=", "most_recent_backup", "-", "SUPPORTED_FREQUENCIES", "[", "frequency", "]", "*", "retention_period", "for", "period", ",", "backups_in_period", "in", "list", "(", "backups", ".", "items", "(", ")", ")", ":", "for", "backup", "in", "backups_in_period", ":", "if", "backup", ".", "timestamp", "<", "minimum_date", ":", "backups_in_period", ".", "remove", "(", "backup", ")", "if", "not", "backups_in_period", ":", "backups", ".", "pop", "(", "period", ")", "# If there are more periods remaining than the user", "# requested to be preserved we delete the oldest one(s).", "items_to_preserve", "=", "sorted", "(", "backups", ".", "items", "(", ")", ")", "[", "-", "retention_period", ":", "]", "backups_by_frequency", "[", "frequency", "]", "=", "dict", "(", "items_to_preserve", ")" ]
Apply the user defined rotation scheme to the result of :func:`group_backups()`. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()`. :param most_recent_backup: The :class:`~datetime.datetime` of the most recent backup. :raises: :exc:`~exceptions.ValueError` when the rotation scheme dictionary is empty (this would cause all backups to be deleted). .. note:: This method mutates the given data structure by removing all backups that should be removed to apply the user defined rotation scheme.
[ "Apply", "the", "user", "defined", "rotation", "scheme", "to", "the", "result", "of", ":", "func", ":", "group_backups", "()", "." ]
python
train
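A reduced, runnable sketch of the per-frequency step in `apply_rotation_scheme`: collapse each period to one backup, then keep only the newest `retention_period` periods. The period keys and backup values here are illustrative:

def reduce_frequency(backups, retention_period, prefer_recent=False):
    """`backups` maps a sortable period key to a list of sortable backups."""
    index = -1 if prefer_recent else 0
    # one backup per period (oldest by default, newest if prefer_recent)
    for period, backups_in_period in backups.items():
        backups[period] = [sorted(backups_in_period)[index]]
    # keep only the most recent `retention_period` periods
    return dict(sorted(backups.items())[-retention_period:])

daily = {'2024-01-01': ['a', 'b'], '2024-01-02': ['c'], '2024-01-03': ['d', 'e']}
print(reduce_frequency(daily, retention_period=2))
# {'2024-01-02': ['c'], '2024-01-03': ['d']}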
waqasbhatti/astrobase
astrobase/lcproc/checkplotproc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/checkplotproc.py#L952-L1126
def parallel_update_objectinfo_cpdir(cpdir,
                                     cpglob='checkplot-*.pkl*',
                                     liststartindex=None,
                                     maxobjects=None,
                                     nworkers=NCPUS,
                                     fast_mode=False,
                                     findercmap='gray_r',
                                     finderconvolve=None,
                                     deredden_object=True,
                                     custom_bandpasses=None,
                                     gaia_submit_timeout=10.0,
                                     gaia_submit_tries=3,
                                     gaia_max_timeout=180.0,
                                     gaia_mirror=None,
                                     complete_query_later=True,
                                     lclistpkl=None,
                                     nbrradiusarcsec=60.0,
                                     maxnumneighbors=5,
                                     plotdpi=100,
                                     findercachedir='~/.astrobase/stamp-cache',
                                     verbose=True):
    '''This updates the objectinfo for a directory of checkplot pickles.

    Useful in cases where a previous round of GAIA/finderchart/external catalog
    acquisition failed. This will preserve the following keys in the checkplots
    if they exist:

    comments
    varinfo
    objectinfo.objecttags

    Parameters
    ----------

    cpdir : str
        The directory to look for checkplot pickles in.

    cpglob : str
        The UNIX fileglob to use when searching for checkplot pickle files.

    liststartindex : int
        The index of the input list to start working at.

    maxobjects : int
        The maximum number of objects to process in this run. Use this with
        `liststartindex` to effectively distribute working on a large list of
        input checkplot pickles over several sessions or machines.

    nworkers : int
        The number of parallel workers that will work on the checkplot
        update process.

    fast_mode : bool or float
        This runs the external catalog operations in a "fast" mode, with short
        timeouts and not trying to hit external catalogs that take a long time
        to respond. See the docstring for
        `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
        works. If this is True, will run in "fast" mode with default timeouts
        (5 seconds in most cases). If this is a float, will run in "fast" mode
        with the provided timeout value in seconds.

    findercmap : str or matplotlib.cm.Colormap object
        The Colormap object to use for the finder chart image.

    finderconvolve : astropy.convolution.Kernel object or None
        If not None, the Kernel object to use for convolving the finder image.

    deredden_object : bool
        If this is True, will use the 2MASS DUST service to get extinction
        coefficients in various bands, and then try to deredden the magnitudes
        and colors of the object already present in the checkplot's objectinfo
        dict.

    custom_bandpasses : dict
        This is a dict used to provide custom bandpass definitions for any
        magnitude measurements in the objectinfo dict that are not automatically
        recognized by the `varclass.starfeatures.color_features` function. See
        its docstring for details on the required format.

    gaia_submit_timeout : float
        Sets the timeout in seconds to use when submitting a request to look up
        the object's information to the GAIA service. Note that if `fast_mode`
        is set, this is ignored.

    gaia_submit_tries : int
        Sets the maximum number of times the GAIA services will be contacted to
        obtain this object's information. If `fast_mode` is set, this is
        ignored, and the services will be contacted only once (meaning that a
        failure to respond will be silently ignored and no GAIA data will be
        added to the checkplot's objectinfo dict).

    gaia_max_timeout : float
        Sets the timeout in seconds to use when waiting for the GAIA service to
        respond to our request for the object's information. Note that if
        `fast_mode` is set, this is ignored.

    gaia_mirror : str
        This sets the GAIA mirror to use. This is a key in the
        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
        mirror.
complete_query_later : bool If this is True, saves the state of GAIA queries that are not yet complete when `gaia_max_timeout` is reached while waiting for the GAIA service to respond to our request. A later call for GAIA info on the same object will attempt to pick up the results from the existing query if it's completed. If `fast_mode` is True, this is ignored. lclistpkl : dict or str If this is provided, must be a dict resulting from reading a catalog produced by the `lcproc.catalogs.make_lclist` function or a str path pointing to the pickle file produced by that function. This catalog is used to find neighbors of the current object in the current light curve collection. Looking at neighbors of the object within the radius specified by `nbrradiusarcsec` is useful for light curves produced by instruments that have a large pixel scale, so are susceptible to blending of variability and potential confusion of neighbor variability with that of the actual object being looked at. If this is None, no neighbor lookups will be performed. nbrradiusarcsec : float The radius in arcseconds to use for a search conducted around the coordinates of this object to look for any potential confusion and blending of variability amplitude caused by their proximity. maxnumneighbors : int The maximum number of neighbors that will have their light curves and magnitudes noted in this checkplot as potential blends with the target object. plotdpi : int The resolution in DPI of the plots to generate in this function (e.g. the finder chart, etc.) findercachedir : str The path to the astrobase cache directory for finder chart downloads from the NASA SkyView service. verbose : bool If True, will indicate progress and warn about potential problems. Returns ------- list of str Paths to the updated checkplot pickle file. ''' cplist = sorted(glob.glob(os.path.join(cpdir, cpglob))) return parallel_update_objectinfo_cplist( cplist, liststartindex=liststartindex, maxobjects=maxobjects, nworkers=nworkers, fast_mode=fast_mode, findercmap=findercmap, finderconvolve=finderconvolve, deredden_object=deredden_object, custom_bandpasses=custom_bandpasses, gaia_submit_timeout=gaia_submit_timeout, gaia_submit_tries=gaia_submit_tries, gaia_max_timeout=gaia_max_timeout, gaia_mirror=gaia_mirror, complete_query_later=complete_query_later, lclistpkl=lclistpkl, nbrradiusarcsec=nbrradiusarcsec, maxnumneighbors=maxnumneighbors, plotdpi=plotdpi, findercachedir=findercachedir, verbose=verbose )
[ "def", "parallel_update_objectinfo_cpdir", "(", "cpdir", ",", "cpglob", "=", "'checkplot-*.pkl*'", ",", "liststartindex", "=", "None", ",", "maxobjects", "=", "None", ",", "nworkers", "=", "NCPUS", ",", "fast_mode", "=", "False", ",", "findercmap", "=", "'gray_r'", ",", "finderconvolve", "=", "None", ",", "deredden_object", "=", "True", ",", "custom_bandpasses", "=", "None", ",", "gaia_submit_timeout", "=", "10.0", ",", "gaia_submit_tries", "=", "3", ",", "gaia_max_timeout", "=", "180.0", ",", "gaia_mirror", "=", "None", ",", "complete_query_later", "=", "True", ",", "lclistpkl", "=", "None", ",", "nbrradiusarcsec", "=", "60.0", ",", "maxnumneighbors", "=", "5", ",", "plotdpi", "=", "100", ",", "findercachedir", "=", "'~/.astrobase/stamp-cache'", ",", "verbose", "=", "True", ")", ":", "cplist", "=", "sorted", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "cpdir", ",", "cpglob", ")", ")", ")", "return", "parallel_update_objectinfo_cplist", "(", "cplist", ",", "liststartindex", "=", "liststartindex", ",", "maxobjects", "=", "maxobjects", ",", "nworkers", "=", "nworkers", ",", "fast_mode", "=", "fast_mode", ",", "findercmap", "=", "findercmap", ",", "finderconvolve", "=", "finderconvolve", ",", "deredden_object", "=", "deredden_object", ",", "custom_bandpasses", "=", "custom_bandpasses", ",", "gaia_submit_timeout", "=", "gaia_submit_timeout", ",", "gaia_submit_tries", "=", "gaia_submit_tries", ",", "gaia_max_timeout", "=", "gaia_max_timeout", ",", "gaia_mirror", "=", "gaia_mirror", ",", "complete_query_later", "=", "complete_query_later", ",", "lclistpkl", "=", "lclistpkl", ",", "nbrradiusarcsec", "=", "nbrradiusarcsec", ",", "maxnumneighbors", "=", "maxnumneighbors", ",", "plotdpi", "=", "plotdpi", ",", "findercachedir", "=", "findercachedir", ",", "verbose", "=", "verbose", ")" ]
This updates the objectinfo for a directory of checkplot pickles.

Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplots
if they exist:

comments
varinfo
objectinfo.objecttags

Parameters
----------

cpdir : str
    The directory to look for checkplot pickles in.

cpglob : str
    The UNIX fileglob to use when searching for checkplot pickle files.

liststartindex : int
    The index of the input list to start working at.

maxobjects : int
    The maximum number of objects to process in this run. Use this with
    `liststartindex` to effectively distribute working on a large list of
    input checkplot pickles over several sessions or machines.

nworkers : int
    The number of parallel workers that will work on the checkplot
    update process.

fast_mode : bool or float
    This runs the external catalog operations in a "fast" mode, with short
    timeouts and not trying to hit external catalogs that take a long time
    to respond. See the docstring for
    `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
    works. If this is True, will run in "fast" mode with default timeouts
    (5 seconds in most cases). If this is a float, will run in "fast" mode
    with the provided timeout value in seconds.

findercmap : str or matplotlib.cm.Colormap object
    The Colormap object to use for the finder chart image.

finderconvolve : astropy.convolution.Kernel object or None
    If not None, the Kernel object to use for convolving the finder image.

deredden_object : bool
    If this is True, will use the 2MASS DUST service to get extinction
    coefficients in various bands, and then try to deredden the magnitudes
    and colors of the object already present in the checkplot's objectinfo
    dict.

custom_bandpasses : dict
    This is a dict used to provide custom bandpass definitions for any
    magnitude measurements in the objectinfo dict that are not automatically
    recognized by the `varclass.starfeatures.color_features` function. See
    its docstring for details on the required format.

gaia_submit_timeout : float
    Sets the timeout in seconds to use when submitting a request to look up
    the object's information to the GAIA service. Note that if `fast_mode`
    is set, this is ignored.

gaia_submit_tries : int
    Sets the maximum number of times the GAIA services will be contacted to
    obtain this object's information. If `fast_mode` is set, this is
    ignored, and the services will be contacted only once (meaning that a
    failure to respond will be silently ignored and no GAIA data will be
    added to the checkplot's objectinfo dict).

gaia_max_timeout : float
    Sets the timeout in seconds to use when waiting for the GAIA service to
    respond to our request for the object's information. Note that if
    `fast_mode` is set, this is ignored.

gaia_mirror : str
    This sets the GAIA mirror to use. This is a key in the
    `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
    mirror.

complete_query_later : bool
    If this is True, saves the state of GAIA queries that are not yet
    complete when `gaia_max_timeout` is reached while waiting for the GAIA
    service to respond to our request. A later call for GAIA info on the
    same object will attempt to pick up the results from the existing query
    if it's completed. If `fast_mode` is True, this is ignored.

lclistpkl : dict or str
    If this is provided, must be a dict resulting from reading a catalog
    produced by the `lcproc.catalogs.make_lclist` function or a str path
    pointing to the pickle file produced by that function.
This catalog is used to find neighbors of the current object in the current light curve collection. Looking at neighbors of the object within the radius specified by `nbrradiusarcsec` is useful for light curves produced by instruments that have a large pixel scale, so are susceptible to blending of variability and potential confusion of neighbor variability with that of the actual object being looked at. If this is None, no neighbor lookups will be performed. nbrradiusarcsec : float The radius in arcseconds to use for a search conducted around the coordinates of this object to look for any potential confusion and blending of variability amplitude caused by their proximity. maxnumneighbors : int The maximum number of neighbors that will have their light curves and magnitudes noted in this checkplot as potential blends with the target object. plotdpi : int The resolution in DPI of the plots to generate in this function (e.g. the finder chart, etc.) findercachedir : str The path to the astrobase cache directory for finder chart downloads from the NASA SkyView service. verbose : bool If True, will indicate progress and warn about potential problems. Returns ------- list of str Paths to the updated checkplot pickle file.
[ "This", "updates", "the", "objectinfo", "for", "a", "directory", "of", "checkplot", "pickles", "." ]
python
valid
tino/pyFirmata
pyfirmata/pyfirmata.py
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L183-L194
def add_cmd_handler(self, cmd, func): """Adds a command handler for a command.""" len_args = len(inspect.getargspec(func)[0]) def add_meta(f): def decorator(*args, **kwargs): f(*args, **kwargs) decorator.bytes_needed = len_args - 1 # exclude self decorator.__name__ = f.__name__ return decorator func = add_meta(func) self._command_handlers[cmd] = func
[ "def", "add_cmd_handler", "(", "self", ",", "cmd", ",", "func", ")", ":", "len_args", "=", "len", "(", "inspect", ".", "getargspec", "(", "func", ")", "[", "0", "]", ")", "def", "add_meta", "(", "f", ")", ":", "def", "decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "decorator", ".", "bytes_needed", "=", "len_args", "-", "1", "# exclude self", "decorator", ".", "__name__", "=", "f", ".", "__name__", "return", "decorator", "func", "=", "add_meta", "(", "func", ")", "self", ".", "_command_handlers", "[", "cmd", "]", "=", "func" ]
Adds a command handler for a command.
[ "Adds", "a", "command", "handler", "for", "a", "command", "." ]
python
train
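`add_cmd_handler` counts a handler's parameters to know how many data bytes to buffer before dispatch. A sketch of the same idea using `inspect.signature` (the modern replacement for the deprecated `getargspec`), with a hypothetical handler and the attribute set directly on the function instead of via a wrapper:

import inspect

_command_handlers = {}

def add_cmd_handler(cmd, func):
    # number of parameters minus `self` tells the caller how many bytes to buffer
    func.bytes_needed = len(inspect.signature(func).parameters) - 1
    _command_handlers[cmd] = func

def handle_analog(self, pin, lsb, msb):  # hypothetical handler
    print('analog', pin, lsb, msb)

add_cmd_handler(0xE0, handle_analog)
print(_command_handlers[0xE0].bytes_needed)  # 3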
pybel/pybel
src/pybel/tokens.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/tokens.py#L48-L76
def _fusion_to_dsl(tokens) -> FusionBase: """Convert a PyParsing data dictionary to a PyBEL fusion data dictionary. :param tokens: A PyParsing data dictionary representing a fusion :type tokens: ParseResult """ func = tokens[FUNCTION] fusion_dsl = FUNC_TO_FUSION_DSL[func] member_dsl = FUNC_TO_DSL[func] partner_5p = member_dsl( namespace=tokens[FUSION][PARTNER_5P][NAMESPACE], name=tokens[FUSION][PARTNER_5P][NAME] ) partner_3p = member_dsl( namespace=tokens[FUSION][PARTNER_3P][NAMESPACE], name=tokens[FUSION][PARTNER_3P][NAME] ) range_5p = _fusion_range_to_dsl(tokens[FUSION][RANGE_5P]) range_3p = _fusion_range_to_dsl(tokens[FUSION][RANGE_3P]) return fusion_dsl( partner_5p=partner_5p, partner_3p=partner_3p, range_5p=range_5p, range_3p=range_3p, )
[ "def", "_fusion_to_dsl", "(", "tokens", ")", "->", "FusionBase", ":", "func", "=", "tokens", "[", "FUNCTION", "]", "fusion_dsl", "=", "FUNC_TO_FUSION_DSL", "[", "func", "]", "member_dsl", "=", "FUNC_TO_DSL", "[", "func", "]", "partner_5p", "=", "member_dsl", "(", "namespace", "=", "tokens", "[", "FUSION", "]", "[", "PARTNER_5P", "]", "[", "NAMESPACE", "]", ",", "name", "=", "tokens", "[", "FUSION", "]", "[", "PARTNER_5P", "]", "[", "NAME", "]", ")", "partner_3p", "=", "member_dsl", "(", "namespace", "=", "tokens", "[", "FUSION", "]", "[", "PARTNER_3P", "]", "[", "NAMESPACE", "]", ",", "name", "=", "tokens", "[", "FUSION", "]", "[", "PARTNER_3P", "]", "[", "NAME", "]", ")", "range_5p", "=", "_fusion_range_to_dsl", "(", "tokens", "[", "FUSION", "]", "[", "RANGE_5P", "]", ")", "range_3p", "=", "_fusion_range_to_dsl", "(", "tokens", "[", "FUSION", "]", "[", "RANGE_3P", "]", ")", "return", "fusion_dsl", "(", "partner_5p", "=", "partner_5p", ",", "partner_3p", "=", "partner_3p", ",", "range_5p", "=", "range_5p", ",", "range_3p", "=", "range_3p", ",", ")" ]
Convert a PyParsing data dictionary to a PyBEL fusion data dictionary. :param tokens: A PyParsing data dictionary representing a fusion :type tokens: ParseResult
[ "Convert", "a", "PyParsing", "data", "dictionary", "to", "a", "PyBEL", "fusion", "data", "dictionary", "." ]
python
train
LordSputnik/mutagen
mutagen/id3.py
https://github.com/LordSputnik/mutagen/blob/38e62c8dc35c72b16554f5dbe7c0fde91acc3411/mutagen/id3.py#L833-L887
def MakeID3v1(id3): """Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.""" v1 = {} for v2id, name in {"TIT2": "title", "TPE1": "artist", "TALB": "album"}.items(): if v2id in id3: text = id3[v2id].text[0].encode('latin1', 'replace')[:30] else: text = b'' v1[name] = text + (b'\x00' * (30 - len(text))) if "COMM" in id3: cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] else: cmnt = b'' v1['comment'] = cmnt + (b'\x00' * (29 - len(cmnt))) if "TRCK" in id3: try: v1["track"] = chr_(+id3["TRCK"]) except ValueError: v1["track"] = b'\x00' else: v1["track"] = b'\x00' if "TCON" in id3: try: genre = id3["TCON"].genres[0] except IndexError: pass else: if genre in TCON.GENRES: v1["genre"] = chr_(TCON.GENRES.index(genre)) if "genre" not in v1: v1["genre"] = b"\xff" if "TDRC" in id3: year = text_type(id3["TDRC"]).encode('latin1', 'replace') elif "TYER" in id3: year = text_type(id3["TYER"]).encode('latin1', 'replace') else: year = b'' v1['year'] = (year + b'\x00\x00\x00\x00')[:4] return (b'TAG' + v1['title'] + v1['artist'] + v1['album'] + v1['year'] + v1['comment'] + v1['track'] + v1['genre'])
[ "def", "MakeID3v1", "(", "id3", ")", ":", "v1", "=", "{", "}", "for", "v2id", ",", "name", "in", "{", "\"TIT2\"", ":", "\"title\"", ",", "\"TPE1\"", ":", "\"artist\"", ",", "\"TALB\"", ":", "\"album\"", "}", ".", "items", "(", ")", ":", "if", "v2id", "in", "id3", ":", "text", "=", "id3", "[", "v2id", "]", ".", "text", "[", "0", "]", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "[", ":", "30", "]", "else", ":", "text", "=", "b''", "v1", "[", "name", "]", "=", "text", "+", "(", "b'\\x00'", "*", "(", "30", "-", "len", "(", "text", ")", ")", ")", "if", "\"COMM\"", "in", "id3", ":", "cmnt", "=", "id3", "[", "\"COMM\"", "]", ".", "text", "[", "0", "]", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "[", ":", "28", "]", "else", ":", "cmnt", "=", "b''", "v1", "[", "'comment'", "]", "=", "cmnt", "+", "(", "b'\\x00'", "*", "(", "29", "-", "len", "(", "cmnt", ")", ")", ")", "if", "\"TRCK\"", "in", "id3", ":", "try", ":", "v1", "[", "\"track\"", "]", "=", "chr_", "(", "+", "id3", "[", "\"TRCK\"", "]", ")", "except", "ValueError", ":", "v1", "[", "\"track\"", "]", "=", "b'\\x00'", "else", ":", "v1", "[", "\"track\"", "]", "=", "b'\\x00'", "if", "\"TCON\"", "in", "id3", ":", "try", ":", "genre", "=", "id3", "[", "\"TCON\"", "]", ".", "genres", "[", "0", "]", "except", "IndexError", ":", "pass", "else", ":", "if", "genre", "in", "TCON", ".", "GENRES", ":", "v1", "[", "\"genre\"", "]", "=", "chr_", "(", "TCON", ".", "GENRES", ".", "index", "(", "genre", ")", ")", "if", "\"genre\"", "not", "in", "v1", ":", "v1", "[", "\"genre\"", "]", "=", "b\"\\xff\"", "if", "\"TDRC\"", "in", "id3", ":", "year", "=", "text_type", "(", "id3", "[", "\"TDRC\"", "]", ")", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "elif", "\"TYER\"", "in", "id3", ":", "year", "=", "text_type", "(", "id3", "[", "\"TYER\"", "]", ")", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "else", ":", "year", "=", "b''", "v1", "[", "'year'", "]", "=", "(", "year", "+", "b'\\x00\\x00\\x00\\x00'", ")", "[", ":", "4", "]", "return", "(", "b'TAG'", "+", "v1", "[", "'title'", "]", "+", "v1", "[", "'artist'", "]", "+", "v1", "[", "'album'", "]", "+", "v1", "[", "'year'", "]", "+", "v1", "[", "'comment'", "]", "+", "v1", "[", "'track'", "]", "+", "v1", "[", "'genre'", "]", ")" ]
Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.
[ "Return", "an", "ID3v1", ".", "1", "tag", "string", "from", "a", "dict", "of", "ID3v2", ".", "4", "frames", "." ]
python
test
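Most of `MakeID3v1` is padding or truncating latin-1 text into fixed-width fields; a tiny standalone illustration of that pattern:

def id3v1_field(text, width):
    """Encode to latin-1 (lossy) and NUL-pad to exactly `width` bytes."""
    raw = text.encode('latin1', 'replace')[:width]
    return raw + b'\x00' * (width - len(raw))

print(id3v1_field('My Song', 30))        # b'My Song\x00\x00...' (30 bytes)
print(len(id3v1_field('x' * 99, 30)))    # 30 -- over-long input is truncated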
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2678-L2773
def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <http://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}).set_index(['date', ... 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (animal: 2, date: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ try: import xarray except ImportError: # Give a nice error message raise ImportError("the xarray library is not installed\n" "you can install via conda\n" "conda install xarray\n" "or via pip\n" "pip install xarray\n") if self.ndim == 1: return xarray.DataArray.from_series(self) elif self.ndim == 2: return xarray.Dataset.from_dataframe(self) # > 2 dims coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS] return xarray.DataArray(self, coords=coords, )
[ "def", "to_xarray", "(", "self", ")", ":", "try", ":", "import", "xarray", "except", "ImportError", ":", "# Give a nice error message", "raise", "ImportError", "(", "\"the xarray library is not installed\\n\"", "\"you can install via conda\\n\"", "\"conda install xarray\\n\"", "\"or via pip\\n\"", "\"pip install xarray\\n\"", ")", "if", "self", ".", "ndim", "==", "1", ":", "return", "xarray", ".", "DataArray", ".", "from_series", "(", "self", ")", "elif", "self", ".", "ndim", "==", "2", ":", "return", "xarray", ".", "Dataset", ".", "from_dataframe", "(", "self", ")", "# > 2 dims", "coords", "=", "[", "(", "a", ",", "self", ".", "_get_axis", "(", "a", ")", ")", "for", "a", "in", "self", ".", "_AXIS_ORDERS", "]", "return", "xarray", ".", "DataArray", "(", "self", ",", "coords", "=", "coords", ",", ")" ]
Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <http://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}).set_index(['date', ... 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (animal: 2, date: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15
[ "Return", "an", "xarray", "object", "from", "the", "pandas", "object", "." ]
python
train
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L165-L235
def _determine_outliers_for_moving_average(moving_average: np.ndarray,
                                           moving_average_threshold: float,
                                           number_of_values_to_search_ahead: int,
                                           limit_of_number_of_values_below_threshold: int) -> int:
    """ Determine outliers to remove from a given moving average.

    Note:
        The index returned is when the moving average first drops below the threshold
        for a moving average calculated with that bin at the center. This is somewhat
        different from a standard moving average calculation which would only look forward
        in the array.

    Args:
        moving_average: Moving average.
        moving_average_threshold: Value of moving average under which we consider the moving average
            to be 0. Default: 2.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
            the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
            the beginning of outliers. Default: None, which will correspond to
            number_of_values_to_search_ahead - 1.
    Returns:
        0-indexed index of the histogram axes where the outliers begin.
    """
    below_threshold = moving_average < moving_average_threshold

    # Build up a list of values to check if they are below threshold. This list allows us to easily look
    # forward in the below_threshold array.
    values_to_check = []
    for i in range(limit_of_number_of_values_below_threshold):
        # Basically, this gives us (for limit_of_number_of_values_below_threshold = 4):
        # below_threshold[0:-3], below_threshold[1:-2], below_threshold[2:-1], below_threshold[3:None]
        values_to_check.append(
            below_threshold[i:-(limit_of_number_of_values_below_threshold - 1 - i) or None]
        )
    # Some helpful logging information.
    #logger.debug(f"values_to_check: {values_to_check}")
    #logger.debug(f"moving avg length: {len(moving_average)}, length of values_to_check entries: {[len(v) for v in values_to_check]}")

    # Must have at least one bin above the specified threshold.
    found_at_least_one_bin_above_threshold = False
    # Index we will search for from which outliers will be cut.
    cut_index = -1
    # Determine the index where limit_of_number_of_values_below_threshold bins are consecutively below the threshold.
    for i, values in enumerate(zip(*values_to_check)):
        # Skip the first bin because some old pt hard bin trains had a large number of erroneous entries
        # in the first bin (regardless of the actual pt hard bin). This should be resolved in the embedding
        # helper now. In any case, it doesn't make sense to encounter outliers in the first bin, so this is a
        # fine bin to skip.
        if i == 0:
            continue

        # True if below threshold, so check if not True.
        above_threshold = [not value for value in values]
        # We require the values to go above the moving average threshold at least once.
        if any(above_threshold):
            #logger.debug(f"Found bin i {i} above threshold with moving average: {moving_average[i]}")
            found_at_least_one_bin_above_threshold = True

        # All values from which we are looking ahead must be below the threshold to consider the index
        # as below threshold.
        if found_at_least_one_bin_above_threshold and all(np.invert(above_threshold)):
            # The previous outlier removal implementation used a moving average centered on a value
            # (i.e. it checked ``arr[-2 + current_index:current_index + 3]``). Thus, we need to
            # shift the cut_index that we assign by limit_of_number_of_values_below_threshold // 2 for
            # the index where we have found all values below the threshold.
logger.debug(f"i at found cut_index: {i} with moving_average: {moving_average[i]}") cut_index = i + limit_of_number_of_values_below_threshold // 2 break return cut_index
[ "def", "_determine_outliers_for_moving_average", "(", "moving_average", ":", "np", ".", "ndarray", ",", "moving_average_threshold", ":", "float", ",", "number_of_values_to_search_ahead", ":", "int", ",", "limit_of_number_of_values_below_threshold", ":", "int", ")", "->", "int", ":", "below_threshold", "=", "moving_average", "<", "moving_average_threshold", "# Build up a list of values to check if they are below threshold. This list allows us to easily look", "# forward in the below_threshold array.", "values_to_check", "=", "[", "]", "for", "i", "in", "range", "(", "limit_of_number_of_values_below_threshold", ")", ":", "# Basically, this gives us (for limit_of_number_of_values_below_threshold = 4):", "# below_threshold[0:-3], below_threshold[1:-2], below_threshold[2:-1], below_threshold[3:None]", "values_to_check", ".", "append", "(", "below_threshold", "[", "i", ":", "-", "(", "limit_of_number_of_values_below_threshold", "-", "1", "-", "i", ")", "or", "None", "]", ")", "# Some helpful logging information.", "#logger.debug(f\"values_to_check: {values_to_check}\")", "#logger.debug(f\"moving avg length: {len(moving_average)}, length of values_to_check entries: {[len(v) for v in values_to_check]}\")", "# Must have at least one bin above the specified threshold.", "found_at_least_one_bin_above_threshold", "=", "False", "# Index we will search for from which outliers will be cut.", "cut_index", "=", "-", "1", "# Determine the index where the limit_of_number_of_values_below_threshold bins are consequentially below the threshold.", "for", "i", ",", "values", "in", "enumerate", "(", "zip", "(", "*", "values_to_check", ")", ")", ":", "# Skip the first bin because some old pt hard bin trains had a large number of erroneous entries", "# in the first bin (regardless of the actual pt hard bin). This should be resolved in the embedding", "# helper now. In any case, it doesn't make sense to encounter outliers in the first bin, so this is a", "# fine bin to skip.", "if", "i", "==", "0", ":", "continue", "# True if below threshold, so check if not True.", "above_threshold", "=", "[", "not", "value", "for", "value", "in", "values", "]", "# We require the values to go above the moving average threshold at least once.", "if", "any", "(", "above_threshold", ")", ":", "#logger.debug(f\"Found bin i {i} above threshold with moving average: {moving_average[i]}\")", "found_at_least_one_bin_above_threshold", "=", "True", "# All values from which we are looking ahead must be below the threshold to consider the index", "# as below threshold.", "if", "found_at_least_one_bin_above_threshold", "and", "all", "(", "np", ".", "invert", "(", "above_threshold", ")", ")", ":", "# The previous outlier removal implementation used a moving average centered on a value", "# (ie. it checked ``arr[-2 + current_index:current_index + 3]``). Thus, we need to", "# shift the cut_index that we assign by limit_of_number_of_values_below_threshold // 2 for", "# the index where we have found all values below the threshold.", "logger", ".", "debug", "(", "f\"i at found cut_index: {i} with moving_average: {moving_average[i]}\"", ")", "cut_index", "=", "i", "+", "limit_of_number_of_values_below_threshold", "//", "2", "break", "return", "cut_index" ]
Determine outliers to remove from a given moving average. Note: The index returned is when the moving average first drops below the threshold for a moving average calculated with that bin at the center. This is somewhat different from a standard moving average calculation which would only look forward in the array. Args: moving_average: Moving average. moving_average_threshold: Value of moving average under which we consider the moving average to be 0. Default: 2. number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating the moving average. Default: 5. limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1. Returns: 0-indexed index of the histogram axes where the outliers begin.
[ "Determine", "outliers", "to", "remove", "from", "a", "given", "moving", "average", "." ]
python
train
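A compact numpy sketch of the look-ahead test above: after at least one above-threshold bin has been seen, find the first window of `n` consecutive below-threshold values and return its centered index. The array values are illustrative:

import numpy as np

def first_run_below(moving_average, threshold, n):
    below = moving_average < threshold
    seen_above = False
    for i in range(1, len(below) - n + 1):  # skip bin 0, as the original does
        window = below[i:i + n]
        if not window.all():
            seen_above = True
        if seen_above and window.all():
            # center the cut on the window, as in the original implementation
            return i + n // 2
    return -1

avg = np.array([5.0, 4.0, 3.0, 0.5, 0.4, 0.3, 0.2, 0.1])
print(first_run_below(avg, threshold=2.0, n=4))  # 5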
mfcloud/python-zvm-sdk
zvmsdk/dist.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/dist.py#L679-L688
def _set_sysfs(self, fcp, target_wwpn, target_lun): """rhel6 set WWPN and LUN in sysfs""" device = '0.0.%s' % fcp port_add = "echo '%s' > " % target_wwpn port_add += "/sys/bus/ccw/drivers/zfcp/%s/port_add" % device unit_add = "echo '%s' > " % target_lun unit_add += "/sys/bus/ccw/drivers/zfcp/%(device)s/%(wwpn)s/unit_add\n"\ % {'device': device, 'wwpn': target_wwpn} return '\n'.join((port_add, unit_add))
[ "def", "_set_sysfs", "(", "self", ",", "fcp", ",", "target_wwpn", ",", "target_lun", ")", ":", "device", "=", "'0.0.%s'", "%", "fcp", "port_add", "=", "\"echo '%s' > \"", "%", "target_wwpn", "port_add", "+=", "\"/sys/bus/ccw/drivers/zfcp/%s/port_add\"", "%", "device", "unit_add", "=", "\"echo '%s' > \"", "%", "target_lun", "unit_add", "+=", "\"/sys/bus/ccw/drivers/zfcp/%(device)s/%(wwpn)s/unit_add\\n\"", "%", "{", "'device'", ":", "device", ",", "'wwpn'", ":", "target_wwpn", "}", "return", "'\\n'", ".", "join", "(", "(", "port_add", ",", "unit_add", ")", ")" ]
rhel6 set WWPN and LUN in sysfs
[ "rhel6", "set", "WWPN", "and", "LUN", "in", "sysfs" ]
python
train
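`_set_sysfs` only assembles two shell `echo` lines; a standalone version with made-up FCP/WWPN/LUN values:

def set_sysfs(fcp, target_wwpn, target_lun):
    device = '0.0.%s' % fcp
    port_add = "echo '%s' > /sys/bus/ccw/drivers/zfcp/%s/port_add" % (
        target_wwpn, device)
    unit_add = "echo '%s' > /sys/bus/ccw/drivers/zfcp/%s/%s/unit_add\n" % (
        target_lun, device, target_wwpn)
    return '\n'.join((port_add, unit_add))

# Hypothetical device identifiers, for illustration only.
print(set_sysfs('1234', '0x5005076801102991', '0x0001000000000000'))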
elastic/elasticsearch-py
elasticsearch/client/cat.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/cat.py#L229-L246
def segments(self, index=None, params=None): """ The segments command is the detailed view of Lucene segments per index. `<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html>`_ :arg index: A comma-separated list of index names to limit the returned information :arg bytes: The unit in which to display byte values, valid choices are: 'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb' :arg format: a short version of the Accept header, e.g. json, yaml :arg h: Comma-separated list of column names to display :arg help: Return help information, default False :arg s: Comma-separated list of column names or column aliases to sort by :arg v: Verbose mode. Display column headers, default False """ return self.transport.perform_request('GET', _make_path('_cat', 'segments', index), params=params)
[ "def", "segments", "(", "self", ",", "index", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "'GET'", ",", "_make_path", "(", "'_cat'", ",", "'segments'", ",", "index", ")", ",", "params", "=", "params", ")" ]
The segments command is the detailed view of Lucene segments per index. `<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html>`_ :arg index: A comma-separated list of index names to limit the returned information :arg bytes: The unit in which to display byte values, valid choices are: 'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb' :arg format: a short version of the Accept header, e.g. json, yaml :arg h: Comma-separated list of column names to display :arg help: Return help information, default False :arg s: Comma-separated list of column names or column aliases to sort by :arg v: Verbose mode. Display column headers, default False
[ "The", "segments", "command", "is", "the", "detailed", "view", "of", "Lucene", "segments", "per", "index", ".", "<https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "cat", "-", "segments", ".", "html", ">", "_" ]
python
train
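Typical usage through the high-level client is expected to look like the sketch below (the cluster address and index name are placeholders; the `v` flag is turned into a query parameter by the client's `query_params` machinery):

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # placeholder address
# verbose, human-readable table of Lucene segments for one index
print(es.cat.segments(index='my-index', v=True))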
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1754-L1761
def match_date(self, value): '''Search for date information in the string''' m = self.REGEX_DATE.search(value) date = datetime.datetime.utcnow().date() if m: date = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3))) value = self.REGEX_DATE.sub('', value) return (date, value)
[ "def", "match_date", "(", "self", ",", "value", ")", ":", "m", "=", "self", ".", "REGEX_DATE", ".", "search", "(", "value", ")", "date", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "date", "(", ")", "if", "m", ":", "date", "=", "datetime", ".", "date", "(", "int", "(", "m", ".", "group", "(", "1", ")", ")", ",", "int", "(", "m", ".", "group", "(", "2", ")", ")", ",", "int", "(", "m", ".", "group", "(", "3", ")", ")", ")", "value", "=", "self", ".", "REGEX_DATE", ".", "sub", "(", "''", ",", "value", ")", "return", "(", "date", ",", "value", ")" ]
Search for date information in the string
[ "Search", "for", "date", "information", "in", "the", "string" ]
python
test
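A self-contained version of `match_date`; `self.REGEX_DATE` is defined elsewhere in s4cmd, so a plausible `YYYY-MM-DD` pattern is assumed here:

import datetime
import re

# Assumed pattern -- the real REGEX_DATE in s4cmd may differ.
REGEX_DATE = re.compile(r'(\d{4})-(\d{2})-(\d{2})')

def match_date(value):
    m = REGEX_DATE.search(value)
    date = datetime.datetime.utcnow().date()  # fall back to today (UTC)
    if m:
        date = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        value = REGEX_DATE.sub('', value)
    return date, value

print(match_date('backup-2023-07-14.tar'))
# (datetime.date(2023, 7, 14), 'backup-.tar')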
splitkeycoffee/pyhottop
pyhottop/pyhottop.py
https://github.com/splitkeycoffee/pyhottop/blob/2986bbb2d848f7e41fa3ece5ebb1b33c8882219c/pyhottop/pyhottop.py#L919-L930
def set_cooling_motor(self, cooling_motor): """Set the cooling motor config. :param cooling_motor: Value to set the cooling motor :type cooling_motor: bool :returns: None :raises: InvalidInput """ if type(cooling_motor) != bool: raise InvalidInput("Cooling motor value must be bool") self._config['cooling_motor'] = bool2int(cooling_motor) self._q.put(self._config)
[ "def", "set_cooling_motor", "(", "self", ",", "cooling_motor", ")", ":", "if", "type", "(", "cooling_motor", ")", "!=", "bool", ":", "raise", "InvalidInput", "(", "\"Cooling motor value must be bool\"", ")", "self", ".", "_config", "[", "'cooling_motor'", "]", "=", "bool2int", "(", "cooling_motor", ")", "self", ".", "_q", ".", "put", "(", "self", ".", "_config", ")" ]
Set the cooling motor config. :param cooling_motor: Value to set the cooling motor :type cooling_motor: bool :returns: None :raises: InvalidInput
[ "Set", "the", "cooling", "motor", "config", "." ]
python
train
Alignak-monitoring/alignak
alignak/http/arbiter_interface.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/arbiter_interface.py#L138-L224
def command(self, command=None,
                timestamp=None, element=None, host=None, service=None, user=None,
                parameters=None):
        # pylint: disable=too-many-branches
        """ Request to execute an external command

        Allowed parameters are:

        `command`: mandatory parameter containing the whole command line or only the command name

        `timestamp`: optional parameter containing the timestamp. If not present, the
        current timestamp is added in the command line

        `element`: the targeted element that will be appended after the command name (`command`).
        If element contains a '/' character it is split to make a host and a service.

        `host`, `service` or `user`: the targeted host, service or user. Takes precedence over
        the `element` to target a specific element

        `parameters`: the parameter that will be appended after all the arguments

        When using this endpoint with the HTTP GET method, the semi colons that are commonly used
        to separate the parameters must be replaced with %3B! This is because the ; is an accepted
        URL query parameters separator...

        Indeed, the recommended way of using this endpoint is to use the HTTP POST method.

        In case of any error, this function returns an object containing some properties:
        '_status': 'ERR' because of the error
        `_message`: some more explanations about the error

        The `_status` field is 'OK' with a corresponding `_message` to explain what the Arbiter
        will do depending upon the notification. The `command` property contains the formatted
        external command.

        :return: dict
        """
        if cherrypy.request.method in ["POST"]:
            if not cherrypy.request.json:
                return {'_status': u'ERR',
                        '_message': u'You must POST parameters on this endpoint.'}

            if command is None:
                try:
                    command = cherrypy.request.json.get('command', None)
                    timestamp = cherrypy.request.json.get('timestamp', None)
                    element = cherrypy.request.json.get('element', None)
                    host = cherrypy.request.json.get('host', None)
                    service = cherrypy.request.json.get('service', None)
                    user = cherrypy.request.json.get('user', None)
                    parameters = cherrypy.request.json.get('parameters', None)
                except AttributeError:
                    return {'_status': u'ERR', '_message': u'Missing command parameters'}

        if not command:
            return {'_status': u'ERR', '_message': u'Missing command parameter'}

        fields = split_semicolon(command)
        command_line = command.replace(fields[0], fields[0].upper())

        if timestamp:
            try:
                timestamp = int(timestamp)
            except ValueError:
                return {'_status': u'ERR', '_message': u'Timestamp must be an integer value'}
            command_line = '[%d] %s' % (timestamp, command_line)

        if host or service or user:
            if host:
                command_line = '%s;%s' % (command_line, host)
            if service:
                command_line = '%s;%s' % (command_line, service)
            if user:
                command_line = '%s;%s' % (command_line, user)
        elif element:
            if '/' in element:
                # Replace only the first /
                element = element.replace('/', ';', 1)
            command_line = '%s;%s' % (command_line, element)

        if parameters:
            command_line = '%s;%s' % (command_line, parameters)

        # Add the command to the external commands queue so that it gets managed
        logger.warning("Got an external command: %s", command_line)
        self.app.add(ExternalCommand(command_line))

        return {'_status': u'OK',
                '_message': u"Got command: %s" % command_line,
                'command': command_line}
[ "def", "command", "(", "self", ",", "command", "=", "None", ",", "timestamp", "=", "None", ",", "element", "=", "None", ",", "host", "=", "None", ",", "service", "=", "None", ",", "user", "=", "None", ",", "parameters", "=", "None", ")", ":", "# pylint: disable=too-many-branches", "if", "cherrypy", ".", "request", ".", "method", "in", "[", "\"POST\"", "]", ":", "if", "not", "cherrypy", ".", "request", ".", "json", ":", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "u'You must POST parameters on this endpoint.'", "}", "if", "command", "is", "None", ":", "try", ":", "command", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'command'", ",", "None", ")", "timestamp", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'timestamp'", ",", "None", ")", "element", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'element'", ",", "None", ")", "host", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'host'", ",", "None", ")", "service", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'service'", ",", "None", ")", "user", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'user'", ",", "None", ")", "parameters", "=", "cherrypy", ".", "request", ".", "json", ".", "get", "(", "'parameters'", ",", "None", ")", "except", "AttributeError", ":", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "u'Missing command parameters'", "}", "if", "not", "command", ":", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "u'Missing command parameter'", "}", "fields", "=", "split_semicolon", "(", "command", ")", "command_line", "=", "command", ".", "replace", "(", "fields", "[", "0", "]", ",", "fields", "[", "0", "]", ".", "upper", "(", ")", ")", "if", "timestamp", ":", "try", ":", "timestamp", "=", "int", "(", "timestamp", ")", "except", "ValueError", ":", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "u'Timestamp must be an integer value'", "}", "command_line", "=", "'[%d] %s'", "%", "(", "timestamp", ",", "command_line", ")", "if", "host", "or", "service", "or", "user", ":", "if", "host", ":", "command_line", "=", "'%s;%s'", "%", "(", "command_line", ",", "host", ")", "if", "service", ":", "command_line", "=", "'%s;%s'", "%", "(", "command_line", ",", "service", ")", "if", "user", ":", "command_line", "=", "'%s;%s'", "%", "(", "command_line", ",", "user", ")", "elif", "element", ":", "if", "'/'", "in", "element", ":", "# Replace only the first /", "element", "=", "element", ".", "replace", "(", "'/'", ",", "';'", ",", "1", ")", "command_line", "=", "'%s;%s'", "%", "(", "command_line", ",", "element", ")", "if", "parameters", ":", "command_line", "=", "'%s;%s'", "%", "(", "command_line", ",", "parameters", ")", "# Add a command to get managed", "logger", ".", "warning", "(", "\"Got an external command: %s\"", ",", "command_line", ")", "self", ".", "app", ".", "add", "(", "ExternalCommand", "(", "command_line", ")", ")", "return", "{", "'_status'", ":", "u'OK'", ",", "'_message'", ":", "u\"Got command: %s\"", "%", "command_line", ",", "'command'", ":", "command_line", "}" ]
Request to execute an external command

Allowed parameters are:
`command`: mandatory parameter containing the whole command line or only the command name

`timestamp`: optional parameter containing the timestamp. If not present, the current timestamp is added in the command line

`element`: the targeted element that will be appended after the command name (`command`). If element contains a '/' character it is split to make a host and service.

`host`, `service` or `user`: the targeted host, service or user. Takes precedence over the `element` to target a specific element

`parameters`: the parameter that will be appended after all the arguments

When using this endpoint with the HTTP GET method, the semicolons that are commonly used to separate the parameters must be replaced with %3B! This is because the ; is an accepted URL query parameter separator...

Indeed, the recommended way of using this endpoint is to use the HTTP POST method.

In case of any error, this function returns an object containing some properties:
'_status': 'ERR' because of the error
`_message`: some more explanations about the error

The `_status` field is 'OK' with a corresponding `_message` to explain what the Arbiter will do depending upon the notification. The `command` property contains the formatted external command.

:return: dict
[ "Request", "to", "execute", "an", "external", "command" ]
python
train
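A minimal usage sketch for the endpoint above, assuming an Alignak Arbiter is reachable over HTTP; the address, command name, and values are illustrative, not taken from the record::

    import requests

    # Hypothetical Arbiter address; replace with your deployment's URL.
    url = 'http://127.0.0.1:7770/command'
    payload = {
        'command': 'process_host_check_result',
        'element': 'my_host',          # appended after the command name
        'parameters': '0;Host is UP',  # appended after all other arguments
    }
    # POST is the recommended method; GET would need %3B instead of ';'.
    response = requests.post(url, json=payload)
    print(response.json())  # e.g. {'_status': 'OK', '_message': '...', 'command': '...'}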
bokeh/bokeh
bokeh/document/document.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/document/document.py#L671-L678
def on_session_destroyed(self, *callbacks): ''' Provide callbacks to invoke when the session serving the Document is destroyed ''' for callback in callbacks: _check_callback(callback, ('session_context',)) self._session_destroyed_callbacks.add(callback)
[ "def", "on_session_destroyed", "(", "self", ",", "*", "callbacks", ")", ":", "for", "callback", "in", "callbacks", ":", "_check_callback", "(", "callback", ",", "(", "'session_context'", ",", ")", ")", "self", ".", "_session_destroyed_callbacks", ".", "add", "(", "callback", ")" ]
Provide callbacks to invoke when the session serving the Document is destroyed
[ "Provide", "callbacks", "to", "invoke", "when", "the", "session", "serving", "the", "Document", "is", "destroyed" ]
python
train
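A short sketch of registering such a callback from application code; the single session_context argument matches the _check_callback check in the method above::

    from bokeh.io import curdoc

    def report_destroyed(session_context):
        # Runs when the session serving this document is destroyed.
        print('session destroyed:', session_context)

    curdoc().on_session_destroyed(report_destroyed)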
edx/ecommerce-worker
ecommerce_worker/sailthru/v1/tasks.py
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L57-L85
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
    """Record a purchase in Sailthru

    Arguments:
        sailthru_client (object): SailthruClient
        email (str): user's email address
        item (dict): information about the course required by Sailthru
        purchase_incomplete (boolean): True if adding item to shopping cart
        message_id (str): Cookie used to identify marketing campaign
        options (dict): Sailthru purchase API options (e.g. template name)

    Returns:
        False if retryable error, else True
    """
    try:
        sailthru_response = sailthru_client.purchase(email, [item],
                                                     incomplete=purchase_incomplete,
                                                     message_id=message_id,
                                                     options=options)

        if not sailthru_response.is_ok():
            error = sailthru_response.get_error()
            logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
            return not can_retry_sailthru_request(error)

    except SailthruClientError as exc:
        logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
        return False

    return True
[ "def", "_record_purchase", "(", "sailthru_client", ",", "email", ",", "item", ",", "purchase_incomplete", ",", "message_id", ",", "options", ")", ":", "try", ":", "sailthru_response", "=", "sailthru_client", ".", "purchase", "(", "email", ",", "[", "item", "]", ",", "incomplete", "=", "purchase_incomplete", ",", "message_id", "=", "message_id", ",", "options", "=", "options", ")", "if", "not", "sailthru_response", ".", "is_ok", "(", ")", ":", "error", "=", "sailthru_response", ".", "get_error", "(", ")", "logger", ".", "error", "(", "\"Error attempting to record purchase in Sailthru: %s\"", ",", "error", ".", "get_message", "(", ")", ")", "return", "not", "can_retry_sailthru_request", "(", "error", ")", "except", "SailthruClientError", "as", "exc", ":", "logger", ".", "exception", "(", "\"Exception attempting to record purchase for %s in Sailthru - %s\"", ",", "email", ",", "text_type", "(", "exc", ")", ")", "return", "False", "return", "True" ]
Record a purchase in Sailthru

Arguments:
    sailthru_client (object): SailthruClient
    email (str): user's email address
    item (dict): information about the course required by Sailthru
    purchase_incomplete (boolean): True if adding item to shopping cart
    message_id (str): Cookie used to identify marketing campaign
    options (dict): Sailthru purchase API options (e.g. template name)

Returns:
    False if retryable error, else True
[ "Record", "a", "purchase", "in", "Sailthru" ]
python
test
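A hypothetical call sketch, assuming the sailthru-client package is installed; the credentials and item fields are placeholders, not a definitive Sailthru schema::

    from sailthru.sailthru_client import SailthruClient

    client = SailthruClient('api-key', 'api-secret')  # placeholder credentials
    item = {'id': 'course-v1:edX+DemoX+2024', 'title': 'Demo Course',
            'price': 4900, 'qty': 1}  # illustrative item fields
    ok = _record_purchase(client, 'learner@example.com', item,
                          purchase_incomplete=False, message_id=None,
                          options={'send_template': 'purchase_receipt'})
    # ok is False only when the docstring says the call should be retried.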
inveniosoftware/invenio-migrator
invenio_migrator/legacy/bibdocfile.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/legacy/bibdocfile.py#L143-L150
def check(id_): """Check bibdocs.""" BibRecDocs, BibDoc = _import_bibdoc() try: BibDoc(id_).list_all_files() except Exception: click.secho("BibDoc {0} failed check.".format(id_), fg='red')
[ "def", "check", "(", "id_", ")", ":", "BibRecDocs", ",", "BibDoc", "=", "_import_bibdoc", "(", ")", "try", ":", "BibDoc", "(", "id_", ")", ".", "list_all_files", "(", ")", "except", "Exception", ":", "click", ".", "secho", "(", "\"BibDoc {0} failed check.\"", ".", "format", "(", "id_", ")", ",", "fg", "=", "'red'", ")" ]
Check bibdocs.
[ "Check", "bibdocs", "." ]
python
test
pyvisa/pyvisa
pyvisa/shell.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/shell.py#L278-L356
def do_attr(self, args): """Get or set the state for a visa attribute. List all attributes: attr Get an attribute state: attr <name> Set an attribute state: attr <name> <state> """ if not self.current: print('There are no resources in use. Use the command "open".') return args = args.strip() if not args: self.print_attribute_list() return args = args.split(' ') if len(args) > 2: print('Invalid syntax, use `attr <name>` to get; or `attr <name> <value>` to set') elif len(args) == 1: # Get attr_name = args[0] if attr_name.startswith('VI_'): try: print(self.current.get_visa_attribute(getattr(constants, attr_name))) except Exception as e: print(e) else: try: print(getattr(self.current, attr_name)) except Exception as e: print(e) else: attr_name, attr_state = args[0], args[1] if attr_name.startswith('VI_'): try: attributeId = getattr(constants, attr_name) attr = attributes.AttributesByID[attributeId] datatype = attr.visa_type retcode = None if datatype == 'ViBoolean': if attr_state == 'True': attr_state = True elif attr_state == 'False': attr_state = False else: retcode = constants.StatusCode.error_nonsupported_attribute_state elif datatype in ['ViUInt8', 'ViUInt16', 'ViUInt32', 'ViInt8', 'ViInt16', 'ViInt32']: try: attr_state = int(attr_state) except ValueError: retcode = constants.StatusCode.error_nonsupported_attribute_state if not retcode: retcode = self.current.set_visa_attribute(attributeId, attr_state) if retcode: print('Error {}'.format(str(retcode))) else: print('Done') except Exception as e: print(e) else: print('Setting Resource Attributes by python name is not yet supported.') return try: print(getattr(self.current, attr_name)) print('Done') except Exception as e: print(e)
[ "def", "do_attr", "(", "self", ",", "args", ")", ":", "if", "not", "self", ".", "current", ":", "print", "(", "'There are no resources in use. Use the command \"open\".'", ")", "return", "args", "=", "args", ".", "strip", "(", ")", "if", "not", "args", ":", "self", ".", "print_attribute_list", "(", ")", "return", "args", "=", "args", ".", "split", "(", "' '", ")", "if", "len", "(", "args", ")", ">", "2", ":", "print", "(", "'Invalid syntax, use `attr <name>` to get; or `attr <name> <value>` to set'", ")", "elif", "len", "(", "args", ")", "==", "1", ":", "# Get", "attr_name", "=", "args", "[", "0", "]", "if", "attr_name", ".", "startswith", "(", "'VI_'", ")", ":", "try", ":", "print", "(", "self", ".", "current", ".", "get_visa_attribute", "(", "getattr", "(", "constants", ",", "attr_name", ")", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "try", ":", "print", "(", "getattr", "(", "self", ".", "current", ",", "attr_name", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "attr_name", ",", "attr_state", "=", "args", "[", "0", "]", ",", "args", "[", "1", "]", "if", "attr_name", ".", "startswith", "(", "'VI_'", ")", ":", "try", ":", "attributeId", "=", "getattr", "(", "constants", ",", "attr_name", ")", "attr", "=", "attributes", ".", "AttributesByID", "[", "attributeId", "]", "datatype", "=", "attr", ".", "visa_type", "retcode", "=", "None", "if", "datatype", "==", "'ViBoolean'", ":", "if", "attr_state", "==", "'True'", ":", "attr_state", "=", "True", "elif", "attr_state", "==", "'False'", ":", "attr_state", "=", "False", "else", ":", "retcode", "=", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute_state", "elif", "datatype", "in", "[", "'ViUInt8'", ",", "'ViUInt16'", ",", "'ViUInt32'", ",", "'ViInt8'", ",", "'ViInt16'", ",", "'ViInt32'", "]", ":", "try", ":", "attr_state", "=", "int", "(", "attr_state", ")", "except", "ValueError", ":", "retcode", "=", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute_state", "if", "not", "retcode", ":", "retcode", "=", "self", ".", "current", ".", "set_visa_attribute", "(", "attributeId", ",", "attr_state", ")", "if", "retcode", ":", "print", "(", "'Error {}'", ".", "format", "(", "str", "(", "retcode", ")", ")", ")", "else", ":", "print", "(", "'Done'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "else", ":", "print", "(", "'Setting Resource Attributes by python name is not yet supported.'", ")", "return", "try", ":", "print", "(", "getattr", "(", "self", ".", "current", ",", "attr_name", ")", ")", "print", "(", "'Done'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")" ]
Get or set the state for a visa attribute. List all attributes: attr Get an attribute state: attr <name> Set an attribute state: attr <name> <state>
[ "Get", "or", "set", "the", "state", "for", "a", "visa", "attribute", "." ]
python
train
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/21d46be48fd453500ea49de699bc9eabc427bdf7/rectpack/packer.py#L404-L420
def _find_best_fit(self, pbin): """ Return best fitness rectangle from rectangles packing _sorted_rect list Arguments: pbin (PackingAlgorithm): Packing bin Returns: key of the rectangle with best fitness """ fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items()) fit = (f for f in fit if f[0] is not None) try: _, rect = min(fit, key=self.first_item) return rect except ValueError: return None
[ "def", "_find_best_fit", "(", "self", ",", "pbin", ")", ":", "fit", "=", "(", "(", "pbin", ".", "fitness", "(", "r", "[", "0", "]", ",", "r", "[", "1", "]", ")", ",", "k", ")", "for", "k", ",", "r", "in", "self", ".", "_sorted_rect", ".", "items", "(", ")", ")", "fit", "=", "(", "f", "for", "f", "in", "fit", "if", "f", "[", "0", "]", "is", "not", "None", ")", "try", ":", "_", ",", "rect", "=", "min", "(", "fit", ",", "key", "=", "self", ".", "first_item", ")", "return", "rect", "except", "ValueError", ":", "return", "None" ]
Return best fitness rectangle from rectangles packing _sorted_rect list Arguments: pbin (PackingAlgorithm): Packing bin Returns: key of the rectangle with best fitness
[ "Return", "best", "fitness", "rectangle", "from", "rectangles", "packing", "_sorted_rect", "list" ]
python
train
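_find_best_fit is internal; a sketch of the public rectpack API that exercises it, assuming the rectpack package is installed::

    from rectpack import newPacker

    packer = newPacker()
    for width, height in [(30, 40), (50, 30), (10, 10)]:
        packer.add_rect(width, height)
    packer.add_bin(100, 100)
    packer.pack()  # internally selects rectangles by best fitness
    for placement in packer.rect_list():
        print(placement)  # (bin_index, x, y, width, height, rid)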
littlemo/moear-api-common
moear_api_common/utils/img.py
https://github.com/littlemo/moear-api-common/blob/2a89a052d92592dd8e67bd50ec2fadc0219ac5d0/moear_api_common/utils/img.py#L5-L64
def rescale_image(
        data, maxsizeb=4000000, dimen=None,
        png2jpg=False, graying=True, reduceto=(600, 800)):
    '''
    If ``png2jpg`` is ``True``, the image is converted to ``JPEG`` format and
    all transparent pixels are set to *white*. Ensures the resulting image
    size stays below the ``maxsizeb`` constraint.

    If ``dimen`` is not None, a thumbnail is generated under the corresponding
    constraint. Depending on the type of ``dimen``, the constraint is set as
    ``width=dimen, height=dimen`` or ``width, height = dimen``

    :param data: raw image byte data
    :type data: bytes or io.BytesIO
    :param int maxsizeb: file size constraint, in bytes
    :param dimen: thumbnail size constraint, width & height
    :type dimen: int or (int, int)
    :param bool png2jpg: whether to convert the image to JPG format
    :param bool graying: whether to convert the image to grayscale
    :param reduceto: if the image exceeds this constraint, it is scaled down
        accordingly, width & height
    :type reduceto: (int, int)
    :return: the processed image byte data, which can be written directly to
        a file opened in ``wb`` mode
    :rtype: bytes
    '''
    if not isinstance(data, BytesIO):
        data = BytesIO(data)
    img = Image.open(data)
    width, height = img.size
    fmt = img.format
    if graying and img.mode != "L":
        img = img.convert("L")

    reducewidth, reduceheight = reduceto

    if dimen is not None:
        if hasattr(dimen, '__len__'):
            width, height = dimen
        else:
            width = height = dimen
        img.thumbnail((width, height))
        if png2jpg and fmt == 'PNG':
            fmt = 'JPEG'
        data = BytesIO()
        img.save(data, fmt)
    elif width > reducewidth or height > reduceheight:
        ratio = min(
            float(reducewidth) / float(width),
            float(reduceheight) / float(height))
        img = img.resize((
            int(width * ratio),
            int(height * ratio)),
            Image.ANTIALIAS)
        if png2jpg and fmt == 'PNG':
            fmt = 'JPEG'
        data = BytesIO()
        img.save(data, fmt)
    elif png2jpg and fmt == 'PNG':
        data = BytesIO()
        img.save(data, 'JPEG')
    else:
        data = BytesIO()
        img.save(data, fmt)
    return data.getvalue()
[ "def", "rescale_image", "(", "data", ",", "maxsizeb", "=", "4000000", ",", "dimen", "=", "None", ",", "png2jpg", "=", "False", ",", "graying", "=", "True", ",", "reduceto", "=", "(", "600", ",", "800", ")", ")", ":", "if", "not", "isinstance", "(", "data", ",", "BytesIO", ")", ":", "data", "=", "BytesIO", "(", "data", ")", "img", "=", "Image", ".", "open", "(", "data", ")", "width", ",", "height", "=", "img", ".", "size", "fmt", "=", "img", ".", "format", "if", "graying", "and", "img", ".", "mode", "!=", "\"L\"", ":", "img", "=", "img", ".", "convert", "(", "\"L\"", ")", "reducewidth", ",", "reduceheight", "=", "reduceto", "if", "dimen", "is", "not", "None", ":", "if", "hasattr", "(", "dimen", ",", "'__len__'", ")", ":", "width", ",", "height", "=", "dimen", "else", ":", "width", "=", "height", "=", "dimen", "img", ".", "thumbnail", "(", "(", "width", ",", "height", ")", ")", "if", "png2jpg", "and", "fmt", "==", "'PNG'", ":", "fmt", "=", "'JPEG'", "data", "=", "BytesIO", "(", ")", "img", ".", "save", "(", "data", ",", "fmt", ")", "elif", "width", ">", "reducewidth", "or", "height", ">", "reduceheight", ":", "ratio", "=", "min", "(", "float", "(", "reducewidth", ")", "/", "float", "(", "width", ")", ",", "float", "(", "reduceheight", ")", "/", "float", "(", "height", ")", ")", "img", "=", "img", ".", "resize", "(", "(", "int", "(", "width", "*", "ratio", ")", ",", "int", "(", "height", "*", "ratio", ")", ")", ",", "Image", ".", "ANTIALIAS", ")", "if", "png2jpg", "and", "fmt", "==", "'PNG'", ":", "fmt", "=", "'JPEG'", "data", "=", "BytesIO", "(", ")", "img", ".", "save", "(", "data", ",", "fmt", ")", "elif", "png2jpg", "and", "fmt", "==", "'PNG'", ":", "data", "=", "BytesIO", "(", ")", "img", ".", "save", "(", "data", ",", "'JPEG'", ")", "else", ":", "data", "=", "BytesIO", "(", ")", "img", ".", "save", "(", "data", ",", "fmt", ")", "return", "data", ".", "getvalue", "(", ")" ]
If ``png2jpg`` is ``True``, the image is converted to ``JPEG`` format and
all transparent pixels are set to *white*. Ensures the resulting image
size stays below the ``maxsizeb`` constraint.

If ``dimen`` is not None, a thumbnail is generated under the corresponding
constraint. Depending on the type of ``dimen``, the constraint is set as
``width=dimen, height=dimen`` or ``width, height = dimen``

:param data: raw image byte data
:type data: bytes or io.BytesIO
:param int maxsizeb: file size constraint, in bytes
:param dimen: thumbnail size constraint, width & height
:type dimen: int or (int, int)
:param bool png2jpg: whether to convert the image to JPG format
:param bool graying: whether to convert the image to grayscale
:param reduceto: if the image exceeds this constraint, it is scaled down
    accordingly, width & height
:type reduceto: (int, int)
:return: the processed image byte data, which can be written directly to
    a file opened in ``wb`` mode
:rtype: bytes
[ "若", "png2jpg", "为", "True", "则将图片转换为", "JPEG", "格式,所有透明像素被设置为", "*", "白色", "*", "。确保结果图片尺寸小于", "maxsizeb", "的约束限制。" ]
python
train
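A usage sketch, assuming Pillow is installed and a local PNG exists (the file names are hypothetical)::

    with open('photo.png', 'rb') as f:
        raw = f.read()

    # Convert to grayscale JPEG, thumbnailed to at most 300x400 pixels.
    small = rescale_image(raw, dimen=(300, 400), png2jpg=True)
    with open('photo_small.jpg', 'wb') as f:
        f.write(small)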
merll/docker-map
dockermap/map/policy/utils.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/policy/utils.py#L54-L78
def get_shared_volume_path(container_map, vol, instance=None): """ Resolves a volume alias of a container configuration or a tuple of two paths to the host and container paths. :param container_map: Container map. :type container_map: dockermap.map.config.main.ContainerMap :param vol: SharedVolume or HostVolume tuple. :type vol: dockermap.map.input.HostVolume | dockermap.map.input.SharedVolume :param instance: Optional instance name. :type instance: unicode | str :return: Tuple of host path and container bind path. :rtype: tuple[unicode | str] """ if isinstance(vol, HostVolume): c_path = resolve_value(vol.path) if is_path(c_path): return c_path, get_host_path(container_map.host.root, vol.host_path, instance) raise ValueError("Host-container-binding must be described by two paths or one alias name.", vol) alias = vol.name volume_config = resolve_value(container_map.volumes.get(alias)) h_path = container_map.host.get_path(alias, instance) if volume_config and h_path: return volume_config.default_path, h_path raise KeyError("No host-volume information found for alias {0}.".format(alias))
[ "def", "get_shared_volume_path", "(", "container_map", ",", "vol", ",", "instance", "=", "None", ")", ":", "if", "isinstance", "(", "vol", ",", "HostVolume", ")", ":", "c_path", "=", "resolve_value", "(", "vol", ".", "path", ")", "if", "is_path", "(", "c_path", ")", ":", "return", "c_path", ",", "get_host_path", "(", "container_map", ".", "host", ".", "root", ",", "vol", ".", "host_path", ",", "instance", ")", "raise", "ValueError", "(", "\"Host-container-binding must be described by two paths or one alias name.\"", ",", "vol", ")", "alias", "=", "vol", ".", "name", "volume_config", "=", "resolve_value", "(", "container_map", ".", "volumes", ".", "get", "(", "alias", ")", ")", "h_path", "=", "container_map", ".", "host", ".", "get_path", "(", "alias", ",", "instance", ")", "if", "volume_config", "and", "h_path", ":", "return", "volume_config", ".", "default_path", ",", "h_path", "raise", "KeyError", "(", "\"No host-volume information found for alias {0}.\"", ".", "format", "(", "alias", ")", ")" ]
Resolves a volume alias of a container configuration or a tuple of two paths to the host and container paths. :param container_map: Container map. :type container_map: dockermap.map.config.main.ContainerMap :param vol: SharedVolume or HostVolume tuple. :type vol: dockermap.map.input.HostVolume | dockermap.map.input.SharedVolume :param instance: Optional instance name. :type instance: unicode | str :return: Tuple of host path and container bind path. :rtype: tuple[unicode | str]
[ "Resolves", "a", "volume", "alias", "of", "a", "container", "configuration", "or", "a", "tuple", "of", "two", "paths", "to", "the", "host", "and", "container", "paths", "." ]
python
train
3ll3d00d/vibe
backend/src/analyser/common/signal.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/signal.py#L252-L277
def readWav(inputSignalFile, selectedChannel=1, start=None, end=None) -> Signal: """ reads a wav file into a Signal. :param inputSignalFile: a path to the input signal file :param selectedChannel: the channel to read. :param start: the time to start reading from in HH:mm:ss.SSS format. :param end: the time to end reading from in HH:mm:ss.SSS format. :returns: Signal. """ def asFrames(time, fs): hours, minutes, seconds = (time.split(":"))[-3:] hours = int(hours) minutes = int(minutes) seconds = float(seconds) millis = int((3600000 * hours) + (60000 * minutes) + (1000 * seconds)) return int(millis * (fs / 1000)) import soundfile as sf if start is not None or end is not None: info = sf.info(inputSignalFile) startFrame = 0 if start is None else asFrames(start, info.samplerate) endFrame = None if end is None else asFrames(end, info.samplerate) ys, frameRate = sf.read(inputSignalFile, start=startFrame, stop=endFrame) else: ys, frameRate = sf.read(inputSignalFile) return Signal(ys[::selectedChannel], frameRate)
[ "def", "readWav", "(", "inputSignalFile", ",", "selectedChannel", "=", "1", ",", "start", "=", "None", ",", "end", "=", "None", ")", "->", "Signal", ":", "def", "asFrames", "(", "time", ",", "fs", ")", ":", "hours", ",", "minutes", ",", "seconds", "=", "(", "time", ".", "split", "(", "\":\"", ")", ")", "[", "-", "3", ":", "]", "hours", "=", "int", "(", "hours", ")", "minutes", "=", "int", "(", "minutes", ")", "seconds", "=", "float", "(", "seconds", ")", "millis", "=", "int", "(", "(", "3600000", "*", "hours", ")", "+", "(", "60000", "*", "minutes", ")", "+", "(", "1000", "*", "seconds", ")", ")", "return", "int", "(", "millis", "*", "(", "fs", "/", "1000", ")", ")", "import", "soundfile", "as", "sf", "if", "start", "is", "not", "None", "or", "end", "is", "not", "None", ":", "info", "=", "sf", ".", "info", "(", "inputSignalFile", ")", "startFrame", "=", "0", "if", "start", "is", "None", "else", "asFrames", "(", "start", ",", "info", ".", "samplerate", ")", "endFrame", "=", "None", "if", "end", "is", "None", "else", "asFrames", "(", "end", ",", "info", ".", "samplerate", ")", "ys", ",", "frameRate", "=", "sf", ".", "read", "(", "inputSignalFile", ",", "start", "=", "startFrame", ",", "stop", "=", "endFrame", ")", "else", ":", "ys", ",", "frameRate", "=", "sf", ".", "read", "(", "inputSignalFile", ")", "return", "Signal", "(", "ys", "[", ":", ":", "selectedChannel", "]", ",", "frameRate", ")" ]
reads a wav file into a Signal. :param inputSignalFile: a path to the input signal file :param selectedChannel: the channel to read. :param start: the time to start reading from in HH:mm:ss.SSS format. :param end: the time to end reading from in HH:mm:ss.SSS format. :returns: Signal.
[ "reads", "a", "wav", "file", "into", "a", "Signal", ".", ":", "param", "inputSignalFile", ":", "a", "path", "to", "the", "input", "signal", "file", ":", "param", "selectedChannel", ":", "the", "channel", "to", "read", ".", ":", "param", "start", ":", "the", "time", "to", "start", "reading", "from", "in", "HH", ":", "mm", ":", "ss", ".", "SSS", "format", ".", ":", "param", "end", ":", "the", "time", "to", "end", "reading", "from", "in", "HH", ":", "mm", ":", "ss", ".", "SSS", "format", ".", ":", "returns", ":", "Signal", "." ]
python
train
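A call sketch, assuming soundfile is installed and a WAV file exists at the hypothetical path shown::

    # Read 1.0s..5.5s of the recording into a Signal.
    signal = readWav('recording.wav', selectedChannel=1,
                     start='00:00:01.000', end='00:00:05.500')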
wmayner/pyphi
pyphi/cache.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/cache.py#L426-L458
def method(cache_name, key_prefix=None):
    """Caching decorator for object-level method caches.

    Cache key generation is delegated to the cache.

    Args:
        cache_name (str): The name of the (already-instantiated) cache
            on the decorated object which should be used to store results
            of this method.
        key_prefix: A constant to use as part of the cache key in
            addition to the method arguments.
    """
    def decorator(func):
        if (func.__name__ in ['cause_repertoire', 'effect_repertoire'] and
                not config.CACHE_REPERTOIRES):
            return func

        @wraps(func)
        def wrapper(obj, *args, **kwargs):
            cache = getattr(obj, cache_name)

            # Delegate key generation
            key = cache.key(*args, _prefix=key_prefix, **kwargs)

            # Get cached value, or compute
            value = cache.get(key)
            if value is None:  # miss
                value = func(obj, *args, **kwargs)
                cache.set(key, value)
            return value

        return wrapper
    return decorator
[ "def", "method", "(", "cache_name", ",", "key_prefix", "=", "None", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "(", "func", ".", "__name__", "in", "[", "'cause_repertoire'", ",", "'effect_repertoire'", "]", "and", "not", "config", ".", "CACHE_REPERTOIRES", ")", ":", "return", "func", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cache", "=", "getattr", "(", "obj", ",", "cache_name", ")", "# Delegate key generation", "key", "=", "cache", ".", "key", "(", "*", "args", ",", "_prefix", "=", "key_prefix", ",", "*", "*", "kwargs", ")", "# Get cached value, or compute", "value", "=", "cache", ".", "get", "(", "key", ")", "if", "value", "is", "None", ":", "# miss", "value", "=", "func", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "cache", ".", "set", "(", "key", ",", "value", ")", "return", "value", "return", "wrapper", "return", "decorator" ]
Caching decorator for object-level method caches.

Cache key generation is delegated to the cache.

Args:
    cache_name (str): The name of the (already-instantiated) cache
        on the decorated object which should be used to store results
        of this method.
    key_prefix: A constant to use as part of the cache key in
        addition to the method arguments.
[ "Caching", "decorator", "for", "object", "-", "level", "method", "caches", "." ]
python
train
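A self-contained sketch of the decorator contract; DictCache here is a hypothetical stand-in exposing the key/get/set interface the wrapper expects, not pyphi's actual cache class::

    class DictCache:
        """Minimal stand-in with the key/get/set interface used above."""
        def __init__(self):
            self._store = {}
        def key(self, *args, _prefix=None, **kwargs):
            return (_prefix,) + args + tuple(sorted(kwargs.items()))
        def get(self, key):
            return self._store.get(key)
        def set(self, key, value):
            self._store[key] = value

    class Subsystem:
        def __init__(self):
            self._square_cache = DictCache()

        @method('_square_cache', key_prefix='square')
        def square(self, n):
            print('computing', n)  # printed only on a cache miss
            return n * n

    s = Subsystem()
    s.square(3)  # computes and caches the result
    s.square(3)  # returned from the cache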
not-na/peng3d
peng3d/gui/button.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/button.py#L126-L146
def getColors(self): """ Overrideable function that generates the colors to be used by various borderstyles. Should return a 5-tuple of ``(bg,o,i,s,h)``\ . ``bg`` is the base color of the background. ``o`` is the outer color, it is usually the same as the background color. ``i`` is the inner color, it is usually lighter than the background color. ``s`` is the shadow color, it is usually quite a bit darker than the background. ``h`` is the highlight color, it is usually quite a bit lighter than the background. """ bg = self.submenu.bg[:3] if isinstance(self.submenu.bg,list) or isinstance(self.submenu.bg,tuple) else [242,241,240] o,i = bg, [min(bg[0]+8,255),min(bg[1]+8,255),min(bg[2]+8,255)] s,h = [max(bg[0]-40,0),max(bg[1]-40,0),max(bg[2]-40,0)], [min(bg[0]+12,255),min(bg[1]+12,255),min(bg[2]+12,255)] # Outer,Inner,Shadow,Highlight return bg,o,i,s,h
[ "def", "getColors", "(", "self", ")", ":", "bg", "=", "self", ".", "submenu", ".", "bg", "[", ":", "3", "]", "if", "isinstance", "(", "self", ".", "submenu", ".", "bg", ",", "list", ")", "or", "isinstance", "(", "self", ".", "submenu", ".", "bg", ",", "tuple", ")", "else", "[", "242", ",", "241", ",", "240", "]", "o", ",", "i", "=", "bg", ",", "[", "min", "(", "bg", "[", "0", "]", "+", "8", ",", "255", ")", ",", "min", "(", "bg", "[", "1", "]", "+", "8", ",", "255", ")", ",", "min", "(", "bg", "[", "2", "]", "+", "8", ",", "255", ")", "]", "s", ",", "h", "=", "[", "max", "(", "bg", "[", "0", "]", "-", "40", ",", "0", ")", ",", "max", "(", "bg", "[", "1", "]", "-", "40", ",", "0", ")", ",", "max", "(", "bg", "[", "2", "]", "-", "40", ",", "0", ")", "]", ",", "[", "min", "(", "bg", "[", "0", "]", "+", "12", ",", "255", ")", ",", "min", "(", "bg", "[", "1", "]", "+", "12", ",", "255", ")", ",", "min", "(", "bg", "[", "2", "]", "+", "12", ",", "255", ")", "]", "# Outer,Inner,Shadow,Highlight", "return", "bg", ",", "o", ",", "i", ",", "s", ",", "h" ]
Overrideable function that generates the colors to be used by various borderstyles. Should return a 5-tuple of ``(bg,o,i,s,h)``\ . ``bg`` is the base color of the background. ``o`` is the outer color, it is usually the same as the background color. ``i`` is the inner color, it is usually lighter than the background color. ``s`` is the shadow color, it is usually quite a bit darker than the background. ``h`` is the highlight color, it is usually quite a bit lighter than the background.
[ "Overrideable", "function", "that", "generates", "the", "colors", "to", "be", "used", "by", "various", "borderstyles", ".", "Should", "return", "a", "5", "-", "tuple", "of", "(", "bg", "o", "i", "s", "h", ")", "\\", ".", "bg", "is", "the", "base", "color", "of", "the", "background", ".", "o", "is", "the", "outer", "color", "it", "is", "usually", "the", "same", "as", "the", "background", "color", ".", "i", "is", "the", "inner", "color", "it", "is", "usually", "lighter", "than", "the", "background", "color", ".", "s", "is", "the", "shadow", "color", "it", "is", "usually", "quite", "a", "bit", "darker", "than", "the", "background", ".", "h", "is", "the", "highlight", "color", "it", "is", "usually", "quite", "a", "bit", "lighter", "than", "the", "background", "." ]
python
test
Cairnarvon/uptime
src/__init__.py
https://github.com/Cairnarvon/uptime/blob/1ddfd06bb300c00e6dc4bd2a9ddf9bf1aa27b1bb/src/__init__.py#L301-L323
def _uptime_windows(): """ Returns uptime in seconds or None, on Windows. Warning: may return incorrect answers after 49.7 days on versions older than Vista. """ if hasattr(ctypes, 'windll') and hasattr(ctypes.windll, 'kernel32'): lib = ctypes.windll.kernel32 else: try: # Windows CE uses the cdecl calling convention. lib = ctypes.CDLL('coredll.lib') except (AttributeError, OSError): return None if hasattr(lib, 'GetTickCount64'): # Vista/Server 2008 or later. lib.GetTickCount64.restype = ctypes.c_uint64 return lib.GetTickCount64() / 1000. if hasattr(lib, 'GetTickCount'): # WinCE and Win2k or later; gives wrong answers after 49.7 days. lib.GetTickCount.restype = ctypes.c_uint32 return lib.GetTickCount() / 1000. return None
[ "def", "_uptime_windows", "(", ")", ":", "if", "hasattr", "(", "ctypes", ",", "'windll'", ")", "and", "hasattr", "(", "ctypes", ".", "windll", ",", "'kernel32'", ")", ":", "lib", "=", "ctypes", ".", "windll", ".", "kernel32", "else", ":", "try", ":", "# Windows CE uses the cdecl calling convention.", "lib", "=", "ctypes", ".", "CDLL", "(", "'coredll.lib'", ")", "except", "(", "AttributeError", ",", "OSError", ")", ":", "return", "None", "if", "hasattr", "(", "lib", ",", "'GetTickCount64'", ")", ":", "# Vista/Server 2008 or later.", "lib", ".", "GetTickCount64", ".", "restype", "=", "ctypes", ".", "c_uint64", "return", "lib", ".", "GetTickCount64", "(", ")", "/", "1000.", "if", "hasattr", "(", "lib", ",", "'GetTickCount'", ")", ":", "# WinCE and Win2k or later; gives wrong answers after 49.7 days.", "lib", ".", "GetTickCount", ".", "restype", "=", "ctypes", ".", "c_uint32", "return", "lib", ".", "GetTickCount", "(", ")", "/", "1000.", "return", "None" ]
Returns uptime in seconds or None, on Windows. Warning: may return incorrect answers after 49.7 days on versions older than Vista.
[ "Returns", "uptime", "in", "seconds", "or", "None", "on", "Windows", ".", "Warning", ":", "may", "return", "incorrect", "answers", "after", "49", ".", "7", "days", "on", "versions", "older", "than", "Vista", "." ]
python
valid
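A minimal call sketch (only meaningful on Windows; returns None elsewhere)::

    seconds = _uptime_windows()
    if seconds is not None:
        print('up for %.0f seconds' % seconds)
    else:
        print('uptime unavailable on this platform')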
jmgilman/Neolib
neolib/pyamf/amf3.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L713-L721
def addProxyObject(self, obj, proxied): """ Stores a reference to the unproxied and proxied versions of C{obj} for later retrieval. @since: 0.6 """ self.proxied_objects[id(obj)] = proxied self.proxied_objects[id(proxied)] = obj
[ "def", "addProxyObject", "(", "self", ",", "obj", ",", "proxied", ")", ":", "self", ".", "proxied_objects", "[", "id", "(", "obj", ")", "]", "=", "proxied", "self", ".", "proxied_objects", "[", "id", "(", "proxied", ")", "]", "=", "obj" ]
Stores a reference to the unproxied and proxied versions of C{obj} for later retrieval. @since: 0.6
[ "Stores", "a", "reference", "to", "the", "unproxied", "and", "proxied", "versions", "of", "C", "{", "obj", "}", "for", "later", "retrieval", "." ]
python
train
freshbooks/refreshbooks
refreshbooks/api.py
https://github.com/freshbooks/refreshbooks/blob/cfd65ecd38cb6be3b61dbf6a01f93800603f34b1/refreshbooks/api.py#L94-L120
def TokenClient( domain, token, user_agent=None, request_encoder=default_request_encoder, response_decoder=default_response_decoder, ): """Creates a Freshbooks client for a freshbooks domain, using token-based auth. The optional request_encoder and response_decoder parameters can be passed the logging_request_encoder and logging_response_decoder objects from this module, or custom encoders, to aid debugging or change the behaviour of refreshbooks' request-to-XML-to-response mapping. The optional user_agent keyword parameter can be used to specify the user agent string passed to FreshBooks. If unset, a default user agent string is used. """ return AuthorizingClient( domain, transport.TokenAuthorization(token), request_encoder, response_decoder, user_agent=user_agent )
[ "def", "TokenClient", "(", "domain", ",", "token", ",", "user_agent", "=", "None", ",", "request_encoder", "=", "default_request_encoder", ",", "response_decoder", "=", "default_response_decoder", ",", ")", ":", "return", "AuthorizingClient", "(", "domain", ",", "transport", ".", "TokenAuthorization", "(", "token", ")", ",", "request_encoder", ",", "response_decoder", ",", "user_agent", "=", "user_agent", ")" ]
Creates a Freshbooks client for a freshbooks domain, using token-based auth. The optional request_encoder and response_decoder parameters can be passed the logging_request_encoder and logging_response_decoder objects from this module, or custom encoders, to aid debugging or change the behaviour of refreshbooks' request-to-XML-to-response mapping. The optional user_agent keyword parameter can be used to specify the user agent string passed to FreshBooks. If unset, a default user agent string is used.
[ "Creates", "a", "Freshbooks", "client", "for", "a", "freshbooks", "domain", "using", "token", "-", "based", "auth", ".", "The", "optional", "request_encoder", "and", "response_decoder", "parameters", "can", "be", "passed", "the", "logging_request_encoder", "and", "logging_response_decoder", "objects", "from", "this", "module", "or", "custom", "encoders", "to", "aid", "debugging", "or", "change", "the", "behaviour", "of", "refreshbooks", "request", "-", "to", "-", "XML", "-", "to", "-", "response", "mapping", ".", "The", "optional", "user_agent", "keyword", "parameter", "can", "be", "used", "to", "specify", "the", "user", "agent", "string", "passed", "to", "FreshBooks", ".", "If", "unset", "a", "default", "user", "agent", "string", "is", "used", "." ]
python
train
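A construction sketch with placeholder domain and token; how requests are then issued is left to the project's README rather than assumed here::

    client = TokenClient(
        'example.freshbooks.com',   # placeholder account domain
        'my-api-token',             # placeholder auth token
        user_agent='my-app/1.0',
    )
    # Requests are issued through the returned client object; see the
    # refreshbooks README for the request-building API.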
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L214-L250
def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response('command_error') if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
[ "def", "write", "(", "self", ",", "data", ")", ":", "logger", ".", "debug", "(", "'Writing into device input buffer: %r'", "%", "data", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "TypeError", "(", "'data must be an instance of bytes'", ")", "if", "len", "(", "data", ")", "!=", "1", ":", "msg", "=", "'data must have a length of 1, not %d'", "raise", "ValueError", "(", "msg", "%", "len", "(", "data", ")", ")", "self", ".", "_input_buffer", ".", "extend", "(", "data", ")", "l", "=", "len", "(", "self", ".", "_query_eom", ")", "if", "not", "self", ".", "_input_buffer", ".", "endswith", "(", "self", ".", "_query_eom", ")", ":", "return", "try", ":", "message", "=", "bytes", "(", "self", ".", "_input_buffer", "[", ":", "-", "l", "]", ")", "queries", "=", "(", "message", ".", "split", "(", "self", ".", "delimiter", ")", "if", "self", ".", "delimiter", "else", "[", "message", "]", ")", "for", "query", "in", "queries", ":", "response", "=", "self", ".", "_match", "(", "query", ")", "eom", "=", "self", ".", "_response_eom", "if", "response", "is", "None", ":", "response", "=", "self", ".", "error_response", "(", "'command_error'", ")", "if", "response", "is", "not", "NoResponse", ":", "self", ".", "_output_buffer", ".", "extend", "(", "response", ")", "self", ".", "_output_buffer", ".", "extend", "(", "eom", ")", "finally", ":", "self", ".", "_input_buffer", "=", "bytearray", "(", ")" ]
Write data into the device input buffer. :param data: single element byte :type data: bytes
[ "Write", "data", "into", "the", "device", "input", "buffer", "." ]
python
train
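A driving-loop sketch, assuming `device` is an already-configured pyvisa-sim Device whose query EOM is "\n" (both are assumptions, not given in the record)::

    message = b'*IDN?\n'
    for i in range(len(message)):
        device.write(message[i:i + 1])  # write() accepts exactly one byte
    # Once the EOM byte arrives, the matched response is queued in the
    # device's output buffer.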
planetarypy/planetaryimage
planetaryimage/pds3image.py
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L336-L341
def dtype(self): """Pixel data type.""" try: return self.data.dtype except AttributeError: return numpy.dtype('%s%d' % (self._sample_type, self._sample_bytes))
[ "def", "dtype", "(", "self", ")", ":", "try", ":", "return", "self", ".", "data", ".", "dtype", "except", "AttributeError", ":", "return", "numpy", ".", "dtype", "(", "'%s%d'", "%", "(", "self", ".", "_sample_type", ",", "self", ".", "_sample_bytes", ")", ")" ]
Pixel data type.
[ "Pixel", "data", "type", "." ]
python
train
ChargePoint/pydnp3
examples/master_cmd.py
https://github.com/ChargePoint/pydnp3/blob/5bcd8240d1fc0aa1579e71f2efcab63b4c61c547/examples/master_cmd.py#L123-L125
def do_restart(self, line): """Request that the Outstation perform a cold restart. Command syntax is: restart""" self.application.master.Restart(opendnp3.RestartType.COLD, restart_callback)
[ "def", "do_restart", "(", "self", ",", "line", ")", ":", "self", ".", "application", ".", "master", ".", "Restart", "(", "opendnp3", ".", "RestartType", ".", "COLD", ",", "restart_callback", ")" ]
Request that the Outstation perform a cold restart. Command syntax is: restart
[ "Request", "that", "the", "Outstation", "perform", "a", "cold", "restart", ".", "Command", "syntax", "is", ":", "restart" ]
python
valid
scidash/sciunit
sciunit/models/backends.py
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/models/backends.py#L64-L72
def init_disk_cache(self): """Initialize the on-disk version of the cache.""" try: # Cleanup old disk cache files path = self.disk_cache_location os.remove(path) except Exception: pass self.disk_cache_location = os.path.join(tempfile.mkdtemp(), 'cache')
[ "def", "init_disk_cache", "(", "self", ")", ":", "try", ":", "# Cleanup old disk cache files", "path", "=", "self", ".", "disk_cache_location", "os", ".", "remove", "(", "path", ")", "except", "Exception", ":", "pass", "self", ".", "disk_cache_location", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "mkdtemp", "(", ")", ",", "'cache'", ")" ]
Initialize the on-disk version of the cache.
[ "Initialize", "the", "on", "-", "disk", "version", "of", "the", "cache", "." ]
python
train
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L1622-L1630
def status_mute(self, id): """ Mute notifications for a status. Returns a `toot dict`_ with the now muted status """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}/mute'.format(str(id)) return self.__api_request('POST', url)
[ "def", "status_mute", "(", "self", ",", "id", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "url", "=", "'/api/v1/statuses/{0}/mute'", ".", "format", "(", "str", "(", "id", ")", ")", "return", "self", ".", "__api_request", "(", "'POST'", ",", "url", ")" ]
Mute notifications for a status. Returns a `toot dict`_ with the now muted status
[ "Mute", "notifications", "for", "a", "status", "." ]
python
train
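A call sketch with placeholder credentials and a hypothetical status id::

    from mastodon import Mastodon

    api = Mastodon(access_token='user-token',               # placeholder
                   api_base_url='https://mastodon.example')
    muted = api.status_mute(123456789)  # hypothetical status id
    print(muted)  # toot dict for the now-muted status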
tylerbutler/engineer
engineer/devtools/theme_tools.py
https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L20-L42
def compile_theme(theme_id=None): """Compiles a theme.""" from engineer.processors import convert_less from engineer.themes import ThemeManager if theme_id is None: themes = ThemeManager.themes().values() else: themes = [ThemeManager.theme(theme_id)] with(indent(2)): puts(colored.yellow("Compiling %s themes." % len(themes))) for theme in themes: theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath() puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path))) with indent(4): puts("Compiling...") convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id), theme_output_path, minify=True) puts(colored.green("Done.", bold=True))
[ "def", "compile_theme", "(", "theme_id", "=", "None", ")", ":", "from", "engineer", ".", "processors", "import", "convert_less", "from", "engineer", ".", "themes", "import", "ThemeManager", "if", "theme_id", "is", "None", ":", "themes", "=", "ThemeManager", ".", "themes", "(", ")", ".", "values", "(", ")", "else", ":", "themes", "=", "[", "ThemeManager", ".", "theme", "(", "theme_id", ")", "]", "with", "(", "indent", "(", "2", ")", ")", ":", "puts", "(", "colored", ".", "yellow", "(", "\"Compiling %s themes.\"", "%", "len", "(", "themes", ")", ")", ")", "for", "theme", "in", "themes", ":", "theme_output_path", "=", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s_precompiled.css'", "%", "theme", ".", "id", ")", ")", ".", "normpath", "(", ")", "puts", "(", "colored", ".", "cyan", "(", "\"Compiling theme %s to %s\"", "%", "(", "theme", ".", "id", ",", "theme_output_path", ")", ")", ")", "with", "indent", "(", "4", ")", ":", "puts", "(", "\"Compiling...\"", ")", "convert_less", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s.less'", "%", "theme", ".", "id", ")", ",", "theme_output_path", ",", "minify", "=", "True", ")", "puts", "(", "colored", ".", "green", "(", "\"Done.\"", ",", "bold", "=", "True", ")", ")" ]
Compiles a theme.
[ "Compiles", "a", "theme", "." ]
python
train
jay-johnson/network-pipeline
network_pipeline/connect_forwarder.py
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/connect_forwarder.py#L10-L56
def connect_forwarder(forward_host=None, forward_port=None, max_retries=-1, sleep_interval=1.0): """connect_forwarder :param forward_host: host for receiving forwarded packets :param forward_port: port for the forwarded packets :param max_retries: retries, -1 = infinite :param sleep_interval: how often to retry in this loop """ forward_skt = None retry_count = 0 if max_retries == -1: retry_count = -2 if forward_host and forward_port: while not forward_skt and \ retry_count < max_retries: try: forward_skt = socket.socket() log.info(("connecting to forward={}:{}") .format(forward_host, forward_port)) forward_skt.connect((forward_host, forward_port)) log.debug(("connected to forward={}:{}") .format(forward_host, forward_port)) except Exception as s: forward_skt = None log.error(("Failed to connect forward address={}:{} " "with ex={}") .format(forward_host, forward_port, s)) if max_retries == -1: retry_count = -2 else: retry_count += 1 # end of try/ex time.sleep(sleep_interval) # end of setting up forward # end forward_host and forward_port return forward_skt
[ "def", "connect_forwarder", "(", "forward_host", "=", "None", ",", "forward_port", "=", "None", ",", "max_retries", "=", "-", "1", ",", "sleep_interval", "=", "1.0", ")", ":", "forward_skt", "=", "None", "retry_count", "=", "0", "if", "max_retries", "==", "-", "1", ":", "retry_count", "=", "-", "2", "if", "forward_host", "and", "forward_port", ":", "while", "not", "forward_skt", "and", "retry_count", "<", "max_retries", ":", "try", ":", "forward_skt", "=", "socket", ".", "socket", "(", ")", "log", ".", "info", "(", "(", "\"connecting to forward={}:{}\"", ")", ".", "format", "(", "forward_host", ",", "forward_port", ")", ")", "forward_skt", ".", "connect", "(", "(", "forward_host", ",", "forward_port", ")", ")", "log", ".", "debug", "(", "(", "\"connected to forward={}:{}\"", ")", ".", "format", "(", "forward_host", ",", "forward_port", ")", ")", "except", "Exception", "as", "s", ":", "forward_skt", "=", "None", "log", ".", "error", "(", "(", "\"Failed to connect forward address={}:{} \"", "\"with ex={}\"", ")", ".", "format", "(", "forward_host", ",", "forward_port", ",", "s", ")", ")", "if", "max_retries", "==", "-", "1", ":", "retry_count", "=", "-", "2", "else", ":", "retry_count", "+=", "1", "# end of try/ex", "time", ".", "sleep", "(", "sleep_interval", ")", "# end of setting up forward", "# end forward_host and forward_port", "return", "forward_skt" ]
connect_forwarder :param forward_host: host for receiving forwarded packets :param forward_port: port for the forwarded packets :param max_retries: retries, -1 = infinite :param sleep_interval: how often to retry in this loop
[ "connect_forwarder" ]
python
train
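A usage sketch with a hypothetical forwarding address::

    forward_skt = connect_forwarder('127.0.0.1', 9000,
                                    max_retries=3, sleep_interval=0.5)
    if forward_skt:
        forward_skt.send(b'captured-packet-bytes')
        forward_skt.close()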
jeffh/rpi_courses
rpi_courses/utils.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/utils.py#L6-L19
def safeInt(n, warn_only=False): """Throws an exception if the number starts with a 0 (may be significant). If the value cannot be converted to an int, it is returned as is. """ if str(n).startswith('0'): if not warn_only: raise TypeError("Unsafe Int: " + str(n)) print "Unsafe Int: %s" % n return int(n) try: return int(n) except ValueError: return n
[ "def", "safeInt", "(", "n", ",", "warn_only", "=", "False", ")", ":", "if", "str", "(", "n", ")", ".", "startswith", "(", "'0'", ")", ":", "if", "not", "warn_only", ":", "raise", "TypeError", "(", "\"Unsafe Int: \"", "+", "str", "(", "n", ")", ")", "print", "\"Unsafe Int: %s\"", "%", "n", "return", "int", "(", "n", ")", "try", ":", "return", "int", "(", "n", ")", "except", "ValueError", ":", "return", "n" ]
Throws an exception if the number starts with a 0 (may be significant). If the value cannot be converted to an int, it is returned as is.
[ "Throws", "an", "exception", "if", "the", "number", "starts", "with", "a", "0", "(", "may", "be", "significant", ")", "." ]
python
train
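A behavior sketch (the function body is Python 2 code, but the calls behave the same)::

    safeInt('42')                    # -> 42
    safeInt('4a')                    # -> '4a' (cannot convert; returned unchanged)
    safeInt('007', warn_only=True)   # prints a warning, -> 7
    safeInt('007')                   # raises TypeError("Unsafe Int: 007")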
Neurita/boyle
boyle/nifti/sets.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/sets.py#L199-L272
def to_matrix(self, smooth_fwhm=0, outdtype=None):
    """Return numpy.ndarray with the masked or flattened image data and
       the relevant information (mask indices and volume shape).

    Parameters
    ----------
    smooth_fwhm: int
        Integer indicating the size of the FWHM Gaussian smoothing kernel
        to smooth the subject volumes before creating the data matrix

    outdtype: dtype
        Type of the elements of the array, if None will obtain the dtype from
        the first nifti file.

    Returns
    -------
    outmat, mask_indices, vol_shape

    outmat: Numpy array with shape N x prod(vol.shape)
            containing the N files as flat vectors.

    mask_indices: matrix with indices of the voxels in the mask

    vol_shape: Tuple with shape of the volumes, for reshaping.
    """
    if not self.all_compatible:
        raise ValueError("`self.all_compatible` must be True in order to use this function.")

    if not outdtype:
        outdtype = self.items[0].dtype

    # extract some info from the mask
    n_voxels = None
    mask_indices = None
    mask_shape = self.items[0].shape[:3]
    if self.has_mask:
        mask_arr = self.mask.get_data()
        mask_indices = np.nonzero(mask_arr)
        mask_shape = self.mask.shape
        n_voxels = np.count_nonzero(mask_arr)

    # if the mask is empty will use the whole image
    if n_voxels is None:
        log.debug('Non-zero voxels have not been found in mask {}'.format(self.mask))
        n_voxels = np.prod(mask_shape)
        mask_indices = None

    # get the shape of the flattened subject data
    ndims = self.items[0].ndim
    if ndims == 3:
        subj_flat_shape = (n_voxels, )
    elif ndims == 4:
        subj_flat_shape = (n_voxels, self.items[0].shape[3])
    else:
        raise NotImplementedError('The subject images have {} dimensions. '
                                  'Still have not implemented t_matrix for this shape.'.format(ndims))

    # create and fill the big matrix
    outmat = np.zeros((self.n_subjs, ) + subj_flat_shape, dtype=outdtype)
    try:
        for i, image in enumerate(self.items):
            if smooth_fwhm > 0:
                image.fwhm = smooth_fwhm

            if self.has_mask:
                image.set_mask(self.mask)

            outmat[i, :], _, _ = image.mask_and_flatten()
            image.clear_data()

    except Exception as exc:
        raise Exception('Error flattening file {0}'.format(image)) from exc
    else:
        return outmat, mask_indices, mask_shape
[ "def", "to_matrix", "(", "self", ",", "smooth_fwhm", "=", "0", ",", "outdtype", "=", "None", ")", ":", "if", "not", "self", ".", "all_compatible", ":", "raise", "ValueError", "(", "\"`self.all_compatible` must be True in order to use this function.\"", ")", "if", "not", "outdtype", ":", "outdtype", "=", "self", ".", "items", "[", "0", "]", ".", "dtype", "# extract some info from the mask", "n_voxels", "=", "None", "mask_indices", "=", "None", "mask_shape", "=", "self", ".", "items", "[", "0", "]", ".", "shape", "[", ":", "3", "]", "if", "self", ".", "has_mask", ":", "mask_arr", "=", "self", ".", "mask", ".", "get_data", "(", ")", "mask_indices", "=", "np", ".", "nonzero", "(", "mask_arr", ")", "mask_shape", "=", "self", ".", "mask", ".", "shape", "n_voxels", "=", "np", ".", "count_nonzero", "(", "mask_arr", ")", "# if the mask is empty will use the whole image", "if", "n_voxels", "is", "None", ":", "log", ".", "debug", "(", "'Non-zero voxels have not been found in mask {}'", ".", "format", "(", "self", ".", "mask", ")", ")", "n_voxels", "=", "np", ".", "prod", "(", "mask_shape", ")", "mask_indices", "=", "None", "# get the shape of the flattened subject data", "ndims", "=", "self", ".", "items", "[", "0", "]", ".", "ndim", "if", "ndims", "==", "3", ":", "subj_flat_shape", "=", "(", "n_voxels", ",", ")", "elif", "ndims", "==", "4", ":", "subj_flat_shape", "=", "(", "n_voxels", ",", "self", ".", "items", "[", "0", "]", ".", "shape", "[", "3", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "'The subject images have {} dimensions. '", "'Still have not implemented t_matrix for this shape.'", ".", "format", "(", "ndims", ")", ")", "# create and fill the big matrix", "outmat", "=", "np", ".", "zeros", "(", "(", "self", ".", "n_subjs", ",", ")", "+", "subj_flat_shape", ",", "dtype", "=", "outdtype", ")", "try", ":", "for", "i", ",", "image", "in", "enumerate", "(", "self", ".", "items", ")", ":", "if", "smooth_fwhm", ">", "0", ":", "image", ".", "fwhm", "=", "smooth_fwhm", "if", "self", ".", "has_mask", ":", "image", ".", "set_mask", "(", "self", ".", "mask", ")", "outmat", "[", "i", ",", ":", "]", ",", "_", ",", "_", "=", "image", ".", "mask_and_flatten", "(", ")", "image", ".", "clear_data", "(", ")", "except", "Exception", "as", "exc", ":", "raise", "Exception", "(", "'Error flattening file {0}'", ".", "format", "(", "image", ")", ")", "from", "exc", "else", ":", "return", "outmat", ",", "mask_indices", ",", "mask_shape" ]
Return numpy.ndarray with the masked or flattened image data and
the relevant information (mask indices and volume shape).

Parameters
----------
smooth_fwhm: int
    Integer indicating the size of the FWHM Gaussian smoothing kernel
    to smooth the subject volumes before creating the data matrix

outdtype: dtype
    Type of the elements of the array, if None will obtain the dtype from
    the first nifti file.

Returns
-------
outmat, mask_indices, vol_shape

outmat: Numpy array with shape N x prod(vol.shape)
        containing the N files as flat vectors.

mask_indices: matrix with indices of the voxels in the mask

vol_shape: Tuple with shape of the volumes, for reshaping.
[ "Return", "numpy", ".", "ndarray", "with", "the", "masked", "or", "flatten", "image", "data", "and", "the", "relevant", "information", "(", "mask", "indices", "and", "volume", "shape", ")", "." ]
python
valid
log2timeline/plaso
plaso/parsers/sqlite.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/sqlite.py#L32-L93
def CacheQueryResults(
    self, sql_results, attribute_name, key_name, column_names):
  """Build a dictionary object based on a SQL command.

  This function will take a SQL command, execute it and for
  each resulting row it will store a key in a dictionary.

  An example::

    sql_results = A SQL result object after executing the
                  SQL command: 'SELECT foo, bla, bar FROM my_table'
    attribute_name = 'all_the_things'
    key_name = 'foo'
    column_names = ['bla', 'bar']

  Results from running this against the database:
  'first', 'stuff', 'things'
  'second', 'another stuff', 'another thing'

  This will result in a dictionary object being created in the
  cache, called 'all_the_things' and it will contain the following value::

    all_the_things = {
        'first': ['stuff', 'things'],
        'second': ['another_stuff', 'another_thing'],
        'third': ['single_thing']}

  Args:
    sql_results (sqlite3.Cursor): result after executing a SQL command
        on a database.
    attribute_name (str): attribute name in the cache to store results to.
        This will be the name of the dictionary attribute.
    key_name (str): name of the result field that should be used as a key
        in the resulting dictionary that is created.
    column_names (list[str]): list of column names that are stored as values
        in the dictionary. If this list has only one value in it the value
        will be stored directly, otherwise the value will be a list containing
        the extracted results based on the names provided in this list.
  """
  row = sql_results.fetchone()
  if not row:
    return

  # Note that pysqlite does not accept a Unicode string in row['string'] and
  # will raise "IndexError: Index must be int or string".
  keys_name_to_index_map = {
      name: index for index, name in enumerate(row.keys())}

  attribute_value = {}
  while row:
    value_index = keys_name_to_index_map.get(key_name)
    key_value = row[value_index]
    attribute_value[key_value] = []
    for column_name in column_names:
      value_index = keys_name_to_index_map.get(column_name)
      column_value = row[value_index]
      attribute_value[key_value].append(column_value)

    row = sql_results.fetchone()

  setattr(self, attribute_name, attribute_value)
[ "def", "CacheQueryResults", "(", "self", ",", "sql_results", ",", "attribute_name", ",", "key_name", ",", "column_names", ")", ":", "row", "=", "sql_results", ".", "fetchone", "(", ")", "if", "not", "row", ":", "return", "# Note that pysqlite does not accept a Unicode string in row['string'] and", "# will raise \"IndexError: Index must be int or string\".", "keys_name_to_index_map", "=", "{", "name", ":", "index", "for", "index", ",", "name", "in", "enumerate", "(", "row", ".", "keys", "(", ")", ")", "}", "attribute_value", "=", "{", "}", "while", "row", ":", "value_index", "=", "keys_name_to_index_map", ".", "get", "(", "key_name", ")", "key_value", "=", "row", "[", "value_index", "]", "attribute_value", "[", "key_value", "]", "=", "[", "]", "for", "column_name", "in", "column_names", ":", "value_index", "=", "keys_name_to_index_map", ".", "get", "(", "column_name", ")", "column_value", "=", "row", "[", "value_index", "]", "attribute_value", "[", "key_value", "]", ".", "append", "(", "column_value", ")", "row", "=", "sql_results", ".", "fetchone", "(", ")", "setattr", "(", "self", ",", "attribute_name", ",", "attribute_value", ")" ]
Build a dictionary object based on a SQL command.

This function will take a SQL command, execute it and for
each resulting row it will store a key in a dictionary.

An example::

  sql_results = A SQL result object after executing the
                SQL command: 'SELECT foo, bla, bar FROM my_table'
  attribute_name = 'all_the_things'
  key_name = 'foo'
  column_names = ['bla', 'bar']

Results from running this against the database:
'first', 'stuff', 'things'
'second', 'another stuff', 'another thing'

This will result in a dictionary object being created in the
cache, called 'all_the_things' and it will contain the following value::

  all_the_things = {
      'first': ['stuff', 'things'],
      'second': ['another_stuff', 'another_thing'],
      'third': ['single_thing']}

Args:
  sql_results (sqlite3.Cursor): result after executing a SQL command
      on a database.
  attribute_name (str): attribute name in the cache to store results to.
      This will be the name of the dictionary attribute.
  key_name (str): name of the result field that should be used as a key
      in the resulting dictionary that is created.
  column_names (list[str]): list of column names that are stored as values
      in the dictionary. If this list has only one value in it the value
      will be stored directly, otherwise the value will be a list containing
      the extracted results based on the names provided in this list.
[ "Build", "a", "dictionary", "object", "based", "on", "a", "SQL", "command", "." ]
python
train
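A sketch reproducing the docstring example with sqlite3; `cache` stands for an instance of the containing cache class, and sqlite3.Row is used because the method calls row.keys()::

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.row_factory = sqlite3.Row  # rows must support keys()
    conn.execute('CREATE TABLE my_table (foo TEXT, bla TEXT, bar TEXT)')
    conn.executemany(
        'INSERT INTO my_table VALUES (?, ?, ?)',
        [('first', 'stuff', 'things'),
         ('second', 'another stuff', 'another thing')])

    cursor = conn.execute('SELECT foo, bla, bar FROM my_table')
    cache.CacheQueryResults(cursor, 'all_the_things', 'foo', ['bla', 'bar'])
    print(cache.all_the_things)
    # {'first': ['stuff', 'things'], 'second': ['another stuff', 'another thing']}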
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1390-L1419
def insert_text(self, text, fname, pattern=None, expect=None, shutit_pexpect_child=None, before=False, force=False, note=None, replace=False, line_oriented=True, create=True, loglevel=logging.DEBUG): """Insert a chunk of text at the end of a file, or after (or before) the first matching pattern in given file fname. See change_text""" shutit_global.shutit_global_object.yield_to_draw() return self.change_text(text=text, fname=fname, pattern=pattern, expect=expect, shutit_pexpect_child=shutit_pexpect_child, before=before, force=force, note=note, line_oriented=line_oriented, create=create, replace=replace, delete=False, loglevel=loglevel)
[ "def", "insert_text", "(", "self", ",", "text", ",", "fname", ",", "pattern", "=", "None", ",", "expect", "=", "None", ",", "shutit_pexpect_child", "=", "None", ",", "before", "=", "False", ",", "force", "=", "False", ",", "note", "=", "None", ",", "replace", "=", "False", ",", "line_oriented", "=", "True", ",", "create", "=", "True", ",", "loglevel", "=", "logging", ".", "DEBUG", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "return", "self", ".", "change_text", "(", "text", "=", "text", ",", "fname", "=", "fname", ",", "pattern", "=", "pattern", ",", "expect", "=", "expect", ",", "shutit_pexpect_child", "=", "shutit_pexpect_child", ",", "before", "=", "before", ",", "force", "=", "force", ",", "note", "=", "note", ",", "line_oriented", "=", "line_oriented", ",", "create", "=", "create", ",", "replace", "=", "replace", ",", "delete", "=", "False", ",", "loglevel", "=", "loglevel", ")" ]
Insert a chunk of text at the end of a file, or after (or before) the first matching pattern in the given file fname. See change_text
[ "Insert", "a", "chunk", "of", "text", "at", "the", "end", "of", "a", "file", "or", "after", "(", "or", "before", ")", "the", "first", "matching", "pattern", "in", "given", "file", "fname", ".", "See", "change_text" ]
python
train
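A hedged usage sketch for insert_text; `shutit` stands in for a live ShutIt session object (not constructed here), and the file path, pattern, and inserted text are purely illustrative:

shutit.insert_text(
    text='\nexport PATH=$PATH:/opt/tools/bin',
    fname='/root/.bashrc',
    pattern='# User specific aliases',
    note='Add the tools directory to PATH')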
Autodesk/aomi
aomi/model/resource.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/resource.py#L31-L51
def thaw(self, tmp_dir):
    """Will perform some validation and copy a decrypted secret
    to its final location"""
    for sfile in self.secrets():
        src_file = "%s/%s" % (tmp_dir, sfile)
        err_msg = "%s secret missing from icefile" % (self)
        if not os.path.exists(src_file):
            if hasattr(self.opt, 'ignore_missing') and \
               self.opt.ignore_missing:
                LOG.warning(err_msg)
                continue
            else:
                raise aomi_excep.IceFile(err_msg)

        dest_file = "%s/%s" % (self.opt.secrets, sfile)
        dest_dir = os.path.dirname(dest_file)
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

        shutil.copy(src_file, dest_file)
        LOG.debug("Thawed %s %s", self, sfile)
[ "def", "thaw", "(", "self", ",", "tmp_dir", ")", ":", "for", "sfile", "in", "self", ".", "secrets", "(", ")", ":", "src_file", "=", "\"%s/%s\"", "%", "(", "tmp_dir", ",", "sfile", ")", "err_msg", "=", "\"%s secret missing from icefile\"", "%", "(", "self", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "src_file", ")", ":", "if", "hasattr", "(", "self", ".", "opt", ",", "'ignore_missing'", ")", "and", "self", ".", "opt", ".", "ignore_missing", ":", "LOG", ".", "warning", "(", "err_msg", ")", "continue", "else", ":", "raise", "aomi_excep", ".", "IceFile", "(", "err_msg", ")", "dest_file", "=", "\"%s/%s\"", "%", "(", "self", ".", "opt", ".", "secrets", ",", "sfile", ")", "dest_dir", "=", "os", ".", "path", ".", "dirname", "(", "dest_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dest_dir", ")", ":", "os", ".", "mkdir", "(", "dest_dir", ")", "shutil", ".", "copy", "(", "src_file", ",", "dest_file", ")", "LOG", ".", "debug", "(", "\"Thawed %s %s\"", ",", "self", ",", "sfile", ")" ]
Will perform some validation and copy a decrypted secret to its final location
[ "Will", "perform", "some", "validation", "and", "copy", "a", "decrypted", "secret", "to", "it", "s", "final", "location" ]
python
train
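A minimal sketch of how thaw might be driven; `resource` is a hypothetical aomi resource instance whose opt.secrets directory already exists, and the step that decrypts the icefile contents into the temporary directory is assumed to happen elsewhere:

import tempfile

tmp_dir = tempfile.mkdtemp()
# ... the caller decrypts the icefile contents into tmp_dir here ...
resource.thaw(tmp_dir)  # copies each listed secret under resource.opt.secrets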
whiteclover/dbpy
db/__init__.py
https://github.com/whiteclover/dbpy/blob/3d9ce85f55cfb39cced22081e525f79581b26b3a/db/__init__.py#L94-L107
def query(sql, args=None, many=None, as_dict=False, key='default'):
    """Run a raw sql query on the connection for select or show statements that fetch records, compatible with the dbi execute method.

    :param sql string: the sql statement, like 'select * from %s'
    :param args list: when set to None, dbi execute(sql) will be used, else dbi execute(sql, args); the args keep the original rules and should be a tuple or a list of lists
    :param many int: when set, the query method will return a generator to iterate over the results
    :param as_dict bool: when True, each row will be a dict, otherwise a tuple
    :param key: the key of the database you want to use
    """
    database = choice(__db[key + '.slave'])
    return database.query(sql, args, many, as_dict)
[ "def", "query", "(", "sql", ",", "args", "=", "None", ",", "many", "=", "None", ",", "as_dict", "=", "False", ",", "key", "=", "'default'", ")", ":", "database", "=", "choice", "(", "__db", "[", "key", "+", "'.slave'", "]", ")", "return", "database", ".", "query", "(", "sql", ",", "args", ",", "many", ",", "as_dict", ")" ]
Run a raw sql query on the connection for select or show statements that fetch records, compatible with the dbi execute method.

:param sql string: the sql statement, like 'select * from %s'
:param args list: when set to None, dbi execute(sql) will be used, else dbi execute(sql, args); the args keep the original rules and should be a tuple or a list of lists
:param many int: when set, the query method will return a generator to iterate over the results
:param as_dict bool: when True, each row will be a dict, otherwise a tuple
:param key: the key of the database you want to use
[ "The", "connection", "raw", "sql", "query", "when", "select", "table", "show", "table", "to", "fetch", "records", "it", "is", "compatible", "the", "dbi", "execute", "method", "::" ]
python
train
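A hedged usage sketch for query, assuming a slave database was already registered under the 'default' key (the registration call is not shown here) and that a `users` table with these columns exists:

import db

rows = db.query('select name, email from users where id = %s', (1,),
                as_dict=True)
for row in rows:
    print(row['name'], row['email'])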
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1099-L1119
def make_slices(self, tf_tensor, tensor_shape):
    """Turns a single tf.Tensor into a list of slices, one for each processor.

    Args:
      tf_tensor: tf.Tensor.
      tensor_shape: Shape.

    Returns:
      list of tf.Tensor with length self.size.
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    slice_shape = self.slice_shape(tensor_shape)

    def my_fn(pnum):
      if tensor_layout.is_fully_replicated:
        return tf_tensor
      else:
        slice_begin = self.slice_begin(tensor_shape, pnum)
        return tf.slice(tf_tensor, slice_begin, slice_shape)

    return parallel([tf_tensor.device] * self.size, my_fn,
                    list(xrange(self.size)))
[ "def", "make_slices", "(", "self", ",", "tf_tensor", ",", "tensor_shape", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "slice_shape", "=", "self", ".", "slice_shape", "(", "tensor_shape", ")", "def", "my_fn", "(", "pnum", ")", ":", "if", "tensor_layout", ".", "is_fully_replicated", ":", "return", "tf_tensor", "else", ":", "slice_begin", "=", "self", ".", "slice_begin", "(", "tensor_shape", ",", "pnum", ")", "return", "tf", ".", "slice", "(", "tf_tensor", ",", "slice_begin", ",", "slice_shape", ")", "return", "parallel", "(", "[", "tf_tensor", ".", "device", "]", "*", "self", ".", "size", ",", "my_fn", ",", "list", "(", "xrange", "(", "self", ".", "size", ")", ")", ")" ]
Turns a single tf.Tensor into a list of slices, one for each processor.

Args:
  tf_tensor: tf.Tensor.
  tensor_shape: Shape.

Returns:
  list of tf.Tensor with length self.size.
[ "Turns", "a", "single", "tf", ".", "Tensor", "into", "a", "list", "of", "slices", "one", "for", "each", "processor", "." ]
python
train
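A hedged sketch of make_slices in use; `mesh_impl` stands in for a mesh implementation object whose construction (mesh shape, layout rules, device placement) is outside this snippet, and the dimension names are illustrative:

import tensorflow as tf
import mesh_tensorflow as mtf

tensor = tf.zeros([4, 8])
shape = mtf.Shape([mtf.Dimension('batch', 4), mtf.Dimension('hidden', 8)])
slices = mesh_impl.make_slices(tensor, shape)  # one slice per processor
assert len(slices) == mesh_impl.size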
diffeo/rejester
rejester/workers.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/workers.py#L790-L893
def do_some_work(self, can_start_more): '''Run one cycle of the main loop. If the log child has died, restart it. If any of the worker children have died, collect their status codes and remove them from the child set. If there is a worker slot available, start exactly one child. :param bool can_start_more: Allowed to start a child? :return: Time to wait before calling this function again ''' any_happy_children = False any_sad_children = False any_bored_children = False self.debug('loop', 'starting work loop, can_start_more={0!r}' .format(can_start_more)) # See if anyone has died while True: try: pid, status = os.waitpid(-1, os.WNOHANG) except OSError, e: if e.errno == errno.ECHILD: # No children at all pid = 0 else: raise if pid == 0: break elif pid == self.log_child: self.debug('children', 'log child with pid {0} exited'.format(pid)) self.start_log_child() elif pid in self.children: self.children.remove(pid) if os.WIFEXITED(status): code = os.WEXITSTATUS(status) self.debug('children', 'worker {0} exited with code {1}' .format(pid, code)) if code == SingleWorker.EXIT_SUCCESS: any_happy_children = True elif code == SingleWorker.EXIT_EXCEPTION: self.log(logging.WARNING, 'child {0} reported failure'.format(pid)) any_sad_children = True elif code == SingleWorker.EXIT_BORED: any_bored_children = True else: self.log(logging.WARNING, 'child {0} had odd exit code {1}' .format(pid, code)) elif os.WIFSIGNALED(status): self.log(logging.WARNING, 'child {0} exited with signal {1}' .format(pid, os.WTERMSIG(status))) any_sad_children = True else: self.log(logging.WARNING, 'child {0} went away with unknown status {1}' .format(pid, status)) any_sad_children = True else: self.log(logging.WARNING, 'child {0} exited, but we don\'t recognize it' .format(pid)) # ...what next? # (Don't log anything here; either we logged a WARNING message # above when things went badly, or we're in a very normal flow # and don't want to spam the log) if any_sad_children: self.debug('loop', 'exit work loop with sad child') return self.poll_interval if any_bored_children: self.debug('loop', 'exit work loop with no work') return self.poll_interval # This means we get to start a child, maybe. if can_start_more and len(self.children) < self.num_workers: pid = os.fork() if pid == 0: # We are the child self.clear_signal_handlers() if self.log_fd: os.close(self.log_fd) LoopWorker.as_child(yakonfig.get_global_config(), parent=self.worker_id) # This should never return, but just in case sys.exit(SingleWorker.EXIT_EXCEPTION) else: # We are the parent self.debug('children', 'new worker with pid {0}'.format(pid)) self.children.add(pid) self.debug('loop', 'exit work loop with a new worker') return self.spawn_interval # Absolutely nothing is happening; which means we have all # of our potential workers and they're doing work self.debug('loop', 'exit work loop with full system') return self.poll_interval
[ "def", "do_some_work", "(", "self", ",", "can_start_more", ")", ":", "any_happy_children", "=", "False", "any_sad_children", "=", "False", "any_bored_children", "=", "False", "self", ".", "debug", "(", "'loop'", ",", "'starting work loop, can_start_more={0!r}'", ".", "format", "(", "can_start_more", ")", ")", "# See if anyone has died", "while", "True", ":", "try", ":", "pid", ",", "status", "=", "os", ".", "waitpid", "(", "-", "1", ",", "os", ".", "WNOHANG", ")", "except", "OSError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ECHILD", ":", "# No children at all", "pid", "=", "0", "else", ":", "raise", "if", "pid", "==", "0", ":", "break", "elif", "pid", "==", "self", ".", "log_child", ":", "self", ".", "debug", "(", "'children'", ",", "'log child with pid {0} exited'", ".", "format", "(", "pid", ")", ")", "self", ".", "start_log_child", "(", ")", "elif", "pid", "in", "self", ".", "children", ":", "self", ".", "children", ".", "remove", "(", "pid", ")", "if", "os", ".", "WIFEXITED", "(", "status", ")", ":", "code", "=", "os", ".", "WEXITSTATUS", "(", "status", ")", "self", ".", "debug", "(", "'children'", ",", "'worker {0} exited with code {1}'", ".", "format", "(", "pid", ",", "code", ")", ")", "if", "code", "==", "SingleWorker", ".", "EXIT_SUCCESS", ":", "any_happy_children", "=", "True", "elif", "code", "==", "SingleWorker", ".", "EXIT_EXCEPTION", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "'child {0} reported failure'", ".", "format", "(", "pid", ")", ")", "any_sad_children", "=", "True", "elif", "code", "==", "SingleWorker", ".", "EXIT_BORED", ":", "any_bored_children", "=", "True", "else", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "'child {0} had odd exit code {1}'", ".", "format", "(", "pid", ",", "code", ")", ")", "elif", "os", ".", "WIFSIGNALED", "(", "status", ")", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "'child {0} exited with signal {1}'", ".", "format", "(", "pid", ",", "os", ".", "WTERMSIG", "(", "status", ")", ")", ")", "any_sad_children", "=", "True", "else", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "'child {0} went away with unknown status {1}'", ".", "format", "(", "pid", ",", "status", ")", ")", "any_sad_children", "=", "True", "else", ":", "self", ".", "log", "(", "logging", ".", "WARNING", ",", "'child {0} exited, but we don\\'t recognize it'", ".", "format", "(", "pid", ")", ")", "# ...what next?", "# (Don't log anything here; either we logged a WARNING message", "# above when things went badly, or we're in a very normal flow", "# and don't want to spam the log)", "if", "any_sad_children", ":", "self", ".", "debug", "(", "'loop'", ",", "'exit work loop with sad child'", ")", "return", "self", ".", "poll_interval", "if", "any_bored_children", ":", "self", ".", "debug", "(", "'loop'", ",", "'exit work loop with no work'", ")", "return", "self", ".", "poll_interval", "# This means we get to start a child, maybe.", "if", "can_start_more", "and", "len", "(", "self", ".", "children", ")", "<", "self", ".", "num_workers", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", "==", "0", ":", "# We are the child", "self", ".", "clear_signal_handlers", "(", ")", "if", "self", ".", "log_fd", ":", "os", ".", "close", "(", "self", ".", "log_fd", ")", "LoopWorker", ".", "as_child", "(", "yakonfig", ".", "get_global_config", "(", ")", ",", "parent", "=", "self", ".", "worker_id", ")", "# This should never return, but just in case", "sys", ".", "exit", "(", "SingleWorker", ".", 
"EXIT_EXCEPTION", ")", "else", ":", "# We are the parent", "self", ".", "debug", "(", "'children'", ",", "'new worker with pid {0}'", ".", "format", "(", "pid", ")", ")", "self", ".", "children", ".", "add", "(", "pid", ")", "self", ".", "debug", "(", "'loop'", ",", "'exit work loop with a new worker'", ")", "return", "self", ".", "spawn_interval", "# Absolutely nothing is happening; which means we have all", "# of our potential workers and they're doing work", "self", ".", "debug", "(", "'loop'", ",", "'exit work loop with full system'", ")", "return", "self", ".", "poll_interval" ]
Run one cycle of the main loop. If the log child has died, restart it. If any of the worker children have died, collect their status codes and remove them from the child set. If there is a worker slot available, start exactly one child. :param bool can_start_more: Allowed to start a child? :return: Time to wait before calling this function again
[ "Run", "one", "cycle", "of", "the", "main", "loop", "." ]
python
train
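A hedged sketch of the driver loop around do_some_work; `worker` stands in for the forking parent instance (its construction, signal handling, and real shutdown logic are elided), and the return value is used as the sleep interval exactly as the docstring describes:

import time

while True:
    interval = worker.do_some_work(can_start_more=True)
    time.sleep(interval)  # poll_interval or spawn_interval, per the return value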