Dataset columns:
text: string (lengths 89 to 104k)
code_tokens: list
avg_line_len: float64 (min 7.91, max 980)
score: float64 (min 0, max 630)
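Each row below pairs a code snippet (text) with its token list (code_tokens) and the two per-row statistics (avg_line_len, score). As a minimal loading sketch, assuming this preview comes from a Hugging Face dataset; the dataset name below is a hypothetical placeholder:

from datasets import load_dataset

ds = load_dataset("org/code-corpus", split="train")  # hypothetical dataset name
row = ds[0]
print(row["avg_line_len"], row["score"])  # per-row statistics
print(row["text"][:80])                   # start of the code snippet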
def floats(s): """Convert string to float. Handles more string formats than the standard Python conversion.""" try: return float(s) except ValueError: s = re.sub(r'(\d)\s*\(\d+(\.\d+)?\)', r'\1', s) # Remove bracketed numbers from end s = re.sub(r'(\d)\s*±\s*\d+(\.\d+)?', r'\1', s) # Remove uncertainties from end s = s.rstrip('\'"+-=<>/,.:;!?)]}…∼~≈×*_≥≤') # Remove trailing punctuation s = s.lstrip('\'"+=<>/([{∼~≈×*_≥≤£$€#§') # Remove leading punctuation s = s.replace(',', '') # Remove commas s = ''.join(s.split()) # Strip whitespace s = re.sub(r'(\d)\s*[×x]\s*10\^?(-?\d)', r'\1e\2', s) # Convert scientific notation return float(s)
[ "def", "floats", "(", "s", ")", ":", "try", ":", "return", "float", "(", "s", ")", "except", "ValueError", ":", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*\\(\\d+(\\.\\d+)?\\)'", ",", "r'\\1'", ",", "s", ")", "# Remove bracketed numbers from end", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*±\\s*\\d+(\\.\\d+)?',", " ", "'\\1',", " ", ")", " ", " Remove uncertainties from end", "s", "=", "s", ".", "rstrip", "(", "'\\'\"+-=<>/,.:;!?)]}…∼~≈×*_≥≤') ", " ", "ailing punctuation", "s", "=", "s", ".", "lstrip", "(", "'\\'\"+=<>/([{∼~≈×*_≥≤£$€#§') ", " ", "ing punctuation", "s", "=", "s", ".", "replace", "(", "','", ",", "''", ")", "# Remove commas", "s", "=", "''", ".", "join", "(", "s", ".", "split", "(", ")", ")", "# Strip whitespace", "s", "=", "re", ".", "sub", "(", "r'(\\d)\\s*[×x]\\s*10\\^?(-?\\d)',", " ", "'\\1e\\2',", " ", ")", " ", " Convert scientific notation", "return", "float", "(", "s", ")" ]
62.923077
35.307692
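A quick sketch of what floats accepts, assuming re is imported in the snippet's module; the example inputs are hypothetical:

floats("1.23(4)")    # 1.23, bracketed uncertainty removed
floats("5.2 ± 0.3")  # 5.2, ± uncertainty removed
floats("1,234")      # 1234.0, thousands separator removed
floats("3 × 10^5")   # 300000.0, scientific notation normalized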
def support_support_param_username(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras") support_param = ET.SubElement(support, "support-param") username = ET.SubElement(support_param, "username") username.text = kwargs.pop('username') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "support_support_param_username", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "support", "=", "ET", ".", "SubElement", "(", "config", ",", "\"support\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ras\"", ")", "support_param", "=", "ET", ".", "SubElement", "(", "support", ",", "\"support-param\"", ")", "username", "=", "ET", ".", "SubElement", "(", "support_param", ",", "\"username\"", ")", "username", ".", "text", "=", "kwargs", ".", "pop", "(", "'username'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
43.363636
16.363636
def reset_parameters(self, init_weight): """ Sets initial random values for trainable parameters. """ stdv = 1. / math.sqrt(self.num_units) self.linear_att.data.uniform_(-init_weight, init_weight) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_()
[ "def", "reset_parameters", "(", "self", ",", "init_weight", ")", ":", "stdv", "=", "1.", "/", "math", ".", "sqrt", "(", "self", ".", "num_units", ")", "self", ".", "linear_att", ".", "data", ".", "uniform_", "(", "-", "init_weight", ",", "init_weight", ")", "if", "self", ".", "normalize", ":", "self", ".", "normalize_scalar", ".", "data", ".", "fill_", "(", "stdv", ")", "self", ".", "normalize_bias", ".", "data", ".", "zero_", "(", ")" ]
35.1
11.7
def get_SAM(self,min_intron_size=68): """Get a SAM object representation of the alignment. :returns: SAM representation :rtype: SAM """ from seqtools.format.sam import SAM #ar is target then query qname = self.alignment_ranges[0][1].chr flag = 0 if self.strand == '-': flag = 16 rname = self.alignment_ranges[0][0].chr pos = self.alignment_ranges[0][0].start mapq = 255 cigar = self.construct_cigar(min_intron_size) rnext = '*' pnext = 0 tlen = 0 # possible to set if we have a reference if self._options.reference: if rname in self._options.reference: tlen = len(self._options.reference[rname]) seq = self.query_sequence if not seq: seq = '*' qual = self.query_quality if not qual: qual = '*' #seq = '*' #qual = '*' if self.strand == '-': seq = rc(seq) qual = qual[::-1] ln = qname + "\t" + str(flag) + "\t" + rname + "\t" + \ str(pos) + "\t" + str(mapq) + "\t" + cigar + "\t" + \ rnext + "\t" + str(pnext) + "\t" + str(tlen) + "\t" + \ seq + "\t" + qual return SAM(ln,reference=self._reference)
[ "def", "get_SAM", "(", "self", ",", "min_intron_size", "=", "68", ")", ":", "from", "seqtools", ".", "format", ".", "sam", "import", "SAM", "#ar is target then query", "qname", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "1", "]", ".", "chr", "flag", "=", "0", "if", "self", ".", "strand", "==", "'-'", ":", "flag", "=", "16", "rname", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "0", "]", ".", "chr", "pos", "=", "self", ".", "alignment_ranges", "[", "0", "]", "[", "0", "]", ".", "start", "mapq", "=", "255", "cigar", "=", "self", ".", "construct_cigar", "(", "min_intron_size", ")", "rnext", "=", "'*'", "pnext", "=", "0", "tlen", "=", "0", "# possible to set if we have a reference", "if", "self", ".", "_options", ".", "reference", ":", "if", "rname", "in", "self", ".", "_options", ".", "reference", ":", "tlen", "=", "len", "(", "self", ".", "_options", ".", "reference", "[", "rname", "]", ")", "seq", "=", "self", ".", "query_sequence", "if", "not", "seq", ":", "seq", "=", "'*'", "qual", "=", "self", ".", "query_quality", "if", "not", "qual", ":", "qual", "=", "'*'", "#seq = '*'", "#qual = '*'", "if", "self", ".", "strand", "==", "'-'", ":", "seq", "=", "rc", "(", "seq", ")", "qual", "=", "qual", "[", ":", ":", "-", "1", "]", "ln", "=", "qname", "+", "\"\\t\"", "+", "str", "(", "flag", ")", "+", "\"\\t\"", "+", "rname", "+", "\"\\t\"", "+", "str", "(", "pos", ")", "+", "\"\\t\"", "+", "str", "(", "mapq", ")", "+", "\"\\t\"", "+", "cigar", "+", "\"\\t\"", "+", "rnext", "+", "\"\\t\"", "+", "str", "(", "pnext", ")", "+", "\"\\t\"", "+", "str", "(", "tlen", ")", "+", "\"\\t\"", "+", "seq", "+", "\"\\t\"", "+", "qual", "return", "SAM", "(", "ln", ",", "reference", "=", "self", ".", "_reference", ")" ]
31.222222
14.75
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
[ "def", "_process_queue_tasks", "(", "self", ",", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", ":", "processed_count", "=", "0", "# Get all tasks", "serialized_tasks", "=", "self", ".", "connection", ".", "mget", "(", "[", "self", ".", "_key", "(", "'task'", ",", "task_id", ")", "for", "task_id", "in", "task_ids", "]", ")", "# Parse tasks", "tasks", "=", "[", "]", "for", "task_id", ",", "serialized_task", "in", "zip", "(", "task_ids", ",", "serialized_tasks", ")", ":", "if", "serialized_task", ":", "task_data", "=", "json", ".", "loads", "(", "serialized_task", ")", "else", ":", "# In the rare case where we don't find the task which is", "# queued (see ReliabilityTestCase.test_task_disappears),", "# we log an error and remove the task below. We need to", "# at least initialize the Task object with an ID so we can", "# remove it.", "task_data", "=", "{", "'id'", ":", "task_id", "}", "task", "=", "Task", "(", "self", ".", "tiger", ",", "queue", "=", "queue", ",", "_data", "=", "task_data", ",", "_state", "=", "ACTIVE", ",", "_ts", "=", "now", ")", "if", "not", "serialized_task", ":", "# Remove task as per comment above", "log", ".", "error", "(", "'not found'", ",", "task_id", "=", "task_id", ")", "task", ".", "_move", "(", ")", "elif", "task", ".", "id", "!=", "task_id", ":", "log", ".", "error", "(", "'task ID mismatch'", ",", "task_id", "=", "task_id", ")", "# Remove task", "task", ".", "_move", "(", ")", "else", ":", "tasks", ".", "append", "(", "task", ")", "# List of task IDs that exist and we will update the heartbeat on.", "valid_task_ids", "=", "set", "(", "task", ".", "id", "for", "task", "in", "tasks", ")", "# Group by task func", "tasks_by_func", "=", "OrderedDict", "(", ")", "for", "task", "in", "tasks", ":", "func", "=", "task", ".", "serialized_func", "if", "func", "in", "tasks_by_func", ":", "tasks_by_func", "[", "func", "]", ".", "append", "(", "task", ")", "else", ":", "tasks_by_func", "[", "func", "]", "=", "[", "task", "]", "# Execute tasks for each task func", "for", "tasks", "in", "tasks_by_func", ".", "values", "(", ")", ":", "success", ",", "processed_tasks", "=", "self", ".", "_execute_task_group", "(", "queue", ",", "tasks", ",", "valid_task_ids", ",", "queue_lock", ")", "processed_count", "=", "processed_count", "+", "len", "(", "processed_tasks", ")", "log", ".", "debug", "(", "'processed'", ",", "attempted", "=", "len", "(", "tasks", ")", ",", "processed", "=", "processed_count", ")", "for", "task", "in", "processed_tasks", ":", "self", ".", "_finish_task_processing", "(", "queue", ",", "task", ",", "success", ")", "return", "processed_count" ]
37.683333
19.2
def do_intersect(bb1, bb2): """ Helper function that returns True if two bounding boxes overlap. """ if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]: return False if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]: return False return True
[ "def", "do_intersect", "(", "bb1", ",", "bb2", ")", ":", "if", "bb1", "[", "0", "]", "+", "bb1", "[", "2", "]", "<", "bb2", "[", "0", "]", "or", "bb2", "[", "0", "]", "+", "bb2", "[", "2", "]", "<", "bb1", "[", "0", "]", ":", "return", "False", "if", "bb1", "[", "1", "]", "+", "bb1", "[", "3", "]", "<", "bb2", "[", "1", "]", "or", "bb2", "[", "1", "]", "+", "bb2", "[", "3", "]", "<", "bb1", "[", "1", "]", ":", "return", "False", "return", "True" ]
31.555556
16.222222
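A usage sketch for do_intersect, assuming boxes are (x, y, width, height) tuples as the index arithmetic implies:

do_intersect((0, 0, 2, 2), (1, 1, 2, 2))  # True: the boxes overlap
do_intersect((0, 0, 2, 2), (3, 3, 1, 1))  # False: bb1 ends at x=2, bb2 starts at x=3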
def pre_render(self): """Last things to do before rendering""" self.add_styles() self.add_scripts() self.root.set( 'viewBox', '0 0 %d %d' % (self.graph.width, self.graph.height) ) if self.graph.explicit_size: self.root.set('width', str(self.graph.width)) self.root.set('height', str(self.graph.height))
[ "def", "pre_render", "(", "self", ")", ":", "self", ".", "add_styles", "(", ")", "self", ".", "add_scripts", "(", ")", "self", ".", "root", ".", "set", "(", "'viewBox'", ",", "'0 0 %d %d'", "%", "(", "self", ".", "graph", ".", "width", ",", "self", ".", "graph", ".", "height", ")", ")", "if", "self", ".", "graph", ".", "explicit_size", ":", "self", ".", "root", ".", "set", "(", "'width'", ",", "str", "(", "self", ".", "graph", ".", "width", ")", ")", "self", ".", "root", ".", "set", "(", "'height'", ",", "str", "(", "self", ".", "graph", ".", "height", ")", ")" ]
37.7
17.1
def volumes_maximum_size_bytes(self): """Gets the size of the biggest logical drive. :returns: the size in bytes. """ return utils.max_safe([member.volumes.maximum_size_bytes for member in self.get_members()])
[ "def", "volumes_maximum_size_bytes", "(", "self", ")", ":", "return", "utils", ".", "max_safe", "(", "[", "member", ".", "volumes", ".", "maximum_size_bytes", "for", "member", "in", "self", ".", "get_members", "(", ")", "]", ")" ]
35.857143
14.142857
def send_callback_json_message(value, *args, **kwargs): """ Useful for sending messages from callbacks, as it puts the result of the callback in the dict for serialization """ if value: kwargs['result'] = value send_json_message(args[0], args[1], **kwargs) return value
[ "def", "send_callback_json_message", "(", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "value", ":", "kwargs", "[", "'result'", "]", "=", "value", "send_json_message", "(", "args", "[", "0", "]", ",", "args", "[", "1", "]", ",", "*", "*", "kwargs", ")", "return", "value" ]
25.333333
20.666667
def wildcard_import_names(self): """The list of imported names when this module is 'wildcard imported'. It doesn't include the '__builtins__' name which is added by the current CPython implementation of wildcard imports. :returns: The list of imported names. :rtype: list(str) """ # We separate the different steps of lookup in try/excepts # to avoid catching too many Exceptions default = [name for name in self.keys() if not name.startswith("_")] try: all_values = self["__all__"] except KeyError: return default try: explicit = next(all_values.assigned_stmts()) except exceptions.InferenceError: return default except AttributeError: # not an assignment node # XXX infer? return default # Try our best to detect the exported name. inferred = [] try: explicit = next(explicit.infer()) except exceptions.InferenceError: return default if not isinstance(explicit, (node_classes.Tuple, node_classes.List)): return default str_const = lambda node: ( isinstance(node, node_classes.Const) and isinstance(node.value, str) ) for node in explicit.elts: if str_const(node): inferred.append(node.value) else: try: inferred_node = next(node.infer()) except exceptions.InferenceError: continue if str_const(inferred_node): inferred.append(inferred_node.value) return inferred
[ "def", "wildcard_import_names", "(", "self", ")", ":", "# We separate the different steps of lookup in try/excepts", "# to avoid catching too many Exceptions", "default", "=", "[", "name", "for", "name", "in", "self", ".", "keys", "(", ")", "if", "not", "name", ".", "startswith", "(", "\"_\"", ")", "]", "try", ":", "all_values", "=", "self", "[", "\"__all__\"", "]", "except", "KeyError", ":", "return", "default", "try", ":", "explicit", "=", "next", "(", "all_values", ".", "assigned_stmts", "(", ")", ")", "except", "exceptions", ".", "InferenceError", ":", "return", "default", "except", "AttributeError", ":", "# not an assignment node", "# XXX infer?", "return", "default", "# Try our best to detect the exported name.", "inferred", "=", "[", "]", "try", ":", "explicit", "=", "next", "(", "explicit", ".", "infer", "(", ")", ")", "except", "exceptions", ".", "InferenceError", ":", "return", "default", "if", "not", "isinstance", "(", "explicit", ",", "(", "node_classes", ".", "Tuple", ",", "node_classes", ".", "List", ")", ")", ":", "return", "default", "str_const", "=", "lambda", "node", ":", "(", "isinstance", "(", "node", ",", "node_classes", ".", "Const", ")", "and", "isinstance", "(", "node", ".", "value", ",", "str", ")", ")", "for", "node", "in", "explicit", ".", "elts", ":", "if", "str_const", "(", "node", ")", ":", "inferred", ".", "append", "(", "node", ".", "value", ")", "else", ":", "try", ":", "inferred_node", "=", "next", "(", "node", ".", "infer", "(", ")", ")", "except", "exceptions", ".", "InferenceError", ":", "continue", "if", "str_const", "(", "inferred_node", ")", ":", "inferred", ".", "append", "(", "inferred_node", ".", "value", ")", "return", "inferred" ]
34.387755
17.306122
def _GetDecodedStreamSize(self): """Retrieves the decoded stream size. Returns: int: decoded stream size. """ self._file_object.seek(0, os.SEEK_SET) self._decoder = self._GetDecoder() self._decoded_data = b'' encoded_data_offset = 0 encoded_data_size = self._file_object.get_size() decoded_stream_size = 0 while encoded_data_offset < encoded_data_size: read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE) if read_count == 0: break encoded_data_offset += read_count decoded_stream_size += self._decoded_data_size return decoded_stream_size
[ "def", "_GetDecodedStreamSize", "(", "self", ")", ":", "self", ".", "_file_object", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "self", ".", "_decoder", "=", "self", ".", "_GetDecoder", "(", ")", "self", ".", "_decoded_data", "=", "b''", "encoded_data_offset", "=", "0", "encoded_data_size", "=", "self", ".", "_file_object", ".", "get_size", "(", ")", "decoded_stream_size", "=", "0", "while", "encoded_data_offset", "<", "encoded_data_size", ":", "read_count", "=", "self", ".", "_ReadEncodedData", "(", "self", ".", "_ENCODED_DATA_BUFFER_SIZE", ")", "if", "read_count", "==", "0", ":", "break", "encoded_data_offset", "+=", "read_count", "decoded_stream_size", "+=", "self", ".", "_decoded_data_size", "return", "decoded_stream_size" ]
25.75
18.583333
def write_file(self, target_path, html): """ Writes out the provided HTML to the provided path. """ logger.debug("Building to {}{}".format(self.fs_name, target_path)) with self.fs.open(smart_text(target_path), 'wb') as outfile: outfile.write(six.binary_type(html)) outfile.close()
[ "def", "write_file", "(", "self", ",", "target_path", ",", "html", ")", ":", "logger", ".", "debug", "(", "\"Building to {}{}\"", ".", "format", "(", "self", ".", "fs_name", ",", "target_path", ")", ")", "with", "self", ".", "fs", ".", "open", "(", "smart_text", "(", "target_path", ")", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "six", ".", "binary_type", "(", "html", ")", ")", "outfile", ".", "close", "(", ")" ]
42.125
12.625
def set_nearest_border(self): """Snaps the port to the correct side upon state size change """ px, py = self._point nw_x, nw_y, se_x, se_y = self.get_adjusted_border_positions() if self._port.side == SnappedSide.RIGHT: _update(px, se_x) elif self._port.side == SnappedSide.BOTTOM: _update(py, se_y) elif self._port.side == SnappedSide.LEFT: _update(px, nw_x) elif self._port.side == SnappedSide.TOP: _update(py, nw_y)
[ "def", "set_nearest_border", "(", "self", ")", ":", "px", ",", "py", "=", "self", ".", "_point", "nw_x", ",", "nw_y", ",", "se_x", ",", "se_y", "=", "self", ".", "get_adjusted_border_positions", "(", ")", "if", "self", ".", "_port", ".", "side", "==", "SnappedSide", ".", "RIGHT", ":", "_update", "(", "px", ",", "se_x", ")", "elif", "self", ".", "_port", ".", "side", "==", "SnappedSide", ".", "BOTTOM", ":", "_update", "(", "py", ",", "se_y", ")", "elif", "self", ".", "_port", ".", "side", "==", "SnappedSide", ".", "LEFT", ":", "_update", "(", "px", ",", "nw_x", ")", "elif", "self", ".", "_port", ".", "side", "==", "SnappedSide", ".", "TOP", ":", "_update", "(", "py", ",", "nw_y", ")" ]
36.928571
12.285714
def _logpdf(self, **kwargs): """Returns the log of the pdf at the given values. The keyword arguments must contain all of the parameters in self's params. Unrecognized arguments are ignored. """ if kwargs in self: return sum([self._lognorm[p] + self._expnorm[p]*(kwargs[p]-self._mean[p])**2. for p in self._params]) else: return -numpy.inf
[ "def", "_logpdf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", "in", "self", ":", "return", "sum", "(", "[", "self", ".", "_lognorm", "[", "p", "]", "+", "self", ".", "_expnorm", "[", "p", "]", "*", "(", "kwargs", "[", "p", "]", "-", "self", ".", "_mean", "[", "p", "]", ")", "**", "2.", "for", "p", "in", "self", ".", "_params", "]", ")", "else", ":", "return", "-", "numpy", ".", "inf" ]
40.454545
13.818182
def deserialize_duration(attr): """Deserialize ISO-8601 formatted string into TimeDelta object. :param str attr: response string to be deserialized. :rtype: TimeDelta :raises: DeserializationError if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text try: duration = isodate.parse_duration(attr) except(ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize duration object." raise_with_traceback(DeserializationError, msg, err) else: return duration
[ "def", "deserialize_duration", "(", "attr", ")", ":", "if", "isinstance", "(", "attr", ",", "ET", ".", "Element", ")", ":", "attr", "=", "attr", ".", "text", "try", ":", "duration", "=", "isodate", ".", "parse_duration", "(", "attr", ")", "except", "(", "ValueError", ",", "OverflowError", ",", "AttributeError", ")", "as", "err", ":", "msg", "=", "\"Cannot deserialize duration object.\"", "raise_with_traceback", "(", "DeserializationError", ",", "msg", ",", "err", ")", "else", ":", "return", "duration" ]
38.5
16.375
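A usage sketch for deserialize_duration, assuming the isodate package and the error helpers are importable:

deserialize_duration("PT1H30M")  # datetime.timedelta(seconds=5400)
deserialize_duration("P1DT2H")   # datetime.timedelta(days=1, seconds=7200)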
def _create_pattern_set(self, pattern, values): """Create a new pattern set.""" type_ = self._get_type(values) version = self._get_version(values) comment = values.get(COMMENT) self._pattern_set = self._spec.new_pattern_set( type_, version, pattern, self, comment )
[ "def", "_create_pattern_set", "(", "self", ",", "pattern", ",", "values", ")", ":", "type_", "=", "self", ".", "_get_type", "(", "values", ")", "version", "=", "self", ".", "_get_version", "(", "values", ")", "comment", "=", "values", ".", "get", "(", "COMMENT", ")", "self", ".", "_pattern_set", "=", "self", ".", "_spec", ".", "new_pattern_set", "(", "type_", ",", "version", ",", "pattern", ",", "self", ",", "comment", ")" ]
39.75
8.875
def transform_properties(properties, schema): """Transform properties types according to a schema. Parameters ---------- properties : dict Properties to transform. schema : dict Fiona schema containing the types. """ new_properties = properties.copy() for prop_value, (prop_name, prop_type) in zip(new_properties.values(), schema["properties"].items()): if prop_value is None: continue elif prop_type == "time": new_properties[prop_name] = parse_date(prop_value).time() elif prop_type == "date": new_properties[prop_name] = parse_date(prop_value).date() elif prop_type == "datetime": new_properties[prop_name] = parse_date(prop_value) return new_properties
[ "def", "transform_properties", "(", "properties", ",", "schema", ")", ":", "new_properties", "=", "properties", ".", "copy", "(", ")", "for", "prop_value", ",", "(", "prop_name", ",", "prop_type", ")", "in", "zip", "(", "new_properties", ".", "values", "(", ")", ",", "schema", "[", "\"properties\"", "]", ".", "items", "(", ")", ")", ":", "if", "prop_value", "is", "None", ":", "continue", "elif", "prop_type", "==", "\"time\"", ":", "new_properties", "[", "prop_name", "]", "=", "parse_date", "(", "prop_value", ")", ".", "time", "(", ")", "elif", "prop_type", "==", "\"date\"", ":", "new_properties", "[", "prop_name", "]", "=", "parse_date", "(", "prop_value", ")", ".", "date", "(", ")", "elif", "prop_type", "==", "\"datetime\"", ":", "new_properties", "[", "prop_name", "]", "=", "parse_date", "(", "prop_value", ")", "return", "new_properties" ]
33.434783
19.043478
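A usage sketch for transform_properties, assuming parse_date is dateutil.parser.parse or an equivalent helper; the input dicts are hypothetical:

props = {"created": "2021-03-01T12:00:00", "name": None}
schema = {"properties": {"created": "datetime", "name": "str"}}
transform_properties(props, schema)
# {'created': datetime.datetime(2021, 3, 1, 12, 0), 'name': None}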
def list_passwords(kwargs=None, call=None): ''' List all passwords on the account .. versionadded:: 2015.8.0 ''' response = _query('support', 'password/list') ret = {} for item in response['list']: if 'server' in item: server = item['server']['name'] if server not in ret: ret[server] = [] ret[server].append(item) return ret
[ "def", "list_passwords", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "response", "=", "_query", "(", "'support'", ",", "'password/list'", ")", "ret", "=", "{", "}", "for", "item", "in", "response", "[", "'list'", "]", ":", "if", "'server'", "in", "item", ":", "server", "=", "item", "[", "'server'", "]", "[", "'name'", "]", "if", "server", "not", "in", "ret", ":", "ret", "[", "server", "]", "=", "[", "]", "ret", "[", "server", "]", ".", "append", "(", "item", ")", "return", "ret" ]
23.705882
18.058824
def HA2(credentials, request, algorithm): """Create HA2 md5 hash If the qop directive's value is "auth" or is unspecified, then HA2 is: HA2 = md5(A2) = MD5(method:digestURI) If the qop directive's value is "auth-int", then HA2 is: HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody)) """ if credentials.get("qop") == "auth" or credentials.get('qop') is None: return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]), algorithm) elif credentials.get("qop") == "auth-int": for k in 'method', 'uri', 'body': if k not in request: raise ValueError("%s required" % k) A2 = b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8'), H(request['body'], algorithm).encode('utf-8')]) return H(A2, algorithm) raise ValueError
[ "def", "HA2", "(", "credentials", ",", "request", ",", "algorithm", ")", ":", "if", "credentials", ".", "get", "(", "\"qop\"", ")", "==", "\"auth\"", "or", "credentials", ".", "get", "(", "'qop'", ")", "is", "None", ":", "return", "H", "(", "b\":\"", ".", "join", "(", "[", "request", "[", "'method'", "]", ".", "encode", "(", "'utf-8'", ")", ",", "request", "[", "'uri'", "]", ".", "encode", "(", "'utf-8'", ")", "]", ")", ",", "algorithm", ")", "elif", "credentials", ".", "get", "(", "\"qop\"", ")", "==", "\"auth-int\"", ":", "for", "k", "in", "'method'", ",", "'uri'", ",", "'body'", ":", "if", "k", "not", "in", "request", ":", "raise", "ValueError", "(", "\"%s required\"", "%", "k", ")", "A2", "=", "b\":\"", ".", "join", "(", "[", "request", "[", "'method'", "]", ".", "encode", "(", "'utf-8'", ")", ",", "request", "[", "'uri'", "]", ".", "encode", "(", "'utf-8'", ")", ",", "H", "(", "request", "[", "'body'", "]", ",", "algorithm", ")", ".", "encode", "(", "'utf-8'", ")", "]", ")", "return", "H", "(", "A2", ",", "algorithm", ")", "raise", "ValueError" ]
47.210526
17.789474
def check_auth(self): """Check authentication/authorization of client""" # access permissions if self.auth is not None: return self.auth(self.request) return self.public_readble, self.public_writable
[ "def", "check_auth", "(", "self", ")", ":", "# access permissions", "if", "self", ".", "auth", "is", "not", "None", ":", "return", "self", ".", "auth", "(", "self", ".", "request", ")", "return", "self", ".", "public_readble", ",", "self", ".", "public_writable" ]
34
13.714286
def get_digests(self): '''Return a list of layers from a manifest. The function is intended to work with both version 1 and 2 of the schema. All layers (including redundant) are returned. By default, we try version 2 first, then fall back to version 1. For version 1 manifests: extraction is reversed Parameters ========== manifest: the manifest to read layers from ''' if not hasattr(self, 'manifests'): bot.error('Please retrieve manifests for an image first.') sys.exit(1) digests = [] reverseLayers = False schemaVersions = list(self.manifests.keys()) schemaVersions.reverse() # Select the manifest to use for schemaVersion in schemaVersions: manifest = self.manifests[schemaVersion] if manifest['schemaVersion'] == 1: reverseLayers = True # version 2 indices used by default layer_key = 'layers' digest_key = 'digest' # Docker manifest-v2-2.md#image-manifest if 'layers' in manifest: bot.debug('Image manifest version 2.2 found.') break # Docker manifest-v2-1.md#example-manifest # noqa elif 'fsLayers' in manifest: layer_key = 'fsLayers' digest_key = 'blobSum' bot.debug('Image manifest version 2.1 found.') break else: msg = "Improperly formed manifest, " msg += "layers, manifests, or fsLayers must be present" bot.error(msg) sys.exit(1) for layer in manifest[layer_key]: if digest_key in layer: bot.debug("Adding digest %s" % layer[digest_key]) digests.append(layer[digest_key]) # Reverse layer order for manifest version 1.0 if reverseLayers is True: message = 'v%s manifest, reversing layers' % schemaVersion bot.debug(message) digests.reverse() return digests
[ "def", "get_digests", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'manifests'", ")", ":", "bot", ".", "error", "(", "'Please retrieve manifests for an image first.'", ")", "sys", ".", "exit", "(", "1", ")", "digests", "=", "[", "]", "reverseLayers", "=", "False", "schemaVersions", "=", "list", "(", "self", ".", "manifests", ".", "keys", "(", ")", ")", "schemaVersions", ".", "reverse", "(", ")", "# Select the manifest to use", "for", "schemaVersion", "in", "schemaVersions", ":", "manifest", "=", "self", ".", "manifests", "[", "schemaVersion", "]", "if", "manifest", "[", "'schemaVersion'", "]", "==", "1", ":", "reverseLayers", "=", "True", "# version 2 indices used by default", "layer_key", "=", "'layers'", "digest_key", "=", "'digest'", "# Docker manifest-v2-2.md#image-manifest", "if", "'layers'", "in", "manifest", ":", "bot", ".", "debug", "(", "'Image manifest version 2.2 found.'", ")", "break", "# Docker manifest-v2-1.md#example-manifest # noqa", "elif", "'fsLayers'", "in", "manifest", ":", "layer_key", "=", "'fsLayers'", "digest_key", "=", "'blobSum'", "bot", ".", "debug", "(", "'Image manifest version 2.1 found.'", ")", "break", "else", ":", "msg", "=", "\"Improperly formed manifest, \"", "msg", "+=", "\"layers, manifests, or fsLayers must be present\"", "bot", ".", "error", "(", "msg", ")", "sys", ".", "exit", "(", "1", ")", "for", "layer", "in", "manifest", "[", "layer_key", "]", ":", "if", "digest_key", "in", "layer", ":", "bot", ".", "debug", "(", "\"Adding digest %s\"", "%", "layer", "[", "digest_key", "]", ")", "digests", ".", "append", "(", "layer", "[", "digest_key", "]", ")", "# Reverse layer order for manifest version 1.0", "if", "reverseLayers", "is", "True", ":", "message", "=", "'v%s manifest, reversing layers'", "%", "schemaVersion", "bot", ".", "debug", "(", "message", ")", "digests", ".", "reverse", "(", ")", "return", "digests" ]
29
19.818182
def render_template_with_args_in_file(file, template_file_name, **kwargs): """ Render the content of template_file_name with kwargs and write the result to file :param file: A file stream to write to :param template_file_name: path to the template file :param **kwargs: Args to be rendered in the template """ template_file_content = "".join( codecs.open( template_file_name, encoding='UTF-8' ).readlines() ) template_rendered = string.Template(template_file_content).safe_substitute(**kwargs) file.write(template_rendered)
[ "def", "render_template_with_args_in_file", "(", "file", ",", "template_file_name", ",", "*", "*", "kwargs", ")", ":", "template_file_content", "=", "\"\"", ".", "join", "(", "codecs", ".", "open", "(", "template_file_name", ",", "encoding", "=", "'UTF-8'", ")", ".", "readlines", "(", ")", ")", "template_rendered", "=", "string", ".", "Template", "(", "template_file_content", ")", ".", "safe_substitute", "(", "*", "*", "kwargs", ")", "file", ".", "write", "(", "template_rendered", ")" ]
39.266667
17.933333
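A usage sketch for render_template_with_args_in_file, assuming the snippet's module imports codecs and string; the template file here is a throwaway created for the example:

import io
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".tpl", delete=False, encoding="UTF-8") as tpl:
    tpl.write("Hello ${name}!")  # string.Template placeholder syntax
out = io.StringIO()
render_template_with_args_in_file(out, tpl.name, name="World")
out.getvalue()  # 'Hello World!'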
def first(pipe, items=1): ''' first is essentially the next() function, except that its second argument determines how many of the first items you want. If items is more than 1 the output is an islice of the generator. If items is 1, the first item is returned ''' pipe = iter(pipe) return next(pipe) if items == 1 else islice(pipe, 0, items)
[ "def", "first", "(", "pipe", ",", "items", "=", "1", ")", ":", "pipe", "=", "iter", "(", "pipe", ")", "return", "next", "(", "pipe", ")", "if", "items", "==", "1", "else", "islice", "(", "pipe", ",", "0", ",", "items", ")" ]
46.25
27
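A usage sketch for first, assuming islice is imported from itertools as the bare call implies:

first([1, 2, 3])           # 1
list(first([1, 2, 3], 2))  # [1, 2]; an islice when items > 1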
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None, executor=None, echo=False, after_created=None, **kwargs): """Accepts an ODBC connection string and returns a new Connection object. The connection string can be passed as a single string, as a list of keywords, or a combination of the two. Any keywords except autocommit, ansi, and timeout are simply added to the connection string. :param autocommit bool: False or zero (the default) leaves autocommit off; if True or non-zero, the connection is put into ODBC autocommit mode and statements are committed automatically. :param ansi bool: By default, pyodbc first attempts to connect using the Unicode version of SQLDriverConnectW. If the driver returns IM001 indicating it does not support the Unicode version, the ANSI version is tried. :param timeout int: An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is 0, which means the database's default timeout, if any, is used. :param after_created callable: supports customizing the connection after it is created. Must be an async unary function, or left as None. """ return _ContextManager(_connect(dsn=dsn, autocommit=autocommit, ansi=ansi, timeout=timeout, loop=loop, executor=executor, echo=echo, after_created=after_created, **kwargs))
[ "def", "connect", "(", "*", ",", "dsn", ",", "autocommit", "=", "False", ",", "ansi", "=", "False", ",", "timeout", "=", "0", ",", "loop", "=", "None", ",", "executor", "=", "None", ",", "echo", "=", "False", ",", "after_created", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_ContextManager", "(", "_connect", "(", "dsn", "=", "dsn", ",", "autocommit", "=", "autocommit", ",", "ansi", "=", "ansi", ",", "timeout", "=", "timeout", ",", "loop", "=", "loop", ",", "executor", "=", "executor", ",", "echo", "=", "echo", ",", "after_created", "=", "after_created", ",", "*", "*", "kwargs", ")", ")" ]
57.653846
27.884615
def u16le_list_to_byte_list(data): """! @brief Convert a halfword array into a byte array""" byteData = [] for h in data: byteData.extend([h & 0xff, (h >> 8) & 0xff]) return byteData
[ "def", "u16le_list_to_byte_list", "(", "data", ")", ":", "byteData", "=", "[", "]", "for", "h", "in", "data", ":", "byteData", ".", "extend", "(", "[", "h", "&", "0xff", ",", "(", "h", ">>", "8", ")", "&", "0xff", "]", ")", "return", "byteData" ]
33.5
14
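A usage sketch showing the little-endian ordering:

u16le_list_to_byte_list([0x1234, 0xABCD])
# [0x34, 0x12, 0xCD, 0xAB]: low byte first for each halfword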
def next(self, acceleration=0.0): """Continue rotation in direction of last drag.""" q = quaternion_slerp(self._qpre, self._qnow, 2.0 + acceleration, False) self._qpre, self._qnow = self._qnow, q
[ "def", "next", "(", "self", ",", "acceleration", "=", "0.0", ")", ":", "q", "=", "quaternion_slerp", "(", "self", ".", "_qpre", ",", "self", ".", "_qnow", ",", "2.0", "+", "acceleration", ",", "False", ")", "self", ".", "_qpre", ",", "self", ".", "_qnow", "=", "self", ".", "_qnow", ",", "q" ]
54
13
def create(self): """ Creates the node. """ log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name, name=self.name, id=self.id))
[ "def", "create", "(", "self", ")", ":", "log", ".", "info", "(", "\"{module}: {name} [{id}] created\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ")", ")" ]
37.125
23.875
def upscale(file_name, scale=1.5, margin_x=0, margin_y=0, suffix='scaled', tempdir=None): """Upscale a PDF to a large size.""" # Set output file name if tempdir: output = NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False).name elif suffix: output = os.path.join(os.path.dirname(file_name), add_suffix(file_name, suffix)) else: output = NamedTemporaryFile(suffix='.pdf').name reader = PdfFileReader(file_name) writer = PdfFileWriter() dims = dimensions(file_name) target_w = dims['w'] * scale target_h = dims['h'] * scale # Number of pages in input document page_count = reader.getNumPages() for page_number in range(page_count): wtrmrk = reader.getPage(page_number) page = PageObject.createBlankPage(width=target_w, height=target_h) page.mergeScaledTranslatedPage(wtrmrk, scale, margin_x, margin_y) writer.addPage(page) with open(output, "wb") as outputStream: writer.write(outputStream) return output
[ "def", "upscale", "(", "file_name", ",", "scale", "=", "1.5", ",", "margin_x", "=", "0", ",", "margin_y", "=", "0", ",", "suffix", "=", "'scaled'", ",", "tempdir", "=", "None", ")", ":", "# Set output file name", "if", "tempdir", ":", "output", "=", "NamedTemporaryFile", "(", "suffix", "=", "'.pdf'", ",", "dir", "=", "tempdir", ",", "delete", "=", "False", ")", ".", "name", "elif", "suffix", ":", "output", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ",", "add_suffix", "(", "file_name", ",", "suffix", ")", ")", "else", ":", "output", "=", "NamedTemporaryFile", "(", "suffix", "=", "'.pdf'", ")", ".", "name", "reader", "=", "PdfFileReader", "(", "file_name", ")", "writer", "=", "PdfFileWriter", "(", ")", "dims", "=", "dimensions", "(", "file_name", ")", "target_w", "=", "dims", "[", "'w'", "]", "*", "scale", "target_h", "=", "dims", "[", "'h'", "]", "*", "scale", "# Number of pages in input document", "page_count", "=", "reader", ".", "getNumPages", "(", ")", "for", "page_number", "in", "range", "(", "page_count", ")", ":", "wtrmrk", "=", "reader", ".", "getPage", "(", "page_number", ")", "page", "=", "PageObject", ".", "createBlankPage", "(", "width", "=", "target_w", ",", "height", "=", "target_h", ")", "page", ".", "mergeScaledTranslatedPage", "(", "wtrmrk", ",", "scale", ",", "margin_x", ",", "margin_y", ")", "writer", ".", "addPage", "(", "page", ")", "with", "open", "(", "output", ",", "\"wb\"", ")", "as", "outputStream", ":", "writer", ".", "write", "(", "outputStream", ")", "return", "output" ]
33.733333
21.6
def copy_file(stream, target, maxread=-1, buffer_size=2**16): ''' Read from :stream and write to :target until :maxread or EOF. ''' size, read = 0, stream.read while 1: to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size) part = read(to_read) if not part: return size target.write(part) size += len(part)
[ "def", "copy_file", "(", "stream", ",", "target", ",", "maxread", "=", "-", "1", ",", "buffer_size", "=", "2", "*", "16", ")", ":", "size", ",", "read", "=", "0", ",", "stream", ".", "read", "while", "1", ":", "to_read", "=", "buffer_size", "if", "maxread", "<", "0", "else", "min", "(", "buffer_size", ",", "maxread", "-", "size", ")", "part", "=", "read", "(", "to_read", ")", "if", "not", "part", ":", "return", "size", "target", ".", "write", "(", "part", ")", "size", "+=", "len", "(", "part", ")" ]
37.8
20.8
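A usage sketch for copy_file with in-memory streams:

import io

src = io.BytesIO(b"hello world")
dst = io.BytesIO()
copy_file(src, dst)  # returns 11, the number of bytes copied
dst.getvalue()       # b'hello world'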
def propertySearch(self, pid, getall=0): """ Searches this 'GameTree' for nodes containing matching properties. Returns a 'GameTree' containing the matched node(s). Arguments: - pid : string -- ID of properties to search for. - getall : boolean -- Set to true (1) to return all 'Node''s that match, or to false (0) to return only the first match.""" matches = [] for n in self: if n.has_key(pid): matches.append(n) if not getall: break else: # getall or not matches: for v in self.variations: matches = matches + v.propertySearch(pid, getall) if not getall and matches: break return GameTree(matches)
[ "def", "propertySearch", "(", "self", ",", "pid", ",", "getall", "=", "0", ")", ":", "matches", "=", "[", "]", "for", "n", "in", "self", ":", "if", "n", ".", "has_key", "(", "pid", ")", ":", "matches", ".", "append", "(", "n", ")", "if", "not", "getall", ":", "break", "else", ":", "# getall or not matches:", "for", "v", "in", "self", ".", "variations", ":", "matches", "=", "matches", "+", "v", ".", "propertySearch", "(", "pid", ",", "getall", ")", "if", "not", "getall", "and", "matches", ":", "break", "return", "GameTree", "(", "matches", ")" ]
33.736842
16.947368
def generate_nonce_timestamp(): """ Generate unique nonce with counter, uuid and rng.""" global count rng = botan.rng().get(30) uuid4 = uuid.uuid4().bytes # 16 byte tmpnonce = (bytes(str(count).encode('utf-8'))) + uuid4 + rng nonce = tmpnonce[:41] # 41 byte (328 bit) count += 1 return nonce
[ "def", "generate_nonce_timestamp", "(", ")", ":", "global", "count", "rng", "=", "botan", ".", "rng", "(", ")", ".", "get", "(", "30", ")", "uuid4", "=", "uuid", ".", "uuid4", "(", ")", ".", "bytes", "# 16 byte", "tmpnonce", "=", "(", "bytes", "(", "str", "(", "count", ")", ".", "encode", "(", "'utf-8'", ")", ")", ")", "+", "uuid4", "+", "rng", "nonce", "=", "tmpnonce", "[", ":", "41", "]", "# 41 byte (328 bit)", "count", "+=", "1", "return", "nonce" ]
35.222222
13.888889
def _update_record(self, identifier, rtype=None, name=None, content=None): """Updates the specified record in a new Gandi zone 'content' should be a string or a list of strings """ if self.protocol == 'rpc': return self.rpc_helper.update_record(identifier, rtype, name, content) data = {} if rtype: data['rrset_type'] = rtype if name: data['rrset_name'] = self._relative_name(name) if content: if isinstance(content, (list, tuple, set)): data['rrset_values'] = list(content) else: data['rrset_values'] = [content] if rtype is not None: # replace the records of a specific rtype url = '/domains/{0}/records/{1}/{2}'.format(self.domain_id, identifier or self._relative_name( name), rtype) self._put(url, data) else: # replace all records with a matching name url = '/domains/{0}/records/{1}'.format(self.domain_id, identifier or self._relative_name(name)) self._put(url, {'items': [data]}) LOGGER.debug('update_record: %s', True) return True
[ "def", "_update_record", "(", "self", ",", "identifier", ",", "rtype", "=", "None", ",", "name", "=", "None", ",", "content", "=", "None", ")", ":", "if", "self", ".", "protocol", "==", "'rpc'", ":", "return", "self", ".", "rpc_helper", ".", "update_record", "(", "identifier", ",", "rtype", ",", "name", ",", "content", ")", "data", "=", "{", "}", "if", "rtype", ":", "data", "[", "'rrset_type'", "]", "=", "rtype", "if", "name", ":", "data", "[", "'rrset_name'", "]", "=", "self", ".", "_relative_name", "(", "name", ")", "if", "content", ":", "if", "isinstance", "(", "content", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "data", "[", "'rrset_values'", "]", "=", "list", "(", "content", ")", "else", ":", "data", "[", "'rrset_values'", "]", "=", "[", "content", "]", "if", "rtype", "is", "not", "None", ":", "# replace the records of a specific rtype", "url", "=", "'/domains/{0}/records/{1}/{2}'", ".", "format", "(", "self", ".", "domain_id", ",", "identifier", "or", "self", ".", "_relative_name", "(", "name", ")", ",", "rtype", ")", "self", ".", "_put", "(", "url", ",", "data", ")", "else", ":", "# replace all records with a matching name", "url", "=", "'/domains/{0}/records/{1}'", ".", "format", "(", "self", ".", "domain_id", ",", "identifier", "or", "self", ".", "_relative_name", "(", "name", ")", ")", "self", ".", "_put", "(", "url", ",", "{", "'items'", ":", "[", "data", "]", "}", ")", "LOGGER", ".", "debug", "(", "'update_record: %s'", ",", "True", ")", "return", "True" ]
43.5625
20.6875
def create_config_file(filename): """ Create main configuration file if it doesn't exist. """ import textwrap from six.moves.urllib import parse if not os.path.exists(filename): old_default_config_file = os.path.join(os.path.dirname(filename), '.tksrc') if os.path.exists(old_default_config_file): upgrade = click.confirm("\n".join(textwrap.wrap( "It looks like you recently updated Taxi. Some " "configuration changes are required. You can either let " "me upgrade your configuration file or do it " "manually.")) + "\n\nProceed with automatic configuration " "file upgrade?", default=True ) if upgrade: settings = Settings(old_default_config_file) settings.convert_to_4() with open(filename, 'w') as config_file: settings.config.write(config_file) os.remove(old_default_config_file) return else: print("Ok then.") sys.exit(0) welcome_msg = "Welcome to Taxi!" click.secho(welcome_msg, fg='green', bold=True) click.secho('=' * len(welcome_msg) + '\n', fg='green', bold=True) click.echo(click.wrap_text( "It looks like this is the first time you run Taxi. You will need " "a configuration file ({}) in order to proceed. Please answer a " "few questions to create your configuration file.".format( filename ) ) + '\n') config = pkg_resources.resource_string('taxi', 'etc/taxirc.sample').decode('utf-8') context = {} available_backends = plugins_registry.get_available_backends() context['backend'] = click.prompt( "Backend you want to use (choices are %s)" % ', '.join(available_backends), type=click.Choice(available_backends) ) context['username'] = click.prompt("Username or token") context['password'] = parse.quote( click.prompt("Password (leave empty if you're using" " a token)", hide_input=True, default=''), safe='' ) # Password can be empty in case of token auth so the ':' separator # is not included in the template config, so we add it if the user # has set a password if context['password']: context['password'] = ':' + context['password'] context['hostname'] = click.prompt( "Hostname of the backend (eg. timesheets.example.com)", type=Hostname() ) editor = Editor().get_editor() context['editor'] = click.prompt( "Editor command to edit your timesheets", default=editor ) templated_config = config.format(**context) directory = os.path.dirname(filename) if not os.path.exists(directory): os.makedirs(directory) with open(filename, 'w') as f: f.write(templated_config) else: settings = Settings(filename) conversions = settings.needed_conversions if conversions: for conversion in conversions: conversion() settings.write_config()
[ "def", "create_config_file", "(", "filename", ")", ":", "import", "textwrap", "from", "six", ".", "moves", ".", "urllib", "import", "parse", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "old_default_config_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "'.tksrc'", ")", "if", "os", ".", "path", ".", "exists", "(", "old_default_config_file", ")", ":", "upgrade", "=", "click", ".", "confirm", "(", "\"\\n\"", ".", "join", "(", "textwrap", ".", "wrap", "(", "\"It looks like you recently updated Taxi. Some \"", "\"configuration changes are required. You can either let \"", "\"me upgrade your configuration file or do it \"", "\"manually.\"", ")", ")", "+", "\"\\n\\nProceed with automatic configuration \"", "\"file upgrade?\"", ",", "default", "=", "True", ")", "if", "upgrade", ":", "settings", "=", "Settings", "(", "old_default_config_file", ")", "settings", ".", "convert_to_4", "(", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "config_file", ":", "settings", ".", "config", ".", "write", "(", "config_file", ")", "os", ".", "remove", "(", "old_default_config_file", ")", "return", "else", ":", "print", "(", "\"Ok then.\"", ")", "sys", ".", "exit", "(", "0", ")", "welcome_msg", "=", "\"Welcome to Taxi!\"", "click", ".", "secho", "(", "welcome_msg", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "click", ".", "secho", "(", "'='", "*", "len", "(", "welcome_msg", ")", "+", "'\\n'", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "click", ".", "echo", "(", "click", ".", "wrap_text", "(", "\"It looks like this is the first time you run Taxi. You will need \"", "\"a configuration file ({}) in order to proceed. Please answer a \"", "\"few questions to create your configuration file.\"", ".", "format", "(", "filename", ")", ")", "+", "'\\n'", ")", "config", "=", "pkg_resources", ".", "resource_string", "(", "'taxi'", ",", "'etc/taxirc.sample'", ")", ".", "decode", "(", "'utf-8'", ")", "context", "=", "{", "}", "available_backends", "=", "plugins_registry", ".", "get_available_backends", "(", ")", "context", "[", "'backend'", "]", "=", "click", ".", "prompt", "(", "\"Backend you want to use (choices are %s)\"", "%", "', '", ".", "join", "(", "available_backends", ")", ",", "type", "=", "click", ".", "Choice", "(", "available_backends", ")", ")", "context", "[", "'username'", "]", "=", "click", ".", "prompt", "(", "\"Username or token\"", ")", "context", "[", "'password'", "]", "=", "parse", ".", "quote", "(", "click", ".", "prompt", "(", "\"Password (leave empty if you're using\"", "\" a token)\"", ",", "hide_input", "=", "True", ",", "default", "=", "''", ")", ",", "safe", "=", "''", ")", "# Password can be empty in case of token auth so the ':' separator", "# is not included in the template config, so we add it if the user", "# has set a password", "if", "context", "[", "'password'", "]", ":", "context", "[", "'password'", "]", "=", "':'", "+", "context", "[", "'password'", "]", "context", "[", "'hostname'", "]", "=", "click", ".", "prompt", "(", "\"Hostname of the backend (eg. 
timesheets.example.com)\"", ",", "type", "=", "Hostname", "(", ")", ")", "editor", "=", "Editor", "(", ")", ".", "get_editor", "(", ")", "context", "[", "'editor'", "]", "=", "click", ".", "prompt", "(", "\"Editor command to edit your timesheets\"", ",", "default", "=", "editor", ")", "templated_config", "=", "config", ".", "format", "(", "*", "*", "context", ")", "directory", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "templated_config", ")", "else", ":", "settings", "=", "Settings", "(", "filename", ")", "conversions", "=", "settings", ".", "needed_conversions", "if", "conversions", ":", "for", "conversion", "in", "conversions", ":", "conversion", "(", ")", "settings", ".", "write_config", "(", ")" ]
36.788889
19.988889
def association_pivot(self, association_resource): """Pivot point on association for this resource. This method will return all *resources* (group, indicators, task, victims, etc) for this resource that are associated with the provided resource. **Example Endpoint URIs** +---------+--------------------------------------------------------------------------------+ | METHOD | API Endpoint URI's | +=========+================================================================================+ | GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType} | +---------+--------------------------------------------------------------------------------+ | GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} | +---------+--------------------------------------------------------------------------------+ | POST | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} | +---------+--------------------------------------------------------------------------------+ | GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType} | +---------+--------------------------------------------------------------------------------+ | GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} | +---------+--------------------------------------------------------------------------------+ | POST | /v2/indicator/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} | +---------+--------------------------------------------------------------------------------+ Args: association_resource: The resource to pivot on, including its resource id. """ resource = self.copy() resource._request_uri = '{}/{}'.format( association_resource.request_uri, resource._request_uri ) return resource
[ "def", "association_pivot", "(", "self", ",", "association_resource", ")", ":", "resource", "=", "self", ".", "copy", "(", ")", "resource", ".", "_request_uri", "=", "'{}/{}'", ".", "format", "(", "association_resource", ".", "request_uri", ",", "resource", ".", "_request_uri", ")", "return", "resource" ]
65.46875
41.5
def _retrieve_page(self, page_index): """Returns the node of matches to be processed""" params = self._get_params() params["page"] = str(page_index) doc = self._request(self._ws_prefix + ".search", True, params) return doc.getElementsByTagName(self._ws_prefix + "matches")[0]
[ "def", "_retrieve_page", "(", "self", ",", "page_index", ")", ":", "params", "=", "self", ".", "_get_params", "(", ")", "params", "[", "\"page\"", "]", "=", "str", "(", "page_index", ")", "doc", "=", "self", ".", "_request", "(", "self", ".", "_ws_prefix", "+", "\".search\"", ",", "True", ",", "params", ")", "return", "doc", ".", "getElementsByTagName", "(", "self", ".", "_ws_prefix", "+", "\"matches\"", ")", "[", "0", "]" ]
38.75
18.625
def constant(interval=1): """Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values. """ try: itr = iter(interval) except TypeError: itr = itertools.repeat(interval) for val in itr: yield val
[ "def", "constant", "(", "interval", "=", "1", ")", ":", "try", ":", "itr", "=", "iter", "(", "interval", ")", "except", "TypeError", ":", "itr", "=", "itertools", ".", "repeat", "(", "interval", ")", "for", "val", "in", "itr", ":", "yield", "val" ]
22.153846
20.538462
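A usage sketch for constant covering both the scalar and iterable cases, assuming itertools is imported in the snippet's module:

gen = constant(5)
[next(gen) for _ in range(3)]  # [5, 5, 5]
list(constant([1, 2]))         # [1, 2]; an iterable is yielded as-is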
def get_missing_params_message(self, parameter_state): """ Get a user-friendly message indicating a missing parameter for the API endpoint. """ params = ', '.join(name for name, present in parameter_state if not present) return self.MISSING_REQUIRED_PARAMS_MSG.format(params)
[ "def", "get_missing_params_message", "(", "self", ",", "parameter_state", ")", ":", "params", "=", "', '", ".", "join", "(", "name", "for", "name", ",", "present", "in", "parameter_state", "if", "not", "present", ")", "return", "self", ".", "MISSING_REQUIRED_PARAMS_MSG", ".", "format", "(", "params", ")" ]
51.666667
21.333333
def create(self, using=None, **kwargs): """ Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged. """ self._get_connection(using).indices.create(index=self._name, body=self.to_dict(), **kwargs)
[ "def", "create", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "create", "(", "index", "=", "self", ".", "_name", ",", "body", "=", "self", ".", "to_dict", "(", ")", ",", "*", "*", "kwargs", ")" ]
39
16.5
def request(req=None, method=None, requires_response=True): """Call function req and then emit its results to the LSP server.""" if req is None: return functools.partial(request, method=method, requires_response=requires_response) @functools.wraps(req) def wrapper(self, *args, **kwargs): if self.lsp_ready: params = req(self, *args, **kwargs) if params is not None: self.emit_request(method, params, requires_response) return wrapper
[ "def", "request", "(", "req", "=", "None", ",", "method", "=", "None", ",", "requires_response", "=", "True", ")", ":", "if", "req", "is", "None", ":", "return", "functools", ".", "partial", "(", "request", ",", "method", "=", "method", ",", "requires_response", "=", "requires_response", ")", "@", "functools", ".", "wraps", "(", "req", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "lsp_ready", ":", "params", "=", "req", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "params", "is", "not", "None", ":", "self", ".", "emit_request", "(", "method", ",", "params", ",", "requires_response", ")", "return", "wrapper" ]
40.923077
16.769231
def _populate_cmd_lists(self): """ Populate the command lists and hashes: self.commands, self.aliases, and self.category """ self.commands = {} self.aliases = {} self.category = {} # self.short_help = {} for cmd_instance in self.cmd_instances: if not hasattr(cmd_instance, 'aliases'): continue alias_names = cmd_instance.aliases cmd_name = cmd_instance.name self.commands[cmd_name] = cmd_instance for alias_name in alias_names: self.aliases[alias_name] = cmd_name pass cat = getattr(cmd_instance, 'category') if cat and self.category.get(cat): self.category[cat].append(cmd_name) else: self.category[cat] = [cmd_name] pass # sh = getattr(cmd_instance, 'short_help') # if sh: # self.short_help[cmd_name] = getattr(c, 'short_help') # pass pass for k in list(self.category.keys()): self.category[k].sort() pass return
[ "def", "_populate_cmd_lists", "(", "self", ")", ":", "self", ".", "commands", "=", "{", "}", "self", ".", "aliases", "=", "{", "}", "self", ".", "category", "=", "{", "}", "# self.short_help = {}", "for", "cmd_instance", "in", "self", ".", "cmd_instances", ":", "if", "not", "hasattr", "(", "cmd_instance", ",", "'aliases'", ")", ":", "continue", "alias_names", "=", "cmd_instance", ".", "aliases", "cmd_name", "=", "cmd_instance", ".", "name", "self", ".", "commands", "[", "cmd_name", "]", "=", "cmd_instance", "for", "alias_name", "in", "alias_names", ":", "self", ".", "aliases", "[", "alias_name", "]", "=", "cmd_name", "pass", "cat", "=", "getattr", "(", "cmd_instance", ",", "'category'", ")", "if", "cat", "and", "self", ".", "category", ".", "get", "(", "cat", ")", ":", "self", ".", "category", "[", "cat", "]", ".", "append", "(", "cmd_name", ")", "else", ":", "self", ".", "category", "[", "cat", "]", "=", "[", "cmd_name", "]", "pass", "# sh = getattr(cmd_instance, 'short_help')", "# if sh:", "# self.short_help[cmd_name] = getattr(c, 'short_help')", "# pass", "pass", "for", "k", "in", "list", "(", "self", ".", "category", ".", "keys", "(", ")", ")", ":", "self", ".", "category", "[", "k", "]", ".", "sort", "(", ")", "pass", "return" ]
36.096774
13.677419
def get_shutit_pexpect_session_from_child(self, shutit_pexpect_child): """Given a pexpect/child object, return the shutit_pexpect_session object. """ shutit_global.shutit_global_object.yield_to_draw() if not isinstance(shutit_pexpect_child, pexpect.pty_spawn.spawn): self.fail('Wrong type in get_shutit_pexpect_session_child: ' + str(type(shutit_pexpect_child)),throw_exception=True) # pragma: no cover for key in self.shutit_pexpect_sessions: if self.shutit_pexpect_sessions[key].pexpect_child == shutit_pexpect_child: return self.shutit_pexpect_sessions[key] return self.fail('Should not get here in get_shutit_pexpect_session',throw_exception=True)
[ "def", "get_shutit_pexpect_session_from_child", "(", "self", ",", "shutit_pexpect_child", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "if", "not", "isinstance", "(", "shutit_pexpect_child", ",", "pexpect", ".", "pty_spawn", ".", "spawn", ")", ":", "self", ".", "fail", "(", "'Wrong type in get_shutit_pexpect_session_child: '", "+", "str", "(", "type", "(", "shutit_pexpect_child", ")", ")", ",", "throw_exception", "=", "True", ")", "# pragma: no cover", "for", "key", "in", "self", ".", "shutit_pexpect_sessions", ":", "if", "self", ".", "shutit_pexpect_sessions", "[", "key", "]", ".", "pexpect_child", "==", "shutit_pexpect_child", ":", "return", "self", ".", "shutit_pexpect_sessions", "[", "key", "]", "return", "self", ".", "fail", "(", "'Should not get here in get_shutit_pexpect_session'", ",", "throw_exception", "=", "True", ")" ]
66.4
26.3
def check_submission_successful(self, submission_id=None): """Check if the last submission passes submission criteria. Args: submission_id (str, optional): submission of interest, defaults to the last submission done with the account Return: bool: True if the submission passed all checks, False otherwise. Example: >>> api = NumerAPI(secret_key="..", public_id="..") >>> api.upload_predictions("predictions.csv") >>> api.check_submission_successful() True """ status = self.submission_status(submission_id) # need to cast to bool to not return None in some cases. success = bool(status["concordance"]["value"]) return success
[ "def", "check_submission_successful", "(", "self", ",", "submission_id", "=", "None", ")", ":", "status", "=", "self", ".", "submission_status", "(", "submission_id", ")", "# need to cast to bool to not return None in some cases.", "success", "=", "bool", "(", "status", "[", "\"concordance\"", "]", "[", "\"value\"", "]", ")", "return", "success" ]
38.5
22.4
def parse_end_date(self, request, start_date): """ Return period in days after the start date to show event occurrences, which is one of the following in order of priority: - `end_date` GET parameter value, if given and valid. The filtering will be *inclusive* of the end date: until end-of-day of this date - `days_to_show` GET parameter value, if given and valid - page's `default_days_to_show` if set - the value of the app setting `DEFAULT_DAYS_TO_SHOW` """ if request.GET.get('end_date'): try: return djtz.parse('%s 00:00' % request.GET.get('end_date')) except ValueError: pass days_to_show = self.default_days_to_show or \ appsettings.DEFAULT_DAYS_TO_SHOW if 'days_to_show' in request.GET: try: days_to_show = int(request.GET.get('days_to_show')) except ValueError: pass return start_date + timedelta(days=days_to_show)
[ "def", "parse_end_date", "(", "self", ",", "request", ",", "start_date", ")", ":", "if", "request", ".", "GET", ".", "get", "(", "'end_date'", ")", ":", "try", ":", "return", "djtz", ".", "parse", "(", "'%s 00:00'", "%", "request", ".", "GET", ".", "get", "(", "'end_date'", ")", ")", "except", "ValueError", ":", "pass", "days_to_show", "=", "self", ".", "default_days_to_show", "or", "appsettings", ".", "DEFAULT_DAYS_TO_SHOW", "if", "'days_to_show'", "in", "request", ".", "GET", ":", "try", ":", "days_to_show", "=", "int", "(", "request", ".", "GET", ".", "get", "(", "'days_to_show'", ")", ")", "except", "ValueError", ":", "pass", "return", "start_date", "+", "timedelta", "(", "days", "=", "days_to_show", ")" ]
45.130435
17.130435
def main(): """ Upload a package to PyPI or Gemfury. :return: """ setup_main() config = ConfigData(clean=True) try: config.upload() finally: config.clean_after_if_needed()
[ "def", "main", "(", ")", ":", "setup_main", "(", ")", "config", "=", "ConfigData", "(", "clean", "=", "True", ")", "try", ":", "config", ".", "upload", "(", ")", "finally", ":", "config", ".", "clean_after_if_needed", "(", ")" ]
18.909091
15.090909
def paste( self ): """ Pastes text from the clipboard. """ text = nativestring(QApplication.clipboard().text()) for tag in text.split(','): tag = tag.strip() if ( self.isTagValid(tag) ): self.addTag(tag)
[ "def", "paste", "(", "self", ")", ":", "text", "=", "nativestring", "(", "QApplication", ".", "clipboard", "(", ")", ".", "text", "(", ")", ")", "for", "tag", "in", "text", ".", "split", "(", "','", ")", ":", "tag", "=", "tag", ".", "strip", "(", ")", "if", "(", "self", ".", "isTagValid", "(", "tag", ")", ")", ":", "self", ".", "addTag", "(", "tag", ")" ]
30.555556
7.444444
def get_git_remote_url(path='.', remote='origin'): """ Get git remote url :param path: path to repo :param remote: :return: remote url or exception """ return dulwich.repo.Repo.discover(path).get_config()\ .get((b'remote', remote.encode('utf-8')), b'url').decode('utf-8')
[ "def", "get_git_remote_url", "(", "path", "=", "'.'", ",", "remote", "=", "'origin'", ")", ":", "return", "dulwich", ".", "repo", ".", "Repo", ".", "discover", "(", "path", ")", ".", "get_config", "(", ")", ".", "get", "(", "(", "b'remote'", ",", "remote", ".", "encode", "(", "'utf-8'", ")", ")", ",", "b'url'", ")", ".", "decode", "(", "'utf-8'", ")" ]
33.222222
12.777778
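A hedged usage sketch for the dulwich-based lookup above, restated standalone so it runs from inside any git checkout (the printed URL is a placeholder):

```python
import dulwich.repo

def get_git_remote_url(path=".", remote="origin"):
    # Reads the [remote "origin"] url entry from the discovered repo's config.
    return dulwich.repo.Repo.discover(path).get_config() \
        .get((b"remote", remote.encode("utf-8")), b"url").decode("utf-8")

print(get_git_remote_url("."))  # e.g. git@github.com:user/repo.git
```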
def cli(verbose): """ Floyd CLI interacts with FloydHub server and executes your commands. More help is available under each command listed below. """ floyd.floyd_host = floyd.floyd_web_host = "https://dev.floydhub.com" floyd.tus_server_endpoint = "https://upload-v2-dev.floydhub.com/api/v1/upload/" configure_logger(verbose) check_cli_version()
[ "def", "cli", "(", "verbose", ")", ":", "floyd", ".", "floyd_host", "=", "floyd", ".", "floyd_web_host", "=", "\"https://dev.floydhub.com\"", "floyd", ".", "tus_server_endpoint", "=", "\"https://upload-v2-dev.floydhub.com/api/v1/upload/\"", "configure_logger", "(", "verbose", ")", "check_cli_version", "(", ")" ]
41
19.666667
def _rdheader(fp): """ Read header info of the windaq file """ tag = None # The '2' tag indicates the end of tags. while tag != 2: # For each header element, there is a tag indicating data type, # followed by the data size, followed by the data itself. 0's # pad the content to the nearest 4 bytes. If data_len=0, no pad. tag = struct.unpack('>H', fp.read(2))[0] data_size = struct.unpack('>H', fp.read(2))[0] pad_len = (4 - (data_size % 4)) % 4 pos = fp.tell() # Currently, most tags will be ignored... # storage method if tag == 1001: storage_method = fs = struct.unpack('B', fp.read(1))[0] storage_method = {0:'recording', 1:'manual', 2:'online'}[storage_method] # fs, unit16 elif tag == 1003: fs = struct.unpack('>H', fp.read(2))[0] # sensor type elif tag == 1007: # Each byte contains information for one channel n_sig = data_size channel_data = struct.unpack('>%dB' % data_size, fp.read(data_size)) # The documentation states: "0 : Channel is not used" # This means the samples are NOT saved. channel_map = ((1, 1, 'emg'), (15, 30, 'goniometer'), (31, 46, 'accelerometer'), (47, 62, 'inclinometer'), (63, 78, 'polar_interface'), (79, 94, 'ecg'), (95, 110, 'torque'), (111, 126, 'gyrometer'), (127, 142, 'sensor')) sig_name = [] # The number range that the data lies between gives the # channel for data in channel_data: # Default case if byte value falls outside of channel map base_name = 'unknown' # Unused channel if data == 0: n_sig -= 1 break for item in channel_map: if item[0] <= data <= item[1]: base_name = item[2] break existing_count = [base_name in name for name in sig_name].count(True) sig_name.append('%s_%d' % (base_name, existing_count)) # Display scale. Probably not useful. elif tag == 1009: # 100, 500, 1000, 2500, or 8500uV display_scale = struct.unpack('>I', fp.read(4))[0] # sample format, uint8 elif tag == 3: sample_fmt = struct.unpack('B', fp.read(1))[0] is_signed = bool(sample_fmt >> 7) # ie. 8 or 16 bits bit_width = sample_fmt & 127 # Measurement start time - seconds from 1.1.1970 UTC elif tag == 101: n_seconds = struct.unpack('>I', fp.read(4))[0] base_datetime = datetime.datetime.utcfromtimestamp(n_seconds) base_date = base_datetime.date() base_time = base_datetime.time() # Measurement start time - minutes from UTC elif tag == 102: n_minutes = struct.unpack('>h', fp.read(2))[0] # Go to the next tag fp.seek(pos + data_size + pad_len) header_size = fp.tell() # For interpreting the waveforms fields = {'fs':fs, 'n_sig':n_sig, 'sig_name':sig_name, 'base_time':base_time, 'base_date':base_date} # For reading the signal samples file_fields = {'header_size':header_size, 'n_sig':n_sig, 'bit_width':bit_width, 'is_signed':is_signed} return fields, file_fields
[ "def", "_rdheader", "(", "fp", ")", ":", "tag", "=", "None", "# The '2' tag indicates the end of tags.", "while", "tag", "!=", "2", ":", "# For each header element, there is a tag indicating data type,", "# followed by the data size, followed by the data itself. 0's", "# pad the content to the nearest 4 bytes. If data_len=0, no pad.", "tag", "=", "struct", ".", "unpack", "(", "'>H'", ",", "fp", ".", "read", "(", "2", ")", ")", "[", "0", "]", "data_size", "=", "struct", ".", "unpack", "(", "'>H'", ",", "fp", ".", "read", "(", "2", ")", ")", "[", "0", "]", "pad_len", "=", "(", "4", "-", "(", "data_size", "%", "4", ")", ")", "%", "4", "pos", "=", "fp", ".", "tell", "(", ")", "# Currently, most tags will be ignored...", "# storage method", "if", "tag", "==", "1001", ":", "storage_method", "=", "fs", "=", "struct", ".", "unpack", "(", "'B'", ",", "fp", ".", "read", "(", "1", ")", ")", "[", "0", "]", "storage_method", "=", "{", "0", ":", "'recording'", ",", "1", ":", "'manual'", ",", "2", ":", "'online'", "}", "[", "storage_method", "]", "# fs, unit16", "elif", "tag", "==", "1003", ":", "fs", "=", "struct", ".", "unpack", "(", "'>H'", ",", "fp", ".", "read", "(", "2", ")", ")", "[", "0", "]", "# sensor type", "elif", "tag", "==", "1007", ":", "# Each byte contains information for one channel", "n_sig", "=", "data_size", "channel_data", "=", "struct", ".", "unpack", "(", "'>%dB'", "%", "data_size", ",", "fp", ".", "read", "(", "data_size", ")", ")", "# The documentation states: \"0 : Channel is not used\"", "# This means the samples are NOT saved.", "channel_map", "=", "(", "(", "1", ",", "1", ",", "'emg'", ")", ",", "(", "15", ",", "30", ",", "'goniometer'", ")", ",", "(", "31", ",", "46", ",", "'accelerometer'", ")", ",", "(", "47", ",", "62", ",", "'inclinometer'", ")", ",", "(", "63", ",", "78", ",", "'polar_interface'", ")", ",", "(", "79", ",", "94", ",", "'ecg'", ")", ",", "(", "95", ",", "110", ",", "'torque'", ")", ",", "(", "111", ",", "126", ",", "'gyrometer'", ")", ",", "(", "127", ",", "142", ",", "'sensor'", ")", ")", "sig_name", "=", "[", "]", "# The number range that the data lies between gives the", "# channel", "for", "data", "in", "channel_data", ":", "# Default case if byte value falls outside of channel map", "base_name", "=", "'unknown'", "# Unused channel", "if", "data", "==", "0", ":", "n_sig", "-=", "1", "break", "for", "item", "in", "channel_map", ":", "if", "item", "[", "0", "]", "<=", "data", "<=", "item", "[", "1", "]", ":", "base_name", "=", "item", "[", "2", "]", "break", "existing_count", "=", "[", "base_name", "in", "name", "for", "name", "in", "sig_name", "]", ".", "count", "(", "True", ")", "sig_name", ".", "append", "(", "'%s_%d'", "%", "(", "base_name", ",", "existing_count", ")", ")", "# Display scale. Probably not useful.", "elif", "tag", "==", "1009", ":", "# 100, 500, 1000, 2500, or 8500uV", "display_scale", "=", "struct", ".", "unpack", "(", "'>I'", ",", "fp", ".", "read", "(", "4", ")", ")", "[", "0", "]", "# sample format, uint8", "elif", "tag", "==", "3", ":", "sample_fmt", "=", "struct", ".", "unpack", "(", "'B'", ",", "fp", ".", "read", "(", "1", ")", ")", "[", "0", "]", "is_signed", "=", "bool", "(", "sample_fmt", ">>", "7", ")", "# ie. 
8 or 16 bits", "bit_width", "=", "sample_fmt", "&", "127", "# Measurement start time - seconds from 1.1.1970 UTC", "elif", "tag", "==", "101", ":", "n_seconds", "=", "struct", ".", "unpack", "(", "'>I'", ",", "fp", ".", "read", "(", "4", ")", ")", "[", "0", "]", "base_datetime", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "n_seconds", ")", "base_date", "=", "base_datetime", ".", "date", "(", ")", "base_time", "=", "base_datetime", ".", "time", "(", ")", "# Measurement start time - minutes from UTC", "elif", "tag", "==", "102", ":", "n_minutes", "=", "struct", ".", "unpack", "(", "'>h'", ",", "fp", ".", "read", "(", "2", ")", ")", "[", "0", "]", "# Go to the next tag", "fp", ".", "seek", "(", "pos", "+", "data_size", "+", "pad_len", ")", "header_size", "=", "fp", ".", "tell", "(", ")", "# For interpreting the waveforms", "fields", "=", "{", "'fs'", ":", "fs", ",", "'n_sig'", ":", "n_sig", ",", "'sig_name'", ":", "sig_name", ",", "'base_time'", ":", "base_time", ",", "'base_date'", ":", "base_date", "}", "# For reading the signal samples", "file_fields", "=", "{", "'header_size'", ":", "header_size", ",", "'n_sig'", ":", "n_sig", ",", "'bit_width'", ":", "bit_width", ",", "'is_signed'", ":", "is_signed", "}", "return", "fields", ",", "file_fields" ]
44.1875
15.5875
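A synthetic illustration of the tag / data_size / 4-byte-pad framing that `_rdheader` above walks; these bytes are made up for the sketch, not a real windaq header:

```python
import io
import struct

buf = io.BytesIO()
payload = struct.pack(">H", 500)                             # pretend fs = 500 Hz
buf.write(struct.pack(">HH", 1003, len(payload)))            # tag 1003 + data_size
buf.write(payload + b"\x00" * ((4 - len(payload) % 4) % 4))  # pad to 4 bytes
buf.write(struct.pack(">HH", 2, 0))                          # tag 2 ends the header

buf.seek(0)
tag = None
while tag != 2:
    tag, data_size = struct.unpack(">HH", buf.read(4))
    pos = buf.tell()
    if tag == 1003:
        print("fs =", struct.unpack(">H", buf.read(2))[0])   # fs = 500
    buf.seek(pos + data_size + (4 - data_size % 4) % 4)      # skip data + pad
```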
def _get_major_minor_revision(self, version_string): """Split a version string into major, minor and (optionally) revision parts. This is complicated by the fact that a version string can be something like 3.2b1.""" version = version_string.split(' ')[0].split('.') v_major = int(version[0]) v_minor = int(re.match('\d+', version[1]).group()) if len(version) >= 3: v_revision = int(re.match('\d+', version[2]).group()) else: v_revision = 0 return v_major, v_minor, v_revision
[ "def", "_get_major_minor_revision", "(", "self", ",", "version_string", ")", ":", "version", "=", "version_string", ".", "split", "(", "' '", ")", "[", "0", "]", ".", "split", "(", "'.'", ")", "v_major", "=", "int", "(", "version", "[", "0", "]", ")", "v_minor", "=", "int", "(", "re", ".", "match", "(", "'\\d+'", ",", "version", "[", "1", "]", ")", ".", "group", "(", ")", ")", "if", "len", "(", "version", ")", ">=", "3", ":", "v_revision", "=", "int", "(", "re", ".", "match", "(", "'\\d+'", ",", "version", "[", "2", "]", ")", ".", "group", "(", ")", ")", "else", ":", "v_revision", "=", "0", "return", "v_major", ",", "v_minor", ",", "v_revision" ]
40.5
15.642857
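A standalone restatement of the version parser above (with raw-string patterns) plus sanity checks; the sample version strings are assumptions:

```python
import re

def major_minor_revision(version_string):
    # Same logic as the record above: split off trailing text, then split on dots.
    version = version_string.split(" ")[0].split(".")
    v_major = int(version[0])
    v_minor = int(re.match(r"\d+", version[1]).group())
    v_revision = int(re.match(r"\d+", version[2]).group()) if len(version) >= 3 else 0
    return v_major, v_minor, v_revision

assert major_minor_revision("3.2b1") == (3, 2, 0)             # beta suffix stripped
assert major_minor_revision("2.7.16 (default)") == (2, 7, 16)
```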
def estimate_hmm(observations, nstates, lag=1, initial_model=None, output=None, reversible=True, stationary=False, p=None, accuracy=1e-3, maxit=1000, maxit_P=100000, mincount_connectivity=1e-2): r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for EM iteration. When the likelihood does not increase by more than accuracy, the iteration is stopped successfully. maxit : int stopping criterion for EM iteration. When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Returns ------- hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>` """ # select output model type if output is None: output = _guess_output_type(observations) if lag > 1: observations = lag_observations(observations, lag) # construct estimator from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator est = _MaximumLikelihoodEstimator(observations, nstates, initial_model=initial_model, output=output, reversible=reversible, stationary=stationary, p=p, accuracy=accuracy, maxit=maxit, maxit_P=maxit_P) # run est.fit() # set lag time est.hmm._lag = lag # return model # TODO: package into specific class (DiscreteHMM, GaussianHMM) return est.hmm
[ "def", "estimate_hmm", "(", "observations", ",", "nstates", ",", "lag", "=", "1", ",", "initial_model", "=", "None", ",", "output", "=", "None", ",", "reversible", "=", "True", ",", "stationary", "=", "False", ",", "p", "=", "None", ",", "accuracy", "=", "1e-3", ",", "maxit", "=", "1000", ",", "maxit_P", "=", "100000", ",", "mincount_connectivity", "=", "1e-2", ")", ":", "# select output model type", "if", "output", "is", "None", ":", "output", "=", "_guess_output_type", "(", "observations", ")", "if", "lag", ">", "1", ":", "observations", "=", "lag_observations", "(", "observations", ",", "lag", ")", "# construct estimator", "from", "bhmm", ".", "estimators", ".", "maximum_likelihood", "import", "MaximumLikelihoodEstimator", "as", "_MaximumLikelihoodEstimator", "est", "=", "_MaximumLikelihoodEstimator", "(", "observations", ",", "nstates", ",", "initial_model", "=", "initial_model", ",", "output", "=", "output", ",", "reversible", "=", "reversible", ",", "stationary", "=", "stationary", ",", "p", "=", "p", ",", "accuracy", "=", "accuracy", ",", "maxit", "=", "maxit", ",", "maxit_P", "=", "maxit_P", ")", "# run", "est", ".", "fit", "(", ")", "# set lag time", "est", ".", "hmm", ".", "_lag", "=", "lag", "# return model", "# TODO: package into specific class (DiscreteHMM, GaussianHMM)", "return", "est", ".", "hmm" ]
49.78125
31.046875
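A hedged usage sketch for the estimator above, assuming `estimate_hmm` is importable (e.g. `from bhmm import estimate_hmm`); the synthetic trajectories and the `transition_matrix` attribute on the returned model are assumptions:

```python
import numpy as np

# Two short synthetic discrete observation trajectories over three symbols.
observations = [np.array([0, 0, 1, 1, 2, 2, 1, 0]),
                np.array([2, 2, 1, 0, 0, 1, 2, 2])]

hmm = estimate_hmm(observations, nstates=2, lag=1, output="discrete",
                   reversible=True, accuracy=1e-3, maxit=1000)
print(hmm.transition_matrix)  # assumed attribute of the returned HMM
```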
def make_python_patterns(additional_keywords=[], additional_builtins=[]): """Strongly inspired from idlelib.ColorDelegator.make_pat""" kw = r"\b" + any("keyword", kwlist + additional_keywords) + r"\b" kw_namespace = r"\b" + any("namespace", kw_namespace_list) + r"\b" word_operators = r"\b" + any("operator_word", wordop_list) + r"\b" builtinlist = [str(name) for name in dir(builtins) if not name.startswith('_')] + additional_builtins for v in ['None', 'True', 'False']: builtinlist.remove(v) builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b" builtin_fct = any("builtin_fct", [r'_{2}[a-zA-Z_]*_{2}']) comment = any("comment", [r"#[^\n]*"]) instance = any("instance", [r"\bself\b", r"\bcls\b"]) decorator = any('decorator', [r'@\w*', r'.setter']) number = any("number", [r"\b[+-]?[0-9]+[lLjJ]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?0[oO][0-7]+[lL]?\b", r"\b[+-]?0[bB][01]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"]) sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' uf_sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$" uf_dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$' sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$" uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$' string = any("string", [sq3string, dq3string, sqstring, dqstring]) ufstring1 = any("uf_sqstring", [uf_sqstring]) ufstring2 = any("uf_dqstring", [uf_dqstring]) ufstring3 = any("uf_sq3string", [uf_sq3string]) ufstring4 = any("uf_dq3string", [uf_dq3string]) return "|".join([instance, decorator, kw, kw_namespace, builtin, word_operators, builtin_fct, comment, ufstring1, ufstring2, ufstring3, ufstring4, string, number, any("SYNC", [r"\n"])])
[ "def", "make_python_patterns", "(", "additional_keywords", "=", "[", "]", ",", "additional_builtins", "=", "[", "]", ")", ":", "kw", "=", "r\"\\b\"", "+", "any", "(", "\"keyword\"", ",", "kwlist", "+", "additional_keywords", ")", "+", "r\"\\b\"", "kw_namespace", "=", "r\"\\b\"", "+", "any", "(", "\"namespace\"", ",", "kw_namespace_list", ")", "+", "r\"\\b\"", "word_operators", "=", "r\"\\b\"", "+", "any", "(", "\"operator_word\"", ",", "wordop_list", ")", "+", "r\"\\b\"", "builtinlist", "=", "[", "str", "(", "name", ")", "for", "name", "in", "dir", "(", "builtins", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "]", "+", "additional_builtins", "for", "v", "in", "[", "'None'", ",", "'True'", ",", "'False'", "]", ":", "builtinlist", ".", "remove", "(", "v", ")", "builtin", "=", "r\"([^.'\\\"\\\\#]\\b|^)\"", "+", "any", "(", "\"builtin\"", ",", "builtinlist", ")", "+", "r\"\\b\"", "builtin_fct", "=", "any", "(", "\"builtin_fct\"", ",", "[", "r'_{2}[a-zA-Z_]*_{2}'", "]", ")", "comment", "=", "any", "(", "\"comment\"", ",", "[", "r\"#[^\\n]*\"", "]", ")", "instance", "=", "any", "(", "\"instance\"", ",", "[", "r\"\\bself\\b\"", ",", "r\"\\bcls\\b\"", "]", ")", "decorator", "=", "any", "(", "'decorator'", ",", "[", "r'@\\w*'", ",", "r'.setter'", "]", ")", "number", "=", "any", "(", "\"number\"", ",", "[", "r\"\\b[+-]?[0-9]+[lLjJ]?\\b\"", ",", "r\"\\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\\b\"", ",", "r\"\\b[+-]?0[oO][0-7]+[lL]?\\b\"", ",", "r\"\\b[+-]?0[bB][01]+[lL]?\\b\"", ",", "r\"\\b[+-]?[0-9]+(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\\b\"", "]", ")", "sqstring", "=", "r\"(\\b[rRuU])?'[^'\\\\\\n]*(\\\\.[^'\\\\\\n]*)*'?\"", "dqstring", "=", "r'(\\b[rRuU])?\"[^\"\\\\\\n]*(\\\\.[^\"\\\\\\n]*)*\"?'", "uf_sqstring", "=", "r\"(\\b[rRuU])?'[^'\\\\\\n]*(\\\\.[^'\\\\\\n]*)*(\\\\)$(?!')$\"", "uf_dqstring", "=", "r'(\\b[rRuU])?\"[^\"\\\\\\n]*(\\\\.[^\"\\\\\\n]*)*(\\\\)$(?!\")$'", "sq3string", "=", "r\"(\\b[rRuU])?'''[^'\\\\]*((\\\\.|'(?!''))[^'\\\\]*)*(''')?\"", "dq3string", "=", "r'(\\b[rRuU])?\"\"\"[^\"\\\\]*((\\\\.|\"(?!\"\"))[^\"\\\\]*)*(\"\"\")?'", "uf_sq3string", "=", "r\"(\\b[rRuU])?'''[^'\\\\]*((\\\\.|'(?!''))[^'\\\\]*)*(\\\\)?(?!''')$\"", "uf_dq3string", "=", "r'(\\b[rRuU])?\"\"\"[^\"\\\\]*((\\\\.|\"(?!\"\"))[^\"\\\\]*)*(\\\\)?(?!\"\"\")$'", "string", "=", "any", "(", "\"string\"", ",", "[", "sq3string", ",", "dq3string", ",", "sqstring", ",", "dqstring", "]", ")", "ufstring1", "=", "any", "(", "\"uf_sqstring\"", ",", "[", "uf_sqstring", "]", ")", "ufstring2", "=", "any", "(", "\"uf_dqstring\"", ",", "[", "uf_dqstring", "]", ")", "ufstring3", "=", "any", "(", "\"uf_sq3string\"", ",", "[", "uf_sq3string", "]", ")", "ufstring4", "=", "any", "(", "\"uf_dq3string\"", ",", "[", "uf_dq3string", "]", ")", "return", "\"|\"", ".", "join", "(", "[", "instance", ",", "decorator", ",", "kw", ",", "kw_namespace", ",", "builtin", ",", "word_operators", ",", "builtin_fct", ",", "comment", ",", "ufstring1", ",", "ufstring2", ",", "ufstring3", ",", "ufstring4", ",", "string", ",", "number", ",", "any", "(", "\"SYNC\"", ",", "[", "r\"\\n\"", "]", ")", "]", ")" ]
58.864865
17.783784
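A sketch of the mechanism the pattern builder above relies on: `any(name, alternates)` wraps alternatives in a named group, and the highlighter identifies the token category via `m.lastgroup`. This simplified pattern (number/comment only) is an illustration, not the full Python grammar assembled above:

```python
import re

def any(name, alternates):
    # Same helper shape the record assumes: a named alternation group.
    return "(?P<%s>" % name + "|".join(alternates) + ")"

pat = re.compile("|".join([any("number", [r"\b[0-9]+\b"]),
                           any("comment", [r"#[^\n]*"])]))
for m in pat.finditer("x = 42  # answer"):
    print(m.lastgroup, repr(m.group()))  # number '42', then comment '# answer'
```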
def save(self, *args, **kwargs): """ Before saving, execute 'perform_bulk_pubmed_query()'. """ self.perform_bulk_pubmed_query() super().save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "perform_bulk_pubmed_query", "(", ")", "super", "(", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
32
5.333333
def _round(self, number): """ Helper function for rounding-as-taught-in-school (X.5 rounds to X+1 if positive). Python 3 now rounds 0.5 to whichever side is even (i.e. 2.5 rounds to 2). :param int number: a float to round. :return: closest integer to number, rounding ties away from 0. """ sign = 1 if number >= 0 else -1 rounded = int(round(number)) nextRounded = int(round(number + 1 * sign)) if nextRounded == rounded: # We rounded X.5 to even, and it was also away from 0. return rounded elif nextRounded == rounded + 1 * sign: # We rounded normally (we are in Python 2) return rounded elif nextRounded == rounded + 2 * sign: # We rounded X.5 to even, but it was towards 0. # Go away from 0 instead. return rounded + 1 * sign else: # If we get here, something has gone wrong. raise RuntimeError("Could not round {}".format(number))
[ "def", "_round", "(", "self", ",", "number", ")", ":", "sign", "=", "1", "if", "number", ">=", "0", "else", "-", "1", "rounded", "=", "int", "(", "round", "(", "number", ")", ")", "nextRounded", "=", "int", "(", "round", "(", "number", "+", "1", "*", "sign", ")", ")", "if", "nextRounded", "==", "rounded", ":", "# We rounded X.5 to even, and it was also away from 0.", "return", "rounded", "elif", "nextRounded", "==", "rounded", "+", "1", "*", "sign", ":", "# We rounded normally (we are in Python 2)", "return", "rounded", "elif", "nextRounded", "==", "rounded", "+", "2", "*", "sign", ":", "# We rounded X.5 to even, but it was towards 0.", "# Go away from 0 instead.", "return", "rounded", "+", "1", "*", "sign", "else", ":", "# If we get here, something has gone wrong.", "raise", "RuntimeError", "(", "\"Could not round {}\"", ".", "format", "(", "number", ")", ")" ]
39.148148
17.222222
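A compact standalone equivalent of the guarantee above (round half away from zero), useful for checking behaviour against Python 3's banker's rounding:

```python
import math

def round_half_away(x):
    # Half-away-from-zero: X.5 moves to X+1 for positive x, X-1 for negative x.
    return int(math.floor(x + 0.5)) if x >= 0 else int(math.ceil(x - 0.5))

assert round(2.5) == 2            # Python 3 rounds ties to even
assert round_half_away(2.5) == 3  # school rounding
assert round_half_away(-2.5) == -3
```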
def imshow(self, key): """Show item's image""" data = self.model.get_data() import spyder.pyplot as plt plt.figure() plt.imshow(data[key]) plt.show()
[ "def", "imshow", "(", "self", ",", "key", ")", ":", "data", "=", "self", ".", "model", ".", "get_data", "(", ")", "import", "spyder", ".", "pyplot", "as", "plt", "plt", ".", "figure", "(", ")", "plt", ".", "imshow", "(", "data", "[", "key", "]", ")", "plt", ".", "show", "(", ")" ]
28.142857
10.714286
def add(self, isoel, col1, col2, channel_width, flow_rate, viscosity, method): """Add isoelastics Parameters ---------- isoel: list of ndarrays Each list item resembles one isoelastic line stored as an array of shape (N,3). The last column contains the emodulus data. col1: str Name of the first feature of all isoelastics (e.g. isoel[0][:,0]) col2: str Name of the second feature of all isoelastics (e.g. isoel[0][:,1]) channel_width: float Channel width in µm flow_rate: float Flow rate through the channel in µl/s viscosity: float Viscosity of the medium in mPa*s method: str The method used to compute the isoelastics (must be one of `VALID_METHODS`). Notes ----- The following isoelastics are automatically added for user convenience: - isoelastics with `col1` and `col2` interchanged - isoelastics for circularity if deformation was given """ if method not in VALID_METHODS: validstr = ",".join(VALID_METHODS) raise ValueError("`method` must be one of {}!".format(validstr)) for col in [col1, col2]: if col not in dfn.scalar_feature_names: raise ValueError("Not a valid feature name: {}".format(col)) meta = [channel_width, flow_rate, viscosity] # Add the feature data self._add(isoel, col1, col2, method, meta) # Also add the feature data for circularity if "deform" in [col1, col2]: col1c, col2c = col1, col2 if col1c == "deform": deform_ax = 0 col1c = "circ" else: deform_ax = 1 col2c = "circ" iso_circ = [] for iso in isoel: iso = iso.copy() iso[:, deform_ax] = 1 - iso[:, deform_ax] iso_circ.append(iso) self._add(iso_circ, col1c, col2c, method, meta)
[ "def", "add", "(", "self", ",", "isoel", ",", "col1", ",", "col2", ",", "channel_width", ",", "flow_rate", ",", "viscosity", ",", "method", ")", ":", "if", "method", "not", "in", "VALID_METHODS", ":", "validstr", "=", "\",\"", ".", "join", "(", "VALID_METHODS", ")", "raise", "ValueError", "(", "\"`method` must be one of {}!\"", ".", "format", "(", "validstr", ")", ")", "for", "col", "in", "[", "col1", ",", "col2", "]", ":", "if", "col", "not", "in", "dfn", ".", "scalar_feature_names", ":", "raise", "ValueError", "(", "\"Not a valid feature name: {}\"", ".", "format", "(", "col", ")", ")", "meta", "=", "[", "channel_width", ",", "flow_rate", ",", "viscosity", "]", "# Add the feature data", "self", ".", "_add", "(", "isoel", ",", "col1", ",", "col2", ",", "method", ",", "meta", ")", "# Also add the feature data for circularity", "if", "\"deform\"", "in", "[", "col1", ",", "col2", "]", ":", "col1c", ",", "col2c", "=", "col1", ",", "col2", "if", "col1c", "==", "\"deform\"", ":", "deform_ax", "=", "0", "col1c", "=", "\"circ\"", "else", ":", "deform_ax", "=", "1", "col2c", "=", "\"circ\"", "iso_circ", "=", "[", "]", "for", "iso", "in", "isoel", ":", "iso", "=", "iso", ".", "copy", "(", ")", "iso", "[", ":", ",", "deform_ax", "]", "=", "1", "-", "iso", "[", ":", ",", "deform_ax", "]", "iso_circ", ".", "append", "(", "iso", ")", "self", ".", "_add", "(", "iso_circ", ",", "col1c", ",", "col2c", ",", "method", ",", "meta", ")" ]
34.866667
15.716667
def request(self, url, method = u"get", data = None, headers = None, **kwargs): """ public method for doing the live request """ url, method, data, headers, kwargs = self._pre_request(url, method=method, data=data, headers=headers, **kwargs) response = self._request(url, method=method, data=data, headers=headers, **kwargs) response = self._post_request(response) # raises the appropriate exceptions response = self._handle_response(response) return response
[ "def", "request", "(", "self", ",", "url", ",", "method", "=", "u\"get\"", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", ",", "method", ",", "data", ",", "headers", ",", "kwargs", "=", "self", ".", "_pre_request", "(", "url", ",", "method", "=", "method", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "_request", "(", "url", ",", "method", "=", "method", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "_post_request", "(", "response", ")", "# raises the appropriate exceptions", "response", "=", "self", ".", "_handle_response", "(", "response", ")", "return", "response" ]
46.764706
24.411765
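A sketch of the template-method shape the `request` record above uses, where subclasses override the pre/request/post/handle hooks; all names and return values below are illustrative:

```python
class MiniClient:
    def _pre_request(self, url, method, data, headers, **kwargs):
        # Hook: normalize arguments before the live call.
        headers = dict(headers or {}, Accept="application/json")
        return url, method, data, headers, kwargs

    def _request(self, url, method, data, headers, **kwargs):
        # Hook: perform the live call (stubbed here).
        return {"status": 200, "url": url, "method": method}

    def _post_request(self, response):
        return response  # Hook: transform the raw response.

    def _handle_response(self, response):
        # Hook: raise the appropriate exceptions.
        if response["status"] >= 400:
            raise RuntimeError(response)
        return response

    def request(self, url, method="get", data=None, headers=None, **kwargs):
        url, method, data, headers, kwargs = self._pre_request(
            url, method=method, data=data, headers=headers, **kwargs)
        response = self._request(url, method=method, data=data,
                                 headers=headers, **kwargs)
        return self._handle_response(self._post_request(response))

print(MiniClient().request("https://api.example.com/items"))
```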
def _authorization_headers_valid(self, token_type, token): """Verify authorization headers for a request. Parameters token_type (str) Type of token to access resources. token (str) Server Token or OAuth 2.0 Access Token. Returns (bool) True iff token_type and token are valid. """ if token_type not in http.VALID_TOKEN_TYPES: return False allowed_chars = ascii_letters + digits + '_' + '-' + '=' + '/' + '+' # True if token only contains allowed_chars return all(characters in allowed_chars for characters in token)
[ "def", "_authorization_headers_valid", "(", "self", ",", "token_type", ",", "token", ")", ":", "if", "token_type", "not", "in", "http", ".", "VALID_TOKEN_TYPES", ":", "return", "False", "allowed_chars", "=", "ascii_letters", "+", "digits", "+", "'_'", "+", "'-'", "+", "'='", "+", "'/'", "+", "'+'", "# True if token only contains allowed_chars", "return", "all", "(", "characters", "in", "allowed_chars", "for", "characters", "in", "token", ")" ]
36.666667
19.055556
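A hedged standalone restatement of the check above; `VALID_TOKEN_TYPES` is an assumption standing in for `http.VALID_TOKEN_TYPES`:

```python
from string import ascii_letters, digits

VALID_TOKEN_TYPES = frozenset(["Bearer", "Token"])      # assumed values
ALLOWED_CHARS = set(ascii_letters + digits + "_-=/+")   # same allow-list as above

def authorization_headers_valid(token_type, token):
    if token_type not in VALID_TOKEN_TYPES:
        return False
    # True only if the token contains nothing outside the allow-list.
    return all(ch in ALLOWED_CHARS for ch in token)

assert authorization_headers_valid("Bearer", "abc123-_=/+")
assert not authorization_headers_valid("Bearer", "bad token!")  # space and '!'
```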
def order_verification(self, institute, case, user, link, variant): """Create a verification event for the variant and a matching event for its case Arguments: institute (dict): An Institute object case (dict): Case object user (dict): A User object link (str): The url to be used in the event variant (dict): A variant object Returns: updated_variant(dict) """ LOG.info("Creating event for ordering validation for variant" \ " {0}".format(variant['display_name'])) updated_variant = self.variant_collection.find_one_and_update( {'_id': variant['_id']}, {'$set': {'sanger_ordered': True}}, return_document=pymongo.ReturnDocument.AFTER ) self.create_event( institute=institute, case=case, user=user, link=link, category='variant', verb='sanger', variant=variant, subject=variant['display_name'], ) LOG.info("Creating event for ordering sanger for case" \ " {0}".format(case['display_name'])) self.create_event( institute=institute, case=case, user=user, link=link, category='case', verb='sanger', variant=variant, subject=variant['display_name'], ) return updated_variant
[ "def", "order_verification", "(", "self", ",", "institute", ",", "case", ",", "user", ",", "link", ",", "variant", ")", ":", "LOG", ".", "info", "(", "\"Creating event for ordering validation for variant\"", "\" {0}\"", ".", "format", "(", "variant", "[", "'display_name'", "]", ")", ")", "updated_variant", "=", "self", ".", "variant_collection", ".", "find_one_and_update", "(", "{", "'_id'", ":", "variant", "[", "'_id'", "]", "}", ",", "{", "'$set'", ":", "{", "'sanger_ordered'", ":", "True", "}", "}", ",", "return_document", "=", "pymongo", ".", "ReturnDocument", ".", "AFTER", ")", "self", ".", "create_event", "(", "institute", "=", "institute", ",", "case", "=", "case", ",", "user", "=", "user", ",", "link", "=", "link", ",", "category", "=", "'variant'", ",", "verb", "=", "'sanger'", ",", "variant", "=", "variant", ",", "subject", "=", "variant", "[", "'display_name'", "]", ",", ")", "LOG", ".", "info", "(", "\"Creating event for ordering sanger for case\"", "\" {0}\"", ".", "format", "(", "case", "[", "'display_name'", "]", ")", ")", "self", ".", "create_event", "(", "institute", "=", "institute", ",", "case", "=", "case", ",", "user", "=", "user", ",", "link", "=", "link", ",", "category", "=", "'case'", ",", "verb", "=", "'sanger'", ",", "variant", "=", "variant", ",", "subject", "=", "variant", "[", "'display_name'", "]", ",", ")", "return", "updated_variant" ]
31.458333
17.791667
def dump(self, itemkey, filename=None, path=None): """ Dump a file attachment to disk, with optional filename and path """ if not filename: filename = self.item(itemkey)["data"]["filename"] if path: pth = os.path.join(path, filename) else: pth = filename file = self.file(itemkey) if self.snapshot: self.snapshot = False pth = pth + ".zip" with open(pth, "wb") as f: f.write(file)
[ "def", "dump", "(", "self", ",", "itemkey", ",", "filename", "=", "None", ",", "path", "=", "None", ")", ":", "if", "not", "filename", ":", "filename", "=", "self", ".", "item", "(", "itemkey", ")", "[", "\"data\"", "]", "[", "\"filename\"", "]", "if", "path", ":", "pth", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "else", ":", "pth", "=", "filename", "file", "=", "self", ".", "file", "(", "itemkey", ")", "if", "self", ".", "snapshot", ":", "self", ".", "snapshot", "=", "False", "pth", "=", "pth", "+", "\".zip\"", "with", "open", "(", "pth", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "file", ")" ]
31.8125
13.0625
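A hedged pyzotero-style call to `dump()`; the library ID, API key, and item key below are placeholders, not real credentials:

```python
from pyzotero import zotero

zot = zotero.Zotero("123456", "user", "API_KEY")  # hypothetical credentials
zot.dump("ABCD2345", filename="report.pdf", path="/tmp")  # writes /tmp/report.pdf
```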
def is_compatible(self, obj): """Check if characters can be combined into a textline We consider characters compatible if: - the Unicode mapping is known, and both have the same render mode - the Unicode mapping is unknown but both are part of the same font """ both_unicode_mapped = isinstance(self._text, str) and isinstance(obj._text, str) try: if both_unicode_mapped: return self.rendermode == obj.rendermode font0, _ = self._text font1, _ = obj._text return font0 == font1 and self.rendermode == obj.rendermode except (ValueError, AttributeError): return False
[ "def", "is_compatible", "(", "self", ",", "obj", ")", ":", "both_unicode_mapped", "=", "isinstance", "(", "self", ".", "_text", ",", "str", ")", "and", "isinstance", "(", "obj", ".", "_text", ",", "str", ")", "try", ":", "if", "both_unicode_mapped", ":", "return", "self", ".", "rendermode", "==", "obj", ".", "rendermode", "font0", ",", "_", "=", "self", ".", "_text", "font1", ",", "_", "=", "obj", ".", "_text", "return", "font0", "==", "font1", "and", "self", ".", "rendermode", "==", "obj", ".", "rendermode", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "return", "False" ]
43.6875
18.5
def env(): """Verify PCI variables and construct exported variables""" if cij.ssh.env(): cij.err("cij.pci.env: invalid SSH environment") return 1 pci = cij.env_to_dict(PREFIX, REQUIRED) pci["BUS_PATH"] = "/sys/bus/pci" pci["DEV_PATH"] = os.sep.join([pci["BUS_PATH"], "devices", pci["DEV_NAME"]]) cij.env_export(PREFIX, EXPORTED, pci) return 0
[ "def", "env", "(", ")", ":", "if", "cij", ".", "ssh", ".", "env", "(", ")", ":", "cij", ".", "err", "(", "\"cij.pci.env: invalid SSH environment\"", ")", "return", "1", "pci", "=", "cij", ".", "env_to_dict", "(", "PREFIX", ",", "REQUIRED", ")", "pci", "[", "\"BUS_PATH\"", "]", "=", "\"/sys/bus/pci\"", "pci", "[", "\"DEV_PATH\"", "]", "=", "os", ".", "sep", ".", "join", "(", "[", "pci", "[", "\"BUS_PATH\"", "]", ",", "\"devices\"", ",", "pci", "[", "\"DEV_NAME\"", "]", "]", ")", "cij", ".", "env_export", "(", "PREFIX", ",", "EXPORTED", ",", "pci", ")", "return", "0" ]
25.133333
24.266667
def create_node(self, config_file=None, seed=None, tags=None): """ Set up a CondorDagmanNode class to run ``pycbc_create_injections``. Parameters ---------- config_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for inference configuration file to be used with ``--config-files`` option. seed : int Seed to use for generating injections. tags : list A list of tags to include in filenames. Returns -------- node : pycbc.workflow.core.Node The node to run the job. """ # default for tags is empty list tags = [] if tags is None else tags # get analysis start and end time start_time = self.cp.get("workflow", "start-time") end_time = self.cp.get("workflow", "end-time") analysis_time = segments.segment(int(start_time), int(end_time)) # make node for running executable node = Node(self) node.add_input_opt("--config-file", config_file) if seed: node.add_opt("--seed", seed) injection_file = node.new_output_file_opt(analysis_time, ".hdf", "--output-file", tags=tags) return node, injection_file
[ "def", "create_node", "(", "self", ",", "config_file", "=", "None", ",", "seed", "=", "None", ",", "tags", "=", "None", ")", ":", "# default for tags is empty list", "tags", "=", "[", "]", "if", "tags", "is", "None", "else", "tags", "# get analysis start and end time", "start_time", "=", "self", ".", "cp", ".", "get", "(", "\"workflow\"", ",", "\"start-time\"", ")", "end_time", "=", "self", ".", "cp", ".", "get", "(", "\"workflow\"", ",", "\"end-time\"", ")", "analysis_time", "=", "segments", ".", "segment", "(", "int", "(", "start_time", ")", ",", "int", "(", "end_time", ")", ")", "# make node for running executable", "node", "=", "Node", "(", "self", ")", "node", ".", "add_input_opt", "(", "\"--config-file\"", ",", "config_file", ")", "if", "seed", ":", "node", ".", "add_opt", "(", "\"--seed\"", ",", "seed", ")", "injection_file", "=", "node", ".", "new_output_file_opt", "(", "analysis_time", ",", "\".hdf\"", ",", "\"--output-file\"", ",", "tags", "=", "tags", ")", "return", "node", ",", "injection_file" ]
35.864865
18.567568
def fasta_files_equal(seq_file1, seq_file2): """Check equality of a FASTA file to another FASTA file Args: seq_file1: Path to a FASTA file seq_file2: Path to another FASTA file Returns: bool: If the sequences are the same """ # Load already set representative sequence seq1 = SeqIO.read(open(seq_file1), 'fasta') # Load kegg sequence seq2 = SeqIO.read(open(seq_file2), 'fasta') # Test equality if str(seq1.seq) == str(seq2.seq): return True else: return False
[ "def", "fasta_files_equal", "(", "seq_file1", ",", "seq_file2", ")", ":", "# Load already set representative sequence", "seq1", "=", "SeqIO", ".", "read", "(", "open", "(", "seq_file1", ")", ",", "'fasta'", ")", "# Load kegg sequence", "seq2", "=", "SeqIO", ".", "read", "(", "open", "(", "seq_file2", ")", ",", "'fasta'", ")", "# Test equality", "if", "str", "(", "seq1", ".", "seq", ")", "==", "str", "(", "seq2", ".", "seq", ")", ":", "return", "True", "else", ":", "return", "False" ]
22.913043
19.26087
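A hedged demonstration for the record above, assuming `fasta_files_equal` is in scope together with `from Bio import SeqIO` (Biopython): two FASTA files whose IDs differ but whose sequences match compare equal, since only the sequences are checked:

```python
with open("a.fasta", "w") as f:
    f.write(">seq1\nMKVL\n")
with open("b.fasta", "w") as f:
    f.write(">seq2\nMKVL\n")

# True: the record IDs differ, but str(seq1.seq) == str(seq2.seq).
print(fasta_files_equal("a.fasta", "b.fasta"))
```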
def get_temp_filename(suffix=None): """ return a string in the form of temp_X, where X is a large integer """ file = tempfile.mkstemp(suffix=suffix or "", prefix="temp_", dir=os.getcwd()) # or "" for Python 2 compatibility os.close(file[0]) return file[1]
[ "def", "get_temp_filename", "(", "suffix", "=", "None", ")", ":", "file", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "suffix", "or", "\"\"", ",", "prefix", "=", "\"temp_\"", ",", "dir", "=", "os", ".", "getcwd", "(", ")", ")", "# or \"\" for Python 2 compatibility", "os", ".", "close", "(", "file", "[", "0", "]", ")", "return", "file", "[", "1", "]" ]
53.4
24.4
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): """Create an ssh tunnel using command-line ssh that connects port lport on this machine to localhost:rport on server. The tunnel will automatically close when not in use, remaining open for a minimum of timeout seconds for an initial connection. This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, as seen from `server`. keyfile and password may be specified, but ssh config is checked for defaults. Parameters ---------- lport : int local port for connecting to the tunnel from this machine. rport : int port on the remote machine to connect to. server : str The ssh server to connect to. The full ssh server string will be parsed. user@server:port remoteip : str [Default: 127.0.0.1] The remote ip, specifying the destination of the tunnel. Default is localhost, which means that the tunnel would redirect localhost:lport on this machine to localhost:rport on the *server*. keyfile : str; path to public key file This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument. password : str; Your ssh password to the ssh server. Note that if this is left None, you will be prompted for it if passwordless key based login is unavailable. timeout : int [default: 60] The time (in seconds) after which no activity will result in the tunnel closing. This prevents orphaned tunnels from running forever. """ if pexpect is None: raise ImportError("pexpect unavailable, use paramiko_tunnel") ssh="ssh " if keyfile: ssh += "-i " + keyfile if ':' in server: server, port = server.split(':') ssh += " -p %s" % port cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( ssh, lport, remoteip, rport, server, timeout) tunnel = pexpect.spawn(cmd) failed = False while True: try: tunnel.expect('[Pp]assword:', timeout=.1) except pexpect.TIMEOUT: continue except pexpect.EOF: if tunnel.exitstatus: print (tunnel.exitstatus) print (tunnel.before) print (tunnel.after) raise RuntimeError("tunnel '%s' failed to start"%(cmd)) else: return tunnel.pid else: if failed: print("Password rejected, try again") password=None if password is None: password = getpass("%s's password: "%(server)) tunnel.sendline(password) failed = True
[ "def", "openssh_tunnel", "(", "lport", ",", "rport", ",", "server", ",", "remoteip", "=", "'127.0.0.1'", ",", "keyfile", "=", "None", ",", "password", "=", "None", ",", "timeout", "=", "60", ")", ":", "if", "pexpect", "is", "None", ":", "raise", "ImportError", "(", "\"pexpect unavailable, use paramiko_tunnel\"", ")", "ssh", "=", "\"ssh \"", "if", "keyfile", ":", "ssh", "+=", "\"-i \"", "+", "keyfile", "if", "':'", "in", "server", ":", "server", ",", "port", "=", "server", ".", "split", "(", "':'", ")", "ssh", "+=", "\" -p %s\"", "%", "port", "cmd", "=", "\"%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i\"", "%", "(", "ssh", ",", "lport", ",", "remoteip", ",", "rport", ",", "server", ",", "timeout", ")", "tunnel", "=", "pexpect", ".", "spawn", "(", "cmd", ")", "failed", "=", "False", "while", "True", ":", "try", ":", "tunnel", ".", "expect", "(", "'[Pp]assword:'", ",", "timeout", "=", ".1", ")", "except", "pexpect", ".", "TIMEOUT", ":", "continue", "except", "pexpect", ".", "EOF", ":", "if", "tunnel", ".", "exitstatus", ":", "print", "(", "tunnel", ".", "exitstatus", ")", "print", "(", "tunnel", ".", "before", ")", "print", "(", "tunnel", ".", "after", ")", "raise", "RuntimeError", "(", "\"tunnel '%s' failed to start\"", "%", "(", "cmd", ")", ")", "else", ":", "return", "tunnel", ".", "pid", "else", ":", "if", "failed", ":", "print", "(", "\"Password rejected, try again\"", ")", "password", "=", "None", "if", "password", "is", "None", ":", "password", "=", "getpass", "(", "\"%s's password: \"", "%", "(", "server", ")", ")", "tunnel", ".", "sendline", "(", "password", ")", "failed", "=", "True" ]
39.901408
22.098592
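The exact command string the function above assembles, shown for illustrative values (note the `-f` background flag and the self-closing `sleep` payload; the double space comes from `ssh = "ssh "`):

```python
ssh, lport, remoteip, rport, server, timeout = (
    "ssh ", 9000, "127.0.0.1", 5432, "user@host", 60)
cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
    ssh, lport, remoteip, rport, server, timeout)
print(cmd)  # ssh  -f -L 127.0.0.1:9000:127.0.0.1:5432 user@host sleep 60
```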
def pre_fork(self, process_manager): ''' Pre-fork we need to create the zmq router device ''' salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager) if USE_LOAD_BALANCER: self.socket_queue = multiprocessing.Queue() process_manager.add_process( LoadBalancerServer, args=(self.opts, self.socket_queue) ) elif not salt.utils.platform.is_windows(): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _set_tcp_keepalive(self._socket, self.opts) self._socket.setblocking(0) self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
[ "def", "pre_fork", "(", "self", ",", "process_manager", ")", ":", "salt", ".", "transport", ".", "mixins", ".", "auth", ".", "AESReqServerMixin", ".", "pre_fork", "(", "self", ",", "process_manager", ")", "if", "USE_LOAD_BALANCER", ":", "self", ".", "socket_queue", "=", "multiprocessing", ".", "Queue", "(", ")", "process_manager", ".", "add_process", "(", "LoadBalancerServer", ",", "args", "=", "(", "self", ".", "opts", ",", "self", ".", "socket_queue", ")", ")", "elif", "not", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "self", ".", "_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "_set_tcp_keepalive", "(", "self", ".", "_socket", ",", "self", ".", "opts", ")", "self", ".", "_socket", ".", "setblocking", "(", "0", ")", "self", ".", "_socket", ".", "bind", "(", "(", "self", ".", "opts", "[", "'interface'", "]", ",", "int", "(", "self", ".", "opts", "[", "'ret_port'", "]", ")", ")", ")" ]
49.1875
21.8125
def gen_filter(name, op, value, is_or=False): """Generates a single filter expression for ``filter[]``.""" if op not in OPERATORS: raise ValueError('Unknown operator {}'.format(op)) result = u'{} {} {}'.format(name, op, escape_filter(value)) if is_or: result = u'or ' + result return result
[ "def", "gen_filter", "(", "name", ",", "op", ",", "value", ",", "is_or", "=", "False", ")", ":", "if", "op", "not", "in", "OPERATORS", ":", "raise", "ValueError", "(", "'Unknown operator {}'", ".", "format", "(", "op", ")", ")", "result", "=", "u'{} {} {}'", ".", "format", "(", "name", ",", "op", ",", "escape_filter", "(", "value", ")", ")", "if", "is_or", ":", "result", "=", "u'or '", "+", "result", "return", "result" ]
39.875
14.625
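A self-contained sketch of the record above; `OPERATORS` and `escape_filter` are assumptions standing in for the module-level definitions:

```python
OPERATORS = frozenset(["=", "!=", ">", "<"])  # assumed operator set

def escape_filter(value):
    # Assumed quoting rule: wrap in double quotes, escape embedded quotes.
    return '"%s"' % str(value).replace('"', '\\"')

def gen_filter(name, op, value, is_or=False):
    if op not in OPERATORS:
        raise ValueError("Unknown operator {}".format(op))
    result = u"{} {} {}".format(name, op, escape_filter(value))
    if is_or:
        result = u"or " + result
    return result

print(gen_filter("status", "=", "open"))           # status = "open"
print(gen_filter("priority", ">", 2, is_or=True))  # or priority > "2"
```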
def get_entity_from_dimensions(dimensions, text): """ Infer the underlying entity of a unit (e.g. "volume" for "m^3"). Just based on the unit's dimensionality if the classifier is disabled. """ new_dimensions = [{'base': l.NAMES[i['base']].entity.name, 'power': i['power']} for i in dimensions] final_dimensions = sorted(new_dimensions, key=lambda x: x['base']) key = l.get_key_from_dimensions(final_dimensions) try: if clf.USE_CLF: ent = clf.disambiguate_entity(key, text) else: ent = l.DERIVED_ENT[key][0] except IndexError: logging.debug(u'\tCould not find entity for: %s', key) ent = c.Entity(name='unknown', dimensions=new_dimensions) return ent
[ "def", "get_entity_from_dimensions", "(", "dimensions", ",", "text", ")", ":", "new_dimensions", "=", "[", "{", "'base'", ":", "l", ".", "NAMES", "[", "i", "[", "'base'", "]", "]", ".", "entity", ".", "name", ",", "'power'", ":", "i", "[", "'power'", "]", "}", "for", "i", "in", "dimensions", "]", "final_dimensions", "=", "sorted", "(", "new_dimensions", ",", "key", "=", "lambda", "x", ":", "x", "[", "'base'", "]", ")", "key", "=", "l", ".", "get_key_from_dimensions", "(", "final_dimensions", ")", "try", ":", "if", "clf", ".", "USE_CLF", ":", "ent", "=", "clf", ".", "disambiguate_entity", "(", "key", ",", "text", ")", "else", ":", "ent", "=", "l", ".", "DERIVED_ENT", "[", "key", "]", "[", "0", "]", "except", "IndexError", ":", "logging", ".", "debug", "(", "u'\\tCould not find entity for: %s'", ",", "key", ")", "ent", "=", "c", ".", "Entity", "(", "name", "=", "'unknown'", ",", "dimensions", "=", "new_dimensions", ")", "return", "ent" ]
34.181818
22.727273
def from_dict(data, ctx): """ Instantiate a new AccountSummary from a dict (generally from loading a JSON response). The data used to instantiate the AccountSummary is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('balance') is not None: data['balance'] = ctx.convert_decimal_number( data.get('balance') ) if data.get('pl') is not None: data['pl'] = ctx.convert_decimal_number( data.get('pl') ) if data.get('resettablePL') is not None: data['resettablePL'] = ctx.convert_decimal_number( data.get('resettablePL') ) if data.get('financing') is not None: data['financing'] = ctx.convert_decimal_number( data.get('financing') ) if data.get('commission') is not None: data['commission'] = ctx.convert_decimal_number( data.get('commission') ) if data.get('guaranteedExecutionFees') is not None: data['guaranteedExecutionFees'] = ctx.convert_decimal_number( data.get('guaranteedExecutionFees') ) if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number( data.get('marginRate') ) if data.get('unrealizedPL') is not None: data['unrealizedPL'] = ctx.convert_decimal_number( data.get('unrealizedPL') ) if data.get('NAV') is not None: data['NAV'] = ctx.convert_decimal_number( data.get('NAV') ) if data.get('marginUsed') is not None: data['marginUsed'] = ctx.convert_decimal_number( data.get('marginUsed') ) if data.get('marginAvailable') is not None: data['marginAvailable'] = ctx.convert_decimal_number( data.get('marginAvailable') ) if data.get('positionValue') is not None: data['positionValue'] = ctx.convert_decimal_number( data.get('positionValue') ) if data.get('marginCloseoutUnrealizedPL') is not None: data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number( data.get('marginCloseoutUnrealizedPL') ) if data.get('marginCloseoutNAV') is not None: data['marginCloseoutNAV'] = ctx.convert_decimal_number( data.get('marginCloseoutNAV') ) if data.get('marginCloseoutMarginUsed') is not None: data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number( data.get('marginCloseoutMarginUsed') ) if data.get('marginCloseoutPercent') is not None: data['marginCloseoutPercent'] = ctx.convert_decimal_number( data.get('marginCloseoutPercent') ) if data.get('marginCloseoutPositionValue') is not None: data['marginCloseoutPositionValue'] = ctx.convert_decimal_number( data.get('marginCloseoutPositionValue') ) if data.get('withdrawalLimit') is not None: data['withdrawalLimit'] = ctx.convert_decimal_number( data.get('withdrawalLimit') ) if data.get('marginCallMarginUsed') is not None: data['marginCallMarginUsed'] = ctx.convert_decimal_number( data.get('marginCallMarginUsed') ) if data.get('marginCallPercent') is not None: data['marginCallPercent'] = ctx.convert_decimal_number( data.get('marginCallPercent') ) return AccountSummary(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'balance'", ")", "is", "not", "None", ":", "data", "[", "'balance'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'balance'", ")", ")", "if", "data", ".", "get", "(", "'pl'", ")", "is", "not", "None", ":", "data", "[", "'pl'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'pl'", ")", ")", "if", "data", ".", "get", "(", "'resettablePL'", ")", "is", "not", "None", ":", "data", "[", "'resettablePL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'resettablePL'", ")", ")", "if", "data", ".", "get", "(", "'financing'", ")", "is", "not", "None", ":", "data", "[", "'financing'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'financing'", ")", ")", "if", "data", ".", "get", "(", "'commission'", ")", "is", "not", "None", ":", "data", "[", "'commission'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'commission'", ")", ")", "if", "data", ".", "get", "(", "'guaranteedExecutionFees'", ")", "is", "not", "None", ":", "data", "[", "'guaranteedExecutionFees'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'guaranteedExecutionFees'", ")", ")", "if", "data", ".", "get", "(", "'marginRate'", ")", "is", "not", "None", ":", "data", "[", "'marginRate'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginRate'", ")", ")", "if", "data", ".", "get", "(", "'unrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'unrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'unrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'NAV'", ")", "is", "not", "None", ":", "data", "[", "'NAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'NAV'", ")", ")", "if", "data", ".", "get", "(", "'marginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginAvailable'", ")", "is", "not", "None", ":", "data", "[", "'marginAvailable'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginAvailable'", ")", ")", "if", "data", ".", "get", "(", "'positionValue'", ")", "is", "not", "None", ":", "data", "[", "'positionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'positionValue'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutUnrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutNAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPercent'", "]", "=", "ctx", ".", 
"convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPositionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", ")", "if", "data", ".", "get", "(", "'withdrawalLimit'", ")", "is", "not", "None", ":", "data", "[", "'withdrawalLimit'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'withdrawalLimit'", ")", ")", "if", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCallMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCallPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCallPercent'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallPercent'", ")", ")", "return", "AccountSummary", "(", "*", "*", "data", ")" ]
34.144144
21.567568
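The `from_dict` record above applies one identical conversion per field; an equivalent loop-based sketch (the field list below is abbreviated from the code):

```python
DECIMAL_FIELDS = ("balance", "pl", "resettablePL", "financing", "commission",
                  "marginRate", "unrealizedPL", "NAV", "marginUsed")

def convert_decimals(data, ctx, fields=DECIMAL_FIELDS):
    # Loop form of the repeated per-field pattern in from_dict above.
    out = data.copy()
    for field in fields:
        if out.get(field) is not None:
            out[field] = ctx.convert_decimal_number(out[field])
    return out
```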
def overlap(self, other): """ Returns True if both ranges share any points. >>> intrange(1, 10).overlap(intrange(5, 15)) True >>> intrange(1, 5).overlap(intrange(5, 10)) False This is the same as the ``&&`` operator for two ranges in PostgreSQL. :param other: Range to test against. :return: ``True`` if ranges overlap, otherwise ``False``. :raises TypeError: If `other` is of another type than this range. .. seealso:: If you need to know which part that overlapped, consider using :meth:`~spans.types.Range.intersection`. """ # Special case for empty ranges if not self or not other: return False if self < other: a, b = self, other else: a, b = other, self # We need to explicitly handle unbounded ranges since a.upper and b.lower # make the intervals seem adjacent even though they are not if a.upper_inf or b.lower_inf: return True return a.upper > b.lower or a.upper == b.lower and a.upper_inc and b.lower_inc
[ "def", "overlap", "(", "self", ",", "other", ")", ":", "# Special case for empty ranges", "if", "not", "self", "or", "not", "other", ":", "return", "False", "if", "self", "<", "other", ":", "a", ",", "b", "=", "self", ",", "other", "else", ":", "a", ",", "b", "=", "other", ",", "self", "# We need to explicitly handle unbounded ranges since a.upper and b.lower", "# make the intervals seem adjacent even though they are not", "if", "a", ".", "upper_inf", "or", "b", ".", "lower_inf", ":", "return", "True", "return", "a", ".", "upper", ">", "b", ".", "lower", "or", "a", ".", "upper", "==", "b", ".", "lower", "and", "a", ".", "upper_inc", "and", "b", ".", "lower_inc" ]
33.382353
22.617647
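The decisive step is the final boundary test: two sorted, bounded, non-empty ranges overlap when the first one's upper bound passes the second one's lower bound, or when they meet at a single point that both sides include. A minimal standalone sketch, using a namedtuple with illustrative field names in place of the library's range type:

from collections import namedtuple

# Illustrative stand-in for the library's range type; covers bounded,
# non-empty ranges only (the real method also handles empty/unbounded).
Rng = namedtuple('Rng', 'lower upper lower_inc upper_inc')

def overlaps(x, y):
    a, b = (x, y) if x.lower <= y.lower else (y, x)
    return a.upper > b.lower or (a.upper == b.lower and a.upper_inc and b.lower_inc)

print(overlaps(Rng(1, 10, True, False), Rng(5, 15, True, False)))  # True
print(overlaps(Rng(1, 5, True, False), Rng(5, 10, True, False)))   # False: 5 excluded from the first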
def maybe_base_expanded_node_name(self, node_name):
    """Expand the base name if there are node names nested under the node.

    For example, if there are two nodes in the graph, "a" and "a/read", then
    calling this function on "a" will give "a/(a)", a form that points at
    a leaf node in the nested TensorBoard graph. Calling this function on
    "a/read" will just return "a/read", because there is no node nested under
    it.

    This method is thread-safe.

    Args:
      node_name: Name of the node.

    Returns:
      Possibly base-expanded node name.
    """
    with self._node_name_lock:
      # Lazily populate the map from original node name to base-expanded ones.
      if self._maybe_base_expanded_node_names is None:
        self._maybe_base_expanded_node_names = dict()
        # Sort all the node names.
        sorted_names = sorted(node.name for node in self._graph_def.node)
        for i, name in enumerate(sorted_names):
          j = i + 1
          while j < len(sorted_names) and sorted_names[j].startswith(name):
            if sorted_names[j].startswith(name + '/'):
              self._maybe_base_expanded_node_names[name] = (
                  name + '/(' + name.split('/')[-1] + ')')
              break
            j += 1
      return self._maybe_base_expanded_node_names.get(node_name, node_name)
[ "def", "maybe_base_expanded_node_name", "(", "self", ",", "node_name", ")", ":", "with", "self", ".", "_node_name_lock", ":", "# Lazily populate the map from original node name to base-expanded ones.", "if", "self", ".", "_maybe_base_expanded_node_names", "is", "None", ":", "self", ".", "_maybe_base_expanded_node_names", "=", "dict", "(", ")", "# Sort all the node names.", "sorted_names", "=", "sorted", "(", "node", ".", "name", "for", "node", "in", "self", ".", "_graph_def", ".", "node", ")", "for", "i", ",", "name", "in", "enumerate", "(", "sorted_names", ")", ":", "j", "=", "i", "+", "1", "while", "j", "<", "len", "(", "sorted_names", ")", "and", "sorted_names", "[", "j", "]", ".", "startswith", "(", "name", ")", ":", "if", "sorted_names", "[", "j", "]", ".", "startswith", "(", "name", "+", "'/'", ")", ":", "self", ".", "_maybe_base_expanded_node_names", "[", "name", "]", "=", "(", "name", "+", "'/('", "+", "name", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "+", "')'", ")", "break", "j", "+=", "1", "return", "self", ".", "_maybe_base_expanded_node_names", ".", "get", "(", "node_name", ",", "node_name", ")" ]
41.484848
22.545455
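The nested loop only needs sorted names and string prefixes, so the base-expansion rule can be exercised in isolation. A hypothetical standalone version: a name gains a '/(leaf)' suffix only when some other node is nested directly under it.

def base_expand(names):
    # Standalone sketch of the loop above, detached from GraphDef/locking.
    expanded = {}
    sorted_names = sorted(names)
    for i, name in enumerate(sorted_names):
        j = i + 1
        while j < len(sorted_names) and sorted_names[j].startswith(name):
            if sorted_names[j].startswith(name + '/'):
                expanded[name] = name + '/(' + name.split('/')[-1] + ')'
                break
            j += 1
    return expanded

print(base_expand(['a', 'a/read']))  # {'a': 'a/(a)'}
print(base_expand(['a/read']))       # {} -- nothing nested under 'a/read'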
def _get_groups(self): """ Returns an _LDAPUserGroups object, which can determine group membership. """ if self._groups is None: self._groups = _LDAPUserGroups(self) return self._groups
[ "def", "_get_groups", "(", "self", ")", ":", "if", "self", ".", "_groups", "is", "None", ":", "self", ".", "_groups", "=", "_LDAPUserGroups", "(", "self", ")", "return", "self", ".", "_groups" ]
26.444444
15.111111
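The pattern here is plain lazy initialization: construct the expensive helper on first access, then reuse the cached instance. A toy illustration, with a list standing in for _LDAPUserGroups:

class Lazy:
    def __init__(self):
        self._groups = None

    def _get_groups(self):
        if self._groups is None:
            self._groups = ['built once']  # stand-in for _LDAPUserGroups(self)
        return self._groups

obj = Lazy()
print(obj._get_groups() is obj._get_groups())  # True: constructed only on first call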
def register_warning_code(code, exception_type, domain='core'): """Register a new warning code""" Logger._warning_code_to_exception[code] = (exception_type, domain) Logger._domain_codes[domain].add(code)
[ "def", "register_warning_code", "(", "code", ",", "exception_type", ",", "domain", "=", "'core'", ")", ":", "Logger", ".", "_warning_code_to_exception", "[", "code", "]", "=", "(", "exception_type", ",", "domain", ")", "Logger", ".", "_domain_codes", "[", "domain", "]", ".", "add", "(", "code", ")" ]
56
15.75
def filedet(name, fobj=None, suffix=None):
    """
    Detect file type by filename.

    :param name: file name
    :param fobj: file object
    :param suffix: file suffix including the dot, like ``.py``
    :return: file type full name, such as ``python``, ``bash``
    """
    name = name or (fobj and fobj.name) or suffix
    separated = name.split('.')
    if len(separated) == 1:
        raise FiledetException('file name error.')
    key = '.' + separated[-1]
    return _file_type_map.get(key)
[ "def", "filedet", "(", "name", ",", "fobj", "=", "None", ",", "suffix", "=", "None", ")", ":", "name", "=", "name", "or", "(", "fobj", "and", "fobj", ".", "name", ")", "or", "suffix", "separated", "=", "name", ".", "split", "(", "'.'", ")", "if", "len", "(", "separated", ")", "==", "1", ":", "raise", "FiledetException", "(", "'file name error.'", ")", "key", "=", "'.'", "+", "separated", "[", "-", "1", "]", "return", "_file_type_map", ".", "get", "(", "key", ")" ]
29.75
12.875
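A usage sketch, run alongside the definition above. The module-level `_file_type_map` and `FiledetException` that the function assumes are given illustrative stand-ins here:

_file_type_map = {'.py': 'python', '.sh': 'bash'}  # illustrative entries

class FiledetException(Exception):
    pass

print(filedet('script.py'))         # python
print(filedet(None, suffix='.sh'))  # bash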
def basis_comparison_report(bs1, bs2, uncontract_general=False): ''' Compares two basis set dictionaries and prints a report about their differences ''' all_bs1 = list(bs1['elements'].keys()) if uncontract_general: bs1 = manip.uncontract_general(bs1) bs2 = manip.uncontract_general(bs2) not_in_bs1 = [] # Found in bs2, not in bs1 not_in_bs2 = all_bs1.copy() # Found in bs1, not in bs2 no_diff = [] # Elements for which there is no difference some_diff = [] # Elements that are different big_diff = [] # Elements that are substantially different for k, v in bs2['elements'].items(): if k not in all_bs1: not_in_bs1.append(k) continue print() print("-------------------------------------") print(" Element ", k) bs1_el = bs1['elements'][k] max_rdiff_el = 0.0 max_rdiff_ecp = 0.0 # Check to make sure that neither or both have ecp/electron shells if 'electron_shells' in v and 'electron_shells' not in bs1_el: print("bs2 has electron_shells, but bs1 does not") max_rdiff_el = float('inf') if 'electron_shells' in bs1_el and 'electron_shells' not in v: print("bs1 has electron_shells, but bs2 does not") max_rdiff_el = float('inf') if 'ecp_potentials' in v and 'ecp_potentials' not in bs1_el: print("bs2 has ecp_potentials, but bs1 does not") max_rdiff_ecp = float('inf') if 'ecp_potentials' in bs1_el and 'ecp_potentials' not in v: print("bs1 has ecp_potentials, but bs2 does not") max_rdiff_ecp = float('inf') if 'electron_shells' in v and 'electron_shells' in bs1_el: max_rdiff_el = max(max_rdiff_el, shells_difference(v['electron_shells'], bs1_el['electron_shells'])) if 'ecp_potentials' in v and 'ecp_potentials' in bs1_el: nel1 = v['ecp_electrons'] nel2 = bs1_el['ecp_electrons'] if int(nel1) != int(nel2): print('Different number of electrons replaced by ECP ({} vs {})'.format(nel1, nel2)) max_rdiff_ecp = float('inf') else: max_rdiff_ecp = max(max_rdiff_ecp, potentials_difference(v['ecp_potentials'], bs1_el['ecp_potentials'])) max_rdiff = max(max_rdiff_el, max_rdiff_ecp) # Handle some differences if max_rdiff == float('inf'): big_diff.append(k) elif max_rdiff == 0.0: no_diff.append(k) else: some_diff.append(k) not_in_bs2.remove(k) print() print(" Not in bs1: ", _print_list(not_in_bs1)) print(" Not in bs2: ", _print_list(not_in_bs2)) print(" No difference: ", _print_list(no_diff)) print("Some difference: ", _print_list(some_diff)) print(" BIG difference: ", _print_list(big_diff)) print() return (len(not_in_bs1) == 0 and len(not_in_bs2) == 0 and len(some_diff) == 0 and len(big_diff) == 0)
[ "def", "basis_comparison_report", "(", "bs1", ",", "bs2", ",", "uncontract_general", "=", "False", ")", ":", "all_bs1", "=", "list", "(", "bs1", "[", "'elements'", "]", ".", "keys", "(", ")", ")", "if", "uncontract_general", ":", "bs1", "=", "manip", ".", "uncontract_general", "(", "bs1", ")", "bs2", "=", "manip", ".", "uncontract_general", "(", "bs2", ")", "not_in_bs1", "=", "[", "]", "# Found in bs2, not in bs1", "not_in_bs2", "=", "all_bs1", ".", "copy", "(", ")", "# Found in bs1, not in bs2", "no_diff", "=", "[", "]", "# Elements for which there is no difference", "some_diff", "=", "[", "]", "# Elements that are different", "big_diff", "=", "[", "]", "# Elements that are substantially different", "for", "k", ",", "v", "in", "bs2", "[", "'elements'", "]", ".", "items", "(", ")", ":", "if", "k", "not", "in", "all_bs1", ":", "not_in_bs1", ".", "append", "(", "k", ")", "continue", "print", "(", ")", "print", "(", "\"-------------------------------------\"", ")", "print", "(", "\" Element \"", ",", "k", ")", "bs1_el", "=", "bs1", "[", "'elements'", "]", "[", "k", "]", "max_rdiff_el", "=", "0.0", "max_rdiff_ecp", "=", "0.0", "# Check to make sure that neither or both have ecp/electron shells", "if", "'electron_shells'", "in", "v", "and", "'electron_shells'", "not", "in", "bs1_el", ":", "print", "(", "\"bs2 has electron_shells, but bs1 does not\"", ")", "max_rdiff_el", "=", "float", "(", "'inf'", ")", "if", "'electron_shells'", "in", "bs1_el", "and", "'electron_shells'", "not", "in", "v", ":", "print", "(", "\"bs1 has electron_shells, but bs2 does not\"", ")", "max_rdiff_el", "=", "float", "(", "'inf'", ")", "if", "'ecp_potentials'", "in", "v", "and", "'ecp_potentials'", "not", "in", "bs1_el", ":", "print", "(", "\"bs2 has ecp_potentials, but bs1 does not\"", ")", "max_rdiff_ecp", "=", "float", "(", "'inf'", ")", "if", "'ecp_potentials'", "in", "bs1_el", "and", "'ecp_potentials'", "not", "in", "v", ":", "print", "(", "\"bs1 has ecp_potentials, but bs2 does not\"", ")", "max_rdiff_ecp", "=", "float", "(", "'inf'", ")", "if", "'electron_shells'", "in", "v", "and", "'electron_shells'", "in", "bs1_el", ":", "max_rdiff_el", "=", "max", "(", "max_rdiff_el", ",", "shells_difference", "(", "v", "[", "'electron_shells'", "]", ",", "bs1_el", "[", "'electron_shells'", "]", ")", ")", "if", "'ecp_potentials'", "in", "v", "and", "'ecp_potentials'", "in", "bs1_el", ":", "nel1", "=", "v", "[", "'ecp_electrons'", "]", "nel2", "=", "bs1_el", "[", "'ecp_electrons'", "]", "if", "int", "(", "nel1", ")", "!=", "int", "(", "nel2", ")", ":", "print", "(", "'Different number of electrons replaced by ECP ({} vs {})'", ".", "format", "(", "nel1", ",", "nel2", ")", ")", "max_rdiff_ecp", "=", "float", "(", "'inf'", ")", "else", ":", "max_rdiff_ecp", "=", "max", "(", "max_rdiff_ecp", ",", "potentials_difference", "(", "v", "[", "'ecp_potentials'", "]", ",", "bs1_el", "[", "'ecp_potentials'", "]", ")", ")", "max_rdiff", "=", "max", "(", "max_rdiff_el", ",", "max_rdiff_ecp", ")", "# Handle some differences", "if", "max_rdiff", "==", "float", "(", "'inf'", ")", ":", "big_diff", ".", "append", "(", "k", ")", "elif", "max_rdiff", "==", "0.0", ":", "no_diff", ".", "append", "(", "k", ")", "else", ":", "some_diff", ".", "append", "(", "k", ")", "not_in_bs2", ".", "remove", "(", "k", ")", "print", "(", ")", "print", "(", "\" Not in bs1: \"", ",", "_print_list", "(", "not_in_bs1", ")", ")", "print", "(", "\" Not in bs2: \"", ",", "_print_list", "(", "not_in_bs2", ")", ")", "print", "(", "\" No difference: \"", ",", 
"_print_list", "(", "no_diff", ")", ")", "print", "(", "\"Some difference: \"", ",", "_print_list", "(", "some_diff", ")", ")", "print", "(", "\" BIG difference: \"", ",", "_print_list", "(", "big_diff", ")", ")", "print", "(", ")", "return", "(", "len", "(", "not_in_bs1", ")", "==", "0", "and", "len", "(", "not_in_bs2", ")", "==", "0", "and", "len", "(", "some_diff", ")", "==", "0", "and", "len", "(", "big_diff", ")", "==", "0", ")" ]
39.532468
22.519481
def convert_to_cluster_template(self, plugin_name, hadoop_version,
                                template_name, filecontent):
        """Convert to cluster template

        Create a Cluster Template directly from an uploaded configuration
        file, avoiding the regular Cluster Template creation mechanism.
        """
        resp = self.api.post('/plugins/%s/%s/convert-config/%s'
                             % (plugin_name,
                                hadoop_version,
                                urlparse.quote(template_name)),
                             data=filecontent)

        if resp.status_code != 202:
            raise RuntimeError('Failed to upload template file for plugin "%s"'
                               ' and version "%s"'
                               % (plugin_name, hadoop_version))
        else:
            return base.get_json(resp)['cluster_template']
[ "def", "convert_to_cluster_template", "(", "self", ",", "plugin_name", ",", "hadoop_version", ",", "template_name", ",", "filecontent", ")", ":", "resp", "=", "self", ".", "api", ".", "post", "(", "'/plugins/%s/%s/convert-config/%s'", "%", "(", "plugin_name", ",", "hadoop_version", ",", "urlparse", ".", "quote", "(", "template_name", ")", ")", ",", "data", "=", "filecontent", ")", "if", "resp", ".", "status_code", "!=", "202", ":", "raise", "RuntimeError", "(", "'Failed to upload template file for plugin \"%s\"'", "' and version \"%s\"'", "%", "(", "plugin_name", ",", "hadoop_version", ")", ")", "else", ":", "return", "base", ".", "get_json", "(", "resp", ")", "[", "'cluster_template'", "]" ]
45.611111
17.777778
def is_empty(self): """ Test interval emptiness. :return: True if interval is empty, False otherwise. """ return ( self._lower > self._upper or (self._lower == self._upper and (self._left == OPEN or self._right == OPEN)) )
[ "def", "is_empty", "(", "self", ")", ":", "return", "(", "self", ".", "_lower", ">", "self", ".", "_upper", "or", "(", "self", ".", "_lower", "==", "self", ".", "_upper", "and", "(", "self", ".", "_left", "==", "OPEN", "or", "self", ".", "_right", "==", "OPEN", ")", ")", ")" ]
28.6
19.2
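The emptiness rule, restated as a worked example with OPEN/CLOSED as illustrative stand-ins for the library's bound markers: an interval is empty when its bounds cross, or when they coincide at a point that at least one side excludes.

OPEN, CLOSED = 0, 1  # stand-ins for the library's bound constants

def is_empty(lower, upper, left, right):
    return lower > upper or (lower == upper and (left == OPEN or right == OPEN))

print(is_empty(2, 1, CLOSED, CLOSED))  # True: bounds cross
print(is_empty(3, 3, CLOSED, OPEN))    # True: [3, 3) holds no point
print(is_empty(3, 3, CLOSED, CLOSED))  # False: [3, 3] holds exactly 3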
def json(self, **kwargs): """Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. """ if not self.encoding and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using chardet to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: return json.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass return json.loads(self.text, **kwargs)
[ "def", "json", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "encoding", "and", "len", "(", "self", ".", "content", ")", ">", "3", ":", "# No encoding set. JSON RFC 4627 section 3 states we should expect", "# UTF-8, -16 or -32. Detect which one to use; If the detection or", "# decoding fails, fall back to `self.text` (using chardet to make", "# a best guess).", "encoding", "=", "guess_json_utf", "(", "self", ".", "content", ")", "if", "encoding", "is", "not", "None", ":", "try", ":", "return", "json", ".", "loads", "(", "self", ".", "content", ".", "decode", "(", "encoding", ")", ",", "*", "*", "kwargs", ")", "except", "UnicodeDecodeError", ":", "# Wrong UTF codec detected; usually because it's not UTF-8", "# but some other 8-bit codec. This is an RFC violation,", "# and the server didn't bother to tell us what codec *was*", "# used.", "pass", "return", "json", ".", "loads", "(", "self", ".", "text", ",", "*", "*", "kwargs", ")" ]
47.5
22.181818
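The detection leans on RFC 4627's observation that a JSON text begins with two ASCII characters, so the position of zero bytes reveals the UTF flavour. A simplified stand-in for `guess_json_utf` (the real one also handles UTF-32 and byte-order marks):

import json

def guess_utf(data):
    # Simplified detector: not the requests implementation.
    if data[:1] and data[0] != 0:
        return 'utf-8' if data[1:2] != b'\x00' else 'utf-16-le'
    return 'utf-16-be'

payload = '{"k": 1}'.encode('utf-16-le')
print(json.loads(payload.decode(guess_utf(payload))))  # {'k': 1}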
def get_type_name(self): """ Returns the type name of the PKCS7 structure :return: A string with the typename """ nid = _lib.OBJ_obj2nid(self._pkcs7.type) string_type = _lib.OBJ_nid2sn(nid) return _ffi.string(string_type)
[ "def", "get_type_name", "(", "self", ")", ":", "nid", "=", "_lib", ".", "OBJ_obj2nid", "(", "self", ".", "_pkcs7", ".", "type", ")", "string_type", "=", "_lib", ".", "OBJ_nid2sn", "(", "nid", ")", "return", "_ffi", ".", "string", "(", "string_type", ")" ]
30
9.111111
def add_file(self, path, parent=None, tree=TreeType.SOURCE_ROOT, target_name=None, force=True, file_options=FileOptions()): """ Adds a file to the project, taking care of the type of the file and creating additional structures depending on the file type. For instance, frameworks will be linked, embedded and search paths will be adjusted automatically. Header file will be added to the headers sections, but not compiled, whereas the source files will be added to the compilation phase. :param path: Path to the file to be added :param parent: Parent group to be added under :param tree: Tree where the path is relative to :param target_name: Target name or list of target names where the file should be added (none for every target) :param force: Add the file without checking if the file already exists :param file_options: FileOptions object to be used during the addition of the file to the project. :return: a list of elements that were added to the project successfully as PBXBuildFile objects """ results = [] # if it's not forced to add the file stop if the file already exists. if not force: for section in self.objects.get_sections(): for obj in self.objects.get_objects_in_section(section): if u'path' in obj and ProjectFiles._path_leaf(path) == ProjectFiles._path_leaf(obj.path): return [] file_ref, abs_path, path, tree, expected_build_phase = self._add_file_reference(path, parent, tree, force, file_options) if path is None or tree is None: return None # no need to create the build_files, done if not file_options.create_build_files: return results # create build_files for the targets results.extend(self._create_build_files(file_ref, target_name, expected_build_phase, file_options)) # special case for the frameworks and libraries to update the search paths if tree != TreeType.SOURCE_ROOT or abs_path is None: return results # the path is absolute and it's outside the scope of the project for linking purposes library_path = os.path.join(u'$(SRCROOT)', os.path.split(file_ref.path)[0]) if os.path.isfile(abs_path): self.add_library_search_paths([library_path], recursive=False) else: self.add_framework_search_paths([library_path, u'$(inherited)'], recursive=False) return results
[ "def", "add_file", "(", "self", ",", "path", ",", "parent", "=", "None", ",", "tree", "=", "TreeType", ".", "SOURCE_ROOT", ",", "target_name", "=", "None", ",", "force", "=", "True", ",", "file_options", "=", "FileOptions", "(", ")", ")", ":", "results", "=", "[", "]", "# if it's not forced to add the file stop if the file already exists.", "if", "not", "force", ":", "for", "section", "in", "self", ".", "objects", ".", "get_sections", "(", ")", ":", "for", "obj", "in", "self", ".", "objects", ".", "get_objects_in_section", "(", "section", ")", ":", "if", "u'path'", "in", "obj", "and", "ProjectFiles", ".", "_path_leaf", "(", "path", ")", "==", "ProjectFiles", ".", "_path_leaf", "(", "obj", ".", "path", ")", ":", "return", "[", "]", "file_ref", ",", "abs_path", ",", "path", ",", "tree", ",", "expected_build_phase", "=", "self", ".", "_add_file_reference", "(", "path", ",", "parent", ",", "tree", ",", "force", ",", "file_options", ")", "if", "path", "is", "None", "or", "tree", "is", "None", ":", "return", "None", "# no need to create the build_files, done", "if", "not", "file_options", ".", "create_build_files", ":", "return", "results", "# create build_files for the targets", "results", ".", "extend", "(", "self", ".", "_create_build_files", "(", "file_ref", ",", "target_name", ",", "expected_build_phase", ",", "file_options", ")", ")", "# special case for the frameworks and libraries to update the search paths", "if", "tree", "!=", "TreeType", ".", "SOURCE_ROOT", "or", "abs_path", "is", "None", ":", "return", "results", "# the path is absolute and it's outside the scope of the project for linking purposes", "library_path", "=", "os", ".", "path", ".", "join", "(", "u'$(SRCROOT)'", ",", "os", ".", "path", ".", "split", "(", "file_ref", ".", "path", ")", "[", "0", "]", ")", "if", "os", ".", "path", ".", "isfile", "(", "abs_path", ")", ":", "self", ".", "add_library_search_paths", "(", "[", "library_path", "]", ",", "recursive", "=", "False", ")", "else", ":", "self", ".", "add_framework_search_paths", "(", "[", "library_path", ",", "u'$(inherited)'", "]", ",", "recursive", "=", "False", ")", "return", "results" ]
56.847826
35.065217
def ToDeltas(self): """Convert the sequence to the sequence of differences between points. The value of each point v[i] is replaced by v[i+1] - v[i], except for the last point which is dropped. """ if len(self.data) < 2: self.data = [] return for i in range(0, len(self.data) - 1): if self.data[i][0] is None or self.data[i + 1][0] is None: self.data[i][0] = None else: self.data[i][0] = self.data[i + 1][0] - self.data[i][0] del self.data[-1]
[ "def", "ToDeltas", "(", "self", ")", ":", "if", "len", "(", "self", ".", "data", ")", "<", "2", ":", "self", ".", "data", "=", "[", "]", "return", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "data", ")", "-", "1", ")", ":", "if", "self", ".", "data", "[", "i", "]", "[", "0", "]", "is", "None", "or", "self", ".", "data", "[", "i", "+", "1", "]", "[", "0", "]", "is", "None", ":", "self", ".", "data", "[", "i", "]", "[", "0", "]", "=", "None", "else", ":", "self", ".", "data", "[", "i", "]", "[", "0", "]", "=", "self", ".", "data", "[", "i", "+", "1", "]", "[", "0", "]", "-", "self", ".", "data", "[", "i", "]", "[", "0", "]", "del", "self", ".", "data", "[", "-", "1", "]" ]
33.2
18.333333
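A worked run of the transform, assuming each point is a [value, ...] list as the indexing implies; None propagates into any delta that touches it, and the final point is dropped.

data = [[1], [4], [9], [None], [16]]
for i in range(len(data) - 1):
    if data[i][0] is None or data[i + 1][0] is None:
        data[i][0] = None
    else:
        data[i][0] = data[i + 1][0] - data[i][0]
del data[-1]
print(data)  # [[3], [5], [None], [None]]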
def _offset(value): """Parse timezone to offset in seconds. Args: value: A timezone in the '+0000' format. An integer would also work. Returns: The timezone offset from GMT in seconds as an integer. """ o = int(value) if o == 0: return 0 a = abs(o) s = a*36+(a%100)*24 return (o//a)*s
[ "def", "_offset", "(", "value", ")", ":", "o", "=", "int", "(", "value", ")", "if", "o", "==", "0", ":", "return", "0", "a", "=", "abs", "(", "o", ")", "s", "=", "a", "*", "36", "+", "(", "a", "%", "100", ")", "*", "24", "return", "(", "o", "//", "a", ")", "*", "s" ]
22.133333
23.6
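The compact expression hides simple arithmetic: for an offset written ±HHMM, a = HH*100 + MM, so a*36 = HH*3600 + MM*36 and (a % 100)*24 = MM*24, which sum to HH*3600 + MM*60 seconds. A quick check:

for tz in ('+0130', '-0500', '+0000'):
    o = int(tz)
    a = abs(o)
    # Same formula as above, with the zero case split out.
    s = 0 if o == 0 else (o // a) * (a * 36 + (a % 100) * 24)
    print(tz, '->', s)  # +0130 -> 5400, -0500 -> -18000, +0000 -> 0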
def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99): """Computes the value loss given the prediction of the value function. Args: value_prediction: np.ndarray of shape (B, T+1, 1) rewards: np.ndarray of shape (B, T) of rewards. reward_mask: np.ndarray of shape (B, T), the mask over rewards. gamma: float, discount factor. Returns: The average L2 value loss, averaged over instances where reward_mask is 1. """ B, T = rewards.shape # pylint: disable=invalid-name assert (B, T) == reward_mask.shape assert (B, T + 1, 1) == value_prediction.shape value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1) value_prediction = value_prediction[:, :-1] * reward_mask # (B, T) r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T) loss = (value_prediction - r2g)**2 # Take an average on only the points where mask != 0. return np.sum(loss) / np.sum(reward_mask)
[ "def", "value_loss_given_predictions", "(", "value_prediction", ",", "rewards", ",", "reward_mask", ",", "gamma", "=", "0.99", ")", ":", "B", ",", "T", "=", "rewards", ".", "shape", "# pylint: disable=invalid-name", "assert", "(", "B", ",", "T", ")", "==", "reward_mask", ".", "shape", "assert", "(", "B", ",", "T", "+", "1", ",", "1", ")", "==", "value_prediction", ".", "shape", "value_prediction", "=", "np", ".", "squeeze", "(", "value_prediction", ",", "axis", "=", "2", ")", "# (B, T+1)", "value_prediction", "=", "value_prediction", "[", ":", ",", ":", "-", "1", "]", "*", "reward_mask", "# (B, T)", "r2g", "=", "rewards_to_go", "(", "rewards", ",", "reward_mask", ",", "gamma", "=", "gamma", ")", "# (B, T)", "loss", "=", "(", "value_prediction", "-", "r2g", ")", "**", "2", "# Take an average on only the points where mask != 0.", "return", "np", ".", "sum", "(", "loss", ")", "/", "np", ".", "sum", "(", "reward_mask", ")" ]
38.296296
18.925926
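A tiny numeric check of the loss. The rewards-to-go helper below is a plain reference implementation written for this sketch, not the library's `rewards_to_go`:

import numpy as np

def rewards_to_go_ref(rewards, mask, gamma):
    # Reference discounted-sum, assumed equivalent for this toy case.
    r2g = np.zeros_like(rewards, dtype=np.float64)
    acc = np.zeros(rewards.shape[0])
    for t in reversed(range(rewards.shape[1])):
        acc = mask[:, t] * (rewards[:, t] + gamma * acc)
        r2g[:, t] = acc
    return r2g

rewards = np.array([[1.0, 1.0]])
mask = np.ones((1, 2))
pred = np.zeros((1, 3, 1))  # (B, T+1, 1), all-zero predictions
r2g = rewards_to_go_ref(rewards, mask, gamma=0.5)  # [[1.5, 1.0]]
loss = np.sum((np.squeeze(pred, 2)[:, :-1] * mask - r2g) ** 2) / np.sum(mask)
print(r2g, loss)  # loss = (1.5**2 + 1.0**2) / 2 = 1.625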
def update(self):
        """
        Update this `~photutils.isophote.EllipseSample` instance.

        This method calls the
        :meth:`~photutils.isophote.EllipseSample.extract` method to get
        the values that match the current ``geometry`` attribute, and
        then computes the mean intensity, local gradient, and other
        associated quantities.
        """
        step = self.geometry.astep

        # Update the mean value first, using extraction from main sample.
        s = self.extract()
        self.mean = np.mean(s[2])

        # Get sample with same geometry but at a different distance from
        # center. Estimate gradient from there.
        gradient, gradient_error = self._get_gradient(step)

        # Check for meaningful gradient. If no meaningful gradient, try
        # another sample, this time using larger radius. Meaningful
        # gradient means something shallower, but still close to within
        # a factor 3 from previous gradient estimate. If no previous
        # estimate is available, guess it.
        previous_gradient = self.gradient
        if not previous_gradient:
            previous_gradient = -0.05  # good enough, based on usage

        if gradient >= (previous_gradient / 3.):  # gradient is negative!
            gradient, gradient_error = self._get_gradient(2 * step)

        # If still no meaningful gradient can be measured, try with
        # previous one, slightly shallower. A factor 0.8 is not too far
        # from what is expected from geometrical sampling steps of 10-20%
        # and a deVaucouleurs law or an exponential disk (at least at its
        # inner parts, r <~ 5 req). Gradient error is meaningless in this
        # case.
        if gradient >= (previous_gradient / 3.):
            gradient = previous_gradient * 0.8
            gradient_error = None

        self.gradient = gradient
        self.gradient_error = gradient_error
        if gradient_error:
            self.gradient_relative_error = gradient_error / np.abs(gradient)
        else:
            self.gradient_relative_error = None
[ "def", "update", "(", "self", ")", ":", "step", "=", "self", ".", "geometry", ".", "astep", "# Update the mean value first, using extraction from main sample.", "s", "=", "self", ".", "extract", "(", ")", "self", ".", "mean", "=", "np", ".", "mean", "(", "s", "[", "2", "]", ")", "# Get sample with same geometry but at a different distance from", "# center. Estimate gradient from there.", "gradient", ",", "gradient_error", "=", "self", ".", "_get_gradient", "(", "step", ")", "# Check for meaningful gradient. If no meaningful gradient, try", "# another sample, this time using larger radius. Meaningful", "# gradient means something shallower, but still close to within", "# a factor 3 from previous gradient estimate. If no previous", "# estimate is available, guess it.", "previous_gradient", "=", "self", ".", "gradient", "if", "not", "previous_gradient", ":", "previous_gradient", "=", "-", "0.05", "# good enough, based on usage", "if", "gradient", ">=", "(", "previous_gradient", "/", "3.", ")", ":", "# gradient is negative!", "gradient", ",", "gradient_error", "=", "self", ".", "_get_gradient", "(", "2", "*", "step", ")", "# If still no meaningful gradient can be measured, try with", "# previous one, slightly shallower. A factor 0.8 is not too far", "# from what is expected from geometrical sampling steps of 10-20%", "# and a deVaucouleurs law or an exponential disk (at least at its", "# inner parts, r <~ 5 req). Gradient error is meaningless in this", "# case.", "if", "gradient", ">=", "(", "previous_gradient", "/", "3.", ")", ":", "gradient", "=", "previous_gradient", "*", "0.8", "gradient_error", "=", "None", "self", ".", "gradient", "=", "gradient", "self", ".", "gradient_error", "=", "gradient_error", "if", "gradient_error", ":", "self", ".", "gradient_relative_error", "=", "gradient_error", "/", "np", ".", "abs", "(", "gradient", ")", "else", ":", "self", ".", "gradient_relative_error", "=", "None" ]
42.040816
22.77551
def map_azure_exceptions(key=None, exc_pass=()): """Map Azure-specific exceptions to the simplekv-API.""" from azure.common import AzureMissingResourceHttpError, AzureHttpError,\ AzureException try: yield except AzureMissingResourceHttpError as ex: if ex.__class__.__name__ not in exc_pass: s = str(ex) if s.startswith(u"The specified container does not exist."): raise IOError(s) raise KeyError(key) except AzureHttpError as ex: if ex.__class__.__name__ not in exc_pass: raise IOError(str(ex)) except AzureException as ex: if ex.__class__.__name__ not in exc_pass: raise IOError(str(ex))
[ "def", "map_azure_exceptions", "(", "key", "=", "None", ",", "exc_pass", "=", "(", ")", ")", ":", "from", "azure", ".", "common", "import", "AzureMissingResourceHttpError", ",", "AzureHttpError", ",", "AzureException", "try", ":", "yield", "except", "AzureMissingResourceHttpError", "as", "ex", ":", "if", "ex", ".", "__class__", ".", "__name__", "not", "in", "exc_pass", ":", "s", "=", "str", "(", "ex", ")", "if", "s", ".", "startswith", "(", "u\"The specified container does not exist.\"", ")", ":", "raise", "IOError", "(", "s", ")", "raise", "KeyError", "(", "key", ")", "except", "AzureHttpError", "as", "ex", ":", "if", "ex", ".", "__class__", ".", "__name__", "not", "in", "exc_pass", ":", "raise", "IOError", "(", "str", "(", "ex", ")", ")", "except", "AzureException", "as", "ex", ":", "if", "ex", ".", "__class__", ".", "__name__", "not", "in", "exc_pass", ":", "raise", "IOError", "(", "str", "(", "ex", ")", ")" ]
39.5
13.833333
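In the source this generator is wrapped with contextlib.contextmanager (the decorator is stripped in the snippet above). A self-contained sketch of the same translate-and-reraise pattern, with a fake exception standing in for the Azure types:

from contextlib import contextmanager

class FakeAzureError(Exception):
    """Stand-in for AzureMissingResourceHttpError."""

@contextmanager
def map_errors(key=None):
    try:
        yield
    except FakeAzureError:
        # Translate the backend-specific error into the simplekv-style one.
        raise KeyError(key)

try:
    with map_errors(key='k1'):
        raise FakeAzureError('resource gone')
except KeyError as ex:
    print('mapped to', repr(ex))  # mapped to KeyError('k1')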
def get_otp(hsm, args):
    """
    Get OTP from YubiKey.
    """
    if args.no_otp:
        return None
    if hsm.version.have_unlock():
        if args.stdin:
            otp = sys.stdin.readline()
            while otp and otp[-1] == '\n':
                otp = otp[:-1]
        else:
            otp = raw_input('Enter admin YubiKey OTP (press enter to skip) : ')
        if len(otp) == 44:
            # YubiHSM admin OTPs always have a public_id length of 6 bytes
            return otp
        if otp:
            sys.stderr.write("ERROR: Invalid YubiKey OTP\n")
    return None
[ "def", "get_otp", "(", "hsm", ",", "args", ")", ":", "if", "args", ".", "no_otp", ":", "return", "None", "if", "hsm", ".", "version", ".", "have_unlock", "(", ")", ":", "if", "args", ".", "stdin", ":", "otp", "=", "sys", ".", "stdin", ".", "readline", "(", ")", "while", "otp", "and", "otp", "[", "-", "1", "]", "==", "'\\n'", ":", "otp", "=", "otp", "[", ":", "-", "1", "]", "else", ":", "otp", "=", "raw_input", "(", "'Enter admin YubiKey OTP (press enter to skip) : '", ")", "if", "len", "(", "otp", ")", "==", "44", ":", "# YubiHSM admin OTP's always have a public_id length of 6 bytes", "return", "otp", "if", "otp", ":", "sys", ".", "stderr", ".", "write", "(", "\"ERROR: Invalid YubiKey OTP\\n\"", ")", "return", "None" ]
33.176471
17.705882
def netloc(self): """Network location including host and port""" if self.port: return '%s:%d' % (self.host, self.port) return self.host
[ "def", "netloc", "(", "self", ")", ":", "if", "self", ".", "port", ":", "return", "'%s:%d'", "%", "(", "self", ".", "host", ",", "self", ".", "port", ")", "return", "self", ".", "host" ]
33.4
13.8
def create_default_file(cls, data=None, mode=None): """Create a config file and override data if specified.""" filepath = cls.get_default_filepath() if not filepath: return False filename = os.path.basename(filepath) config = read_file(get_data_path(), filename) # Find and replace data in default config data = data or {} for k, v in six.iteritems(data): v = v or "" config = re.sub( r"^(%(key)s) =[ ]*$" % {"key": k}, "%(key)s = %(value)s" % {"key": k, "value": v}, config, flags=re.MULTILINE, ) dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): os.makedirs(dirpath) with click.open_file(filepath, "w+") as f: f.write(config) if mode is not None: os.chmod(filepath, mode) return True
[ "def", "create_default_file", "(", "cls", ",", "data", "=", "None", ",", "mode", "=", "None", ")", ":", "filepath", "=", "cls", ".", "get_default_filepath", "(", ")", "if", "not", "filepath", ":", "return", "False", "filename", "=", "os", ".", "path", ".", "basename", "(", "filepath", ")", "config", "=", "read_file", "(", "get_data_path", "(", ")", ",", "filename", ")", "# Find and replace data in default config", "data", "=", "data", "or", "{", "}", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "data", ")", ":", "v", "=", "v", "or", "\"\"", "config", "=", "re", ".", "sub", "(", "r\"^(%(key)s) =[ ]*$\"", "%", "{", "\"key\"", ":", "k", "}", ",", "\"%(key)s = %(value)s\"", "%", "{", "\"key\"", ":", "k", ",", "\"value\"", ":", "v", "}", ",", "config", ",", "flags", "=", "re", ".", "MULTILINE", ",", ")", "dirpath", "=", "os", ".", "path", ".", "dirname", "(", "filepath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirpath", ")", ":", "os", ".", "makedirs", "(", "dirpath", ")", "with", "click", ".", "open_file", "(", "filepath", ",", "\"w+\"", ")", "as", "f", ":", "f", ".", "write", "(", "config", ")", "if", "mode", "is", "not", "None", ":", "os", ".", "chmod", "(", "filepath", ",", "mode", ")", "return", "True" ]
31.033333
15.766667
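The find-and-replace step in isolation: each blank `key =` line in the template is filled with the supplied value, one MULTILINE regex pass per key. A toy template:

import re

config = "name =\ntoken =\n"  # illustrative template contents
data = {"name": "alice"}
for k, v in data.items():
    config = re.sub(
        r"^(%(key)s) =[ ]*$" % {"key": k},
        "%(key)s = %(value)s" % {"key": k, "value": v or ""},
        config,
        flags=re.MULTILINE,
    )
print(config)  # "name = alice\ntoken =\n" -- only the matching line changes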
def iconcat(a, b): "Same as a += b, for a and b sequences." if not hasattr(a, '__getitem__'): msg = "'%s' object can't be concatenated" % type(a).__name__ raise TypeError(msg) a += b return a
[ "def", "iconcat", "(", "a", ",", "b", ")", ":", "if", "not", "hasattr", "(", "a", ",", "'__getitem__'", ")", ":", "msg", "=", "\"'%s' object can't be concatenated\"", "%", "type", "(", "a", ")", ".", "__name__", "raise", "TypeError", "(", "msg", ")", "a", "+=", "b", "return", "a" ]
31
18.142857
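Quick behaviour check, run alongside the definition above; note that `a` is mutated in place, matching `operator.iconcat`:

a = [1, 2]
b = iconcat(a, [3])
print(b, b is a)      # [1, 2, 3] True -- same list, extended in place
try:
    iconcat(5, [3])   # ints have no __getitem__
except TypeError as ex:
    print(ex)         # 'int' object can't be concatenated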
def _fix_reindent(self, result): """Fix a badly indented line. This is done by adding or removing from its initial indent only. """ num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
[ "def", "_fix_reindent", "(", "self", ",", "result", ")", ":", "num_indent_spaces", "=", "int", "(", "result", "[", "'info'", "]", ".", "split", "(", ")", "[", "1", "]", ")", "line_index", "=", "result", "[", "'line'", "]", "-", "1", "target", "=", "self", ".", "source", "[", "line_index", "]", "self", ".", "source", "[", "line_index", "]", "=", "' '", "*", "num_indent_spaces", "+", "target", ".", "lstrip", "(", ")" ]
33.090909
19.454545
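The fix itself is one line of string surgery once the target width is known; a toy run (the 'info' string here is illustrative, since the real format comes from the upstream checker's result dict):

info = 'reindent 8'  # illustrative checker message, not a real one
num_indent_spaces = int(info.split()[1])
target = '  return x\n'
print(repr(' ' * num_indent_spaces + target.lstrip()))  # '        return x\n'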
def save_task_info(self, res, mem_gb=0):
        """
        :param self: an object with attributes .hdf5, .argnames, .sent
        :param res: a :class:`Result` object
        :param mem_gb: memory consumption at the saving time (optional)
        """
        mon = res.mon
        name = mon.operation[6:]  # strip 'total '
        if self.hdf5:
            mon.hdf5 = self.hdf5  # needed for the flush below
            t = (mon.task_no, mon.weight, mon.duration, len(res.pik), mem_gb)
            data = numpy.array([t], task_info_dt)
            hdf5.extend3(self.hdf5.filename, 'task_info/' + name, data,
                         argnames=self.argnames, sent=self.sent)
        mon.flush()
[ "def", "save_task_info", "(", "self", ",", "res", ",", "mem_gb", "=", "0", ")", ":", "mon", "=", "res", ".", "mon", "name", "=", "mon", ".", "operation", "[", "6", ":", "]", "# strip 'total '", "if", "self", ".", "hdf5", ":", "mon", ".", "hdf5", "=", "self", ".", "hdf5", "# needed for the flush below", "t", "=", "(", "mon", ".", "task_no", ",", "mon", ".", "weight", ",", "mon", ".", "duration", ",", "len", "(", "res", ".", "pik", ")", ",", "mem_gb", ")", "data", "=", "numpy", ".", "array", "(", "[", "t", "]", ",", "task_info_dt", ")", "hdf5", ".", "extend3", "(", "self", ".", "hdf5", ".", "filename", ",", "'task_info/'", "+", "name", ",", "data", ",", "argnames", "=", "self", ".", "argnames", ",", "sent", "=", "self", ".", "sent", ")", "mon", ".", "flush", "(", ")" ]
41.733333
15.6
def get_choices(self): """stub""" # ideally would return a displayText object in text ... except for legacy # use cases like OEA, it expects a text string. choices = [] # for current_choice in self.my_osid_object.object_map['choices']: for current_choice in self.my_osid_object._my_map['choices']: filtered_choice = { 'id': current_choice['id'], 'text': self.get_matching_language_value('texts', dictionary=current_choice).text, 'name': current_choice['name'] } choices.append(filtered_choice) return choices
[ "def", "get_choices", "(", "self", ")", ":", "# ideally would return a displayText object in text ... except for legacy", "# use cases like OEA, it expects a text string.", "choices", "=", "[", "]", "# for current_choice in self.my_osid_object.object_map['choices']:", "for", "current_choice", "in", "self", ".", "my_osid_object", ".", "_my_map", "[", "'choices'", "]", ":", "filtered_choice", "=", "{", "'id'", ":", "current_choice", "[", "'id'", "]", ",", "'text'", ":", "self", ".", "get_matching_language_value", "(", "'texts'", ",", "dictionary", "=", "current_choice", ")", ".", "text", ",", "'name'", ":", "current_choice", "[", "'name'", "]", "}", "choices", ".", "append", "(", "filtered_choice", ")", "return", "choices" ]
46.066667
19.8
def _calculate_price_by_slippage(self, action: str, price: float) -> float:
        """
        Calculate the trade price after accounting for slippage.
        :param action: trade action, one of ['buy', 'sell']
        :param price: original trade price
        :return: trade price adjusted for slippage
        """
        if action == "buy":
            return price * (1 + self.slippage)
        if action == "sell":
            return price * (1 - self.slippage)
        return price
[ "def", "_calculate_price_by_slippage", "(", "self", ",", "action", ":", "str", ",", "price", ":", "float", ")", "->", "float", ":", "if", "action", "==", "\"buy\"", ":", "return", "price", "*", "(", "1", "+", "self", ".", "slippage", ")", "if", "action", "==", "\"sell\"", ":", "return", "price", "*", "(", "1", "-", "self", ".", "slippage", ")", "return", "price" ]
32.083333
12.083333
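Worked numbers: with 1% slippage a buy fills above the quoted price and a sell fills below it, by the same relative amount.

slippage = 0.01
price = 100.0
print(price * (1 + slippage))  # buy fills at 101.0
print(price * (1 - slippage))  # sell fills at 99.0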
def loggers(self): """Return all the loggers that should be activated""" ret = [] if self.logger_name: if isinstance(self.logger_name, logging.Logger): ret.append((self.logger_name.name, self.logger_name)) else: ret.append((self.logger_name, logging.getLogger(self.logger_name))) else: ret = list(logging.Logger.manager.loggerDict.items()) ret.append(("root", logging.getLogger())) return ret
[ "def", "loggers", "(", "self", ")", ":", "ret", "=", "[", "]", "if", "self", ".", "logger_name", ":", "if", "isinstance", "(", "self", ".", "logger_name", ",", "logging", ".", "Logger", ")", ":", "ret", ".", "append", "(", "(", "self", ".", "logger_name", ".", "name", ",", "self", ".", "logger_name", ")", ")", "else", ":", "ret", ".", "append", "(", "(", "self", ".", "logger_name", ",", "logging", ".", "getLogger", "(", "self", ".", "logger_name", ")", ")", ")", "else", ":", "ret", "=", "list", "(", "logging", ".", "Logger", ".", "manager", ".", "loggerDict", ".", "items", "(", ")", ")", "ret", ".", "append", "(", "(", "\"root\"", ",", "logging", ".", "getLogger", "(", ")", ")", ")", "return", "ret" ]
38.538462
23.076923
def iscm_md_update_dict(self, keypath, data): """ Update a metadata dictionary entry """ current = self.metadata for k in string.split(keypath, "."): if not current.has_key(k): current[k] = {} current = current[k] current.update(data)
[ "def", "iscm_md_update_dict", "(", "self", ",", "keypath", ",", "data", ")", ":", "current", "=", "self", ".", "metadata", "for", "k", "in", "string", ".", "split", "(", "keypath", ",", "\".\"", ")", ":", "if", "not", "current", ".", "has_key", "(", "k", ")", ":", "current", "[", "k", "]", "=", "{", "}", "current", "=", "current", "[", "k", "]", "current", ".", "update", "(", "data", ")" ]
31.3
5.1
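The record above leans on Python 2 idioms (`string.split`, `dict.has_key`). A minimal Python 3 sketch of the same walk-and-update, using `setdefault` to create missing levels:

def md_update_dict(metadata, keypath, data):
    current = metadata
    for k in keypath.split('.'):
        current = current.setdefault(k, {})  # create the level if absent
    current.update(data)

md = {}
md_update_dict(md, 'a.b', {'x': 1})
print(md)  # {'a': {'b': {'x': 1}}}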
def download(sid, credentials=None, subjects_path=None, overwrite=False, release='HCP_1200', database='hcp-openaccess', file_list=None): ''' download(sid) downloads the data for subject with the given subject id. By default, the subject will be placed in the first HCP subject directory in the subjects directories list. Note: In order for downloading to work, you must have s3fs installed. This is not a requirement for the neuropythy library and does not install automatically when installing via pip. The github repository for this library can be found at https://github.com/dask/s3fs. Installation instructions can be found here: http://s3fs.readthedocs.io/en/latest/install.html Accepted options include: * credentials (default: None) may be used to specify the Amazon AWS Bucket credentials, which can be generated from the HCP db (https://db.humanconnectome.org/). If this argument can be coerced to a credentials tuple via the to_credentials function, that result will be used. If None, then the function will try to use the hcp_credentials configuration item in neuropythy.config; otherwise an error is raised. * subjects_path (default: None) specifies where the subject should be placed. If None, then the first directory in the subjects paths list is used. If there is not one of these then an error is raised. * overwrite (default: False) specifies whether or not to overwrite files that already exist. In addition to True (do overwrite) and False (don't overwrite), the value 'error' indicates that an error should be raised if a file already exists. ''' if s3fs is None: raise RuntimeError('s3fs was not successfully loaded, so downloads may not occur; check ' 'your Python configuration to make sure that s3fs is installed. See ' 'http://s3fs.readthedocs.io/en/latest/install.html for details.') if credentials is None: credentials = config['hcp_credentials'] if credentials is None: raise ValueError('No hcp_credentials specified or found') (s3fs_key, s3fs_secret) = to_credentials(credentials) if subjects_path is None: sdirs = config['hcp_subject_paths'] subjects_path = next((sd for sd in sdirs if os.path.isdir(sd)), None) if subjects_path is None: raise ValueError('No subjects path given or found') else: subjects_path = os.path.expanduser(subjects_path) # Make sure we can connect to the bucket first... fs = s3fs.S3FileSystem(key=s3fs_key, secret=s3fs_secret) # Okay, make sure the release is found if not fs.exists('/'.join([database, release])): raise ValueError('database/release (%s/%s) not found' % (database, release)) # Check on the subject id to sid = to_subject_id(sid) hcp_sdir = '/'.join([database, release, str(sid)]) if not fs.exists(hcp_sdir): raise ValueError('Subject %d not found in release' % sid) # Okay, lets download this subject! loc_sdir = os.path.join(subjects_path, str(sid)) # walk through the subject structures pulled = [] for flnm in six.iterkeys(subject_structure['filemap']): flnm = flnm.format({'id':sid}) loc_flnm = os.path.join(loc_sdir, flnm) hcp_flnm = '/'.join([hcp_sdir, flnm]) if not overwrite and os.path.isfile(loc_flnm): continue # gotta download it! basedir = os.path.split(loc_flnm)[0] if not os.path.isdir(basedir): os.makedirs(os.path.abspath(basedir), 0o755) fs.get(hcp_flnm, loc_flnm) pulled.append(loc_flnm) return pulled
[ "def", "download", "(", "sid", ",", "credentials", "=", "None", ",", "subjects_path", "=", "None", ",", "overwrite", "=", "False", ",", "release", "=", "'HCP_1200'", ",", "database", "=", "'hcp-openaccess'", ",", "file_list", "=", "None", ")", ":", "if", "s3fs", "is", "None", ":", "raise", "RuntimeError", "(", "'s3fs was not successfully loaded, so downloads may not occur; check '", "'your Python configuration to make sure that s3fs is installed. See '", "'http://s3fs.readthedocs.io/en/latest/install.html for details.'", ")", "if", "credentials", "is", "None", ":", "credentials", "=", "config", "[", "'hcp_credentials'", "]", "if", "credentials", "is", "None", ":", "raise", "ValueError", "(", "'No hcp_credentials specified or found'", ")", "(", "s3fs_key", ",", "s3fs_secret", ")", "=", "to_credentials", "(", "credentials", ")", "if", "subjects_path", "is", "None", ":", "sdirs", "=", "config", "[", "'hcp_subject_paths'", "]", "subjects_path", "=", "next", "(", "(", "sd", "for", "sd", "in", "sdirs", "if", "os", ".", "path", ".", "isdir", "(", "sd", ")", ")", ",", "None", ")", "if", "subjects_path", "is", "None", ":", "raise", "ValueError", "(", "'No subjects path given or found'", ")", "else", ":", "subjects_path", "=", "os", ".", "path", ".", "expanduser", "(", "subjects_path", ")", "# Make sure we can connect to the bucket first...", "fs", "=", "s3fs", ".", "S3FileSystem", "(", "key", "=", "s3fs_key", ",", "secret", "=", "s3fs_secret", ")", "# Okay, make sure the release is found", "if", "not", "fs", ".", "exists", "(", "'/'", ".", "join", "(", "[", "database", ",", "release", "]", ")", ")", ":", "raise", "ValueError", "(", "'database/release (%s/%s) not found'", "%", "(", "database", ",", "release", ")", ")", "# Check on the subject id to", "sid", "=", "to_subject_id", "(", "sid", ")", "hcp_sdir", "=", "'/'", ".", "join", "(", "[", "database", ",", "release", ",", "str", "(", "sid", ")", "]", ")", "if", "not", "fs", ".", "exists", "(", "hcp_sdir", ")", ":", "raise", "ValueError", "(", "'Subject %d not found in release'", "%", "sid", ")", "# Okay, lets download this subject!", "loc_sdir", "=", "os", ".", "path", ".", "join", "(", "subjects_path", ",", "str", "(", "sid", ")", ")", "# walk through the subject structures", "pulled", "=", "[", "]", "for", "flnm", "in", "six", ".", "iterkeys", "(", "subject_structure", "[", "'filemap'", "]", ")", ":", "flnm", "=", "flnm", ".", "format", "(", "{", "'id'", ":", "sid", "}", ")", "loc_flnm", "=", "os", ".", "path", ".", "join", "(", "loc_sdir", ",", "flnm", ")", "hcp_flnm", "=", "'/'", ".", "join", "(", "[", "hcp_sdir", ",", "flnm", "]", ")", "if", "not", "overwrite", "and", "os", ".", "path", ".", "isfile", "(", "loc_flnm", ")", ":", "continue", "# gotta download it!", "basedir", "=", "os", ".", "path", ".", "split", "(", "loc_flnm", ")", "[", "0", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "basedir", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "abspath", "(", "basedir", ")", ",", "0o755", ")", "fs", ".", "get", "(", "hcp_flnm", ",", "loc_flnm", ")", "pulled", ".", "append", "(", "loc_flnm", ")", "return", "pulled" ]
60.266667
30.233333
def prepare_notebook_context(request, notebook_context): """Fill in notebook context with default values.""" if not notebook_context: notebook_context = {} # Override notebook Jinja templates if "extra_template_paths" not in notebook_context: notebook_context["extra_template_paths"] = [os.path.join(os.path.dirname(__file__), "server", "templates")] # Furious invalid state follows if we let this slip through assert type(notebook_context["extra_template_paths"]) == list, "Got bad extra_template_paths {}".format(notebook_context["extra_template_paths"]) # Jinja variables notebook_context["jinja_environment_options"] = notebook_context.get("jinja_environment_options", {}) assert type(notebook_context["jinja_environment_options"]) == dict # XXX: Following passing of global variables to Jinja templates requires Jinja 2.8.0dev+ version and is not yet supported # http://jinja.pocoo.org/docs/dev/api/#jinja2.Environment.globals # notebook_context["jinja_environment_options"]["globals"] = notebook_context["jinja_environment_options"].get("globals", {}) # globals_ = notebook_context["jinja_environment_options"]["globals"] # # assert type(globals_) == dict # # if not "home_url" in globals_: # globals_["home_url"] = request.host_url # # if not "home_title" in globals_: # globals_["home_title"] = "Back to site" # Tell notebook to correctly address WebSockets allow origin policy notebook_context["allow_origin"] = route_to_alt_domain(request, request.host_url) notebook_context["notebook_path"] = request.route_path("notebook_proxy", remainder="") # Record the hash of the current parameters, so we know if this user accesses the notebook in this or different context if "context_hash" not in notebook_context: notebook_context["context_hash"] = make_dict_hash(notebook_context) print(notebook_context)
[ "def", "prepare_notebook_context", "(", "request", ",", "notebook_context", ")", ":", "if", "not", "notebook_context", ":", "notebook_context", "=", "{", "}", "# Override notebook Jinja templates", "if", "\"extra_template_paths\"", "not", "in", "notebook_context", ":", "notebook_context", "[", "\"extra_template_paths\"", "]", "=", "[", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"server\"", ",", "\"templates\"", ")", "]", "# Furious invalid state follows if we let this slip through", "assert", "type", "(", "notebook_context", "[", "\"extra_template_paths\"", "]", ")", "==", "list", ",", "\"Got bad extra_template_paths {}\"", ".", "format", "(", "notebook_context", "[", "\"extra_template_paths\"", "]", ")", "# Jinja variables", "notebook_context", "[", "\"jinja_environment_options\"", "]", "=", "notebook_context", ".", "get", "(", "\"jinja_environment_options\"", ",", "{", "}", ")", "assert", "type", "(", "notebook_context", "[", "\"jinja_environment_options\"", "]", ")", "==", "dict", "# XXX: Following passing of global variables to Jinja templates requires Jinja 2.8.0dev+ version and is not yet supported", "# http://jinja.pocoo.org/docs/dev/api/#jinja2.Environment.globals", "# notebook_context[\"jinja_environment_options\"][\"globals\"] = notebook_context[\"jinja_environment_options\"].get(\"globals\", {})", "# globals_ = notebook_context[\"jinja_environment_options\"][\"globals\"]", "#", "# assert type(globals_) == dict", "#", "# if not \"home_url\" in globals_:", "# globals_[\"home_url\"] = request.host_url", "#", "# if not \"home_title\" in globals_:", "# globals_[\"home_title\"] = \"Back to site\"", "# Tell notebook to correctly address WebSockets allow origin policy", "notebook_context", "[", "\"allow_origin\"", "]", "=", "route_to_alt_domain", "(", "request", ",", "request", ".", "host_url", ")", "notebook_context", "[", "\"notebook_path\"", "]", "=", "request", ".", "route_path", "(", "\"notebook_proxy\"", ",", "remainder", "=", "\"\"", ")", "# Record the hash of the current parameters, so we know if this user accesses the notebook in this or different context", "if", "\"context_hash\"", "not", "in", "notebook_context", ":", "notebook_context", "[", "\"context_hash\"", "]", "=", "make_dict_hash", "(", "notebook_context", ")", "print", "(", "notebook_context", ")" ]
46.804878
34.341463
def git_checkout(git_branch=None, locale_root=None): """ Checkouts branch to last commit :param git_branch: branch to checkout :param locale_root: locale folder path :return: tuple stdout, stderr of completed command """ if git_branch is None: git_branch = settings.GIT_BRANCH if locale_root is None: locale_root = settings.LOCALE_ROOT proc = Popen('git checkout ' + git_branch + ' -- ' + locale_root, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() return stdout, stderr
[ "def", "git_checkout", "(", "git_branch", "=", "None", ",", "locale_root", "=", "None", ")", ":", "if", "git_branch", "is", "None", ":", "git_branch", "=", "settings", ".", "GIT_BRANCH", "if", "locale_root", "is", "None", ":", "locale_root", "=", "settings", ".", "LOCALE_ROOT", "proc", "=", "Popen", "(", "'git checkout '", "+", "git_branch", "+", "' -- '", "+", "locale_root", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", ")", "return", "stdout", ",", "stderr" ]
32.941176
11.882353
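An equivalent call with an argument list instead of shell=True, which sidesteps quoting problems when the branch or path contains spaces; a sketch, not the project's API:

from subprocess import PIPE, Popen

def git_checkout_argv(git_branch, locale_root):
    # Same git invocation, but arguments are passed as a list so the
    # shell never interprets them.
    proc = Popen(['git', 'checkout', git_branch, '--', locale_root],
                 stdout=PIPE, stderr=PIPE)
    return proc.communicate()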
def neurite_root_section_ids(self):
        '''Get the section IDs of the initial neurite sections'''
        sec = self.sections
        return [i for i, ss in enumerate(sec)
                if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
                                    ss.ntype != POINT_TYPE.SOMA)]
[ "def", "neurite_root_section_ids", "(", "self", ")", ":", "sec", "=", "self", ".", "sections", "return", "[", "i", "for", "i", ",", "ss", "in", "enumerate", "(", "sec", ")", "if", "ss", ".", "pid", ">", "-", "1", "and", "(", "sec", "[", "ss", ".", "pid", "]", ".", "ntype", "==", "POINT_TYPE", ".", "SOMA", "and", "ss", ".", "ntype", "!=", "POINT_TYPE", ".", "SOMA", ")", "]" ]
52.333333
18.333333
def recalculate_checksums(self, flags=0): """ (Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet. Individual checksum calculations may be disabled via the appropriate flag. Typically this function should be invoked on a modified packet before it is injected with WinDivert.send(). Returns the number of checksums calculated. See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums """ buff, buff_ = self.__to_buffers() num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags) if PY2: self.raw = memoryview(buff)[:len(self.raw)] return num
[ "def", "recalculate_checksums", "(", "self", ",", "flags", "=", "0", ")", ":", "buff", ",", "buff_", "=", "self", ".", "__to_buffers", "(", ")", "num", "=", "windivert_dll", ".", "WinDivertHelperCalcChecksums", "(", "ctypes", ".", "byref", "(", "buff_", ")", ",", "len", "(", "self", ".", "raw", ")", ",", "flags", ")", "if", "PY2", ":", "self", ".", "raw", "=", "memoryview", "(", "buff", ")", "[", ":", "len", "(", "self", ".", "raw", ")", "]", "return", "num" ]
51.785714
28.357143