Dataset columns:
  text: string (lengths 89 to 104k characters)
  code_tokens: list of strings
  avg_line_len: float64 (7.91 to 980)
  score: float64 (0 to 630)

Each row below gives, in that order: the text field (the source of one Python function), its code_tokens list, its avg_line_len, and its score.
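The columns are not otherwise documented. Judging from the rows, text holds the raw source of a single function, code_tokens is its token sequence, and avg_line_len is presumably the mean number of characters per line of text; what score measures is not stated. A minimal sketch of how avg_line_len could be recomputed from a row (the helper name and the example row are assumptions, not part of the dataset):

    # Hypothetical helper: mean characters per line of a code sample.
    # Assumes the "text" field keeps the function source with its original newlines.
    def avg_line_len(text):
        lines = text.splitlines() or ['']
        return sum(len(line) for line in lines) / len(lines)

    # Assumed usage on a made-up row:
    row = {'text': 'def f(x):\n    return x + 1\n'}
    print(avg_line_len(row['text']))  # (9 + 16) / 2 = 12.5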
def xpathNextDescendantOrSelf(self, cur):
    """Traversal function for the "descendant-or-self" direction
    the descendant-or-self axis contains the context node and the
    descendants of the context node in document order; thus the context
    node is the first node on the axis, and the first child of the
    context node is the second node on the axis """
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    ret = libxml2mod.xmlXPathNextDescendantOrSelf(self._o, cur__o)
    if ret is None:
        raise xpathError('xmlXPathNextDescendantOrSelf() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
[ "def", "xpathNextDescendantOrSelf", "(", "self", ",", "cur", ")", ":", "if", "cur", "is", "None", ":", "cur__o", "=", "None", "else", ":", "cur__o", "=", "cur", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlXPathNextDescendantOrSelf", "(", "self", ".", "_o", ",", "cur__o", ")", "if", "ret", "is", "None", ":", "raise", "xpathError", "(", "'xmlXPathNextDescendantOrSelf() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
51.153846
16.923077
def y_grid(self, grid=None):
    """The vertical lines that run across the chart from the y-ticks.

    If a boolean value is given, these gridlines will be turned on or off.
    Otherwise, the method will return their current state.

    :param bool grid: Turns the gridlines on or off.
    :rtype: ``bool``"""
    if grid is None:
        return self._y_grid
    else:
        if not isinstance(grid, bool):
            raise TypeError("grid must be boolean, not '%s'" % grid)
        self._y_grid = grid
[ "def", "y_grid", "(", "self", ",", "grid", "=", "None", ")", ":", "if", "grid", "is", "None", ":", "return", "self", ".", "_y_grid", "else", ":", "if", "not", "isinstance", "(", "grid", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"grid must be boolean, not '%s'\"", "%", "grid", ")", "self", ".", "_y_grid", "=", "grid" ]
35.866667
20.2
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
    r"""
    Builds a histogram of items in item_list

    Args:
        item_list (list): list with hashable items (usually containing duplicates)

    Returns:
        dict : dictionary where the keys are items in item_list, and the values
          are the number of times the item appears in item_list.

    CommandLine:
        python -m utool.util_dict --test-dict_hist

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
        >>> hist_ = dict_hist(item_list)
        >>> result = ut.repr2(hist_)
        >>> print(result)
        {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}
    """
    if labels is None:
        # hist_ = defaultdict(lambda: 0)
        hist_ = defaultdict(int)
    else:
        hist_ = {k: 0 for k in labels}
    if weight_list is None:
        # weight_list = it.repeat(1)
        for item in item_list:
            hist_[item] += 1
    else:
        for item, weight in zip(item_list, weight_list):
            hist_[item] += weight
    # hist_ = dict(hist_)
    if ordered:
        # import utool as ut
        # key_order = ut.sortedby(list(hist_.keys()), list(hist_.values()))
        getval = op.itemgetter(1)
        key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]
        hist_ = order_dict_by(hist_, key_order)
    return hist_
[ "def", "dict_hist", "(", "item_list", ",", "weight_list", "=", "None", ",", "ordered", "=", "False", ",", "labels", "=", "None", ")", ":", "if", "labels", "is", "None", ":", "# hist_ = defaultdict(lambda: 0)", "hist_", "=", "defaultdict", "(", "int", ")", "else", ":", "hist_", "=", "{", "k", ":", "0", "for", "k", "in", "labels", "}", "if", "weight_list", "is", "None", ":", "# weight_list = it.repeat(1)", "for", "item", "in", "item_list", ":", "hist_", "[", "item", "]", "+=", "1", "else", ":", "for", "item", ",", "weight", "in", "zip", "(", "item_list", ",", "weight_list", ")", ":", "hist_", "[", "item", "]", "+=", "weight", "# hist_ = dict(hist_)", "if", "ordered", ":", "# import utool as ut", "# key_order = ut.sortedby(list(hist_.keys()), list(hist_.values()))", "getval", "=", "op", ".", "itemgetter", "(", "1", ")", "key_order", "=", "[", "key", "for", "(", "key", ",", "value", ")", "in", "sorted", "(", "hist_", ".", "items", "(", ")", ",", "key", "=", "getval", ")", "]", "hist_", "=", "order_dict_by", "(", "hist_", ",", "key_order", ")", "return", "hist_" ]
33.045455
18.659091
def _chorder(self):
    """Add <interleave> if child order is arbitrary."""
    if (self.interleave and
            len([c for c in self.children if ":" not in c.name]) > 1):
        return "<interleave>%s</interleave>"
    return "%s"
[ "def", "_chorder", "(", "self", ")", ":", "if", "(", "self", ".", "interleave", "and", "len", "(", "[", "c", "for", "c", "in", "self", ".", "children", "if", "\":\"", "not", "in", "c", ".", "name", "]", ")", ">", "1", ")", ":", "return", "\"<interleave>%s</interleave>\"", "return", "\"%s\"" ]
41.333333
15.166667
def get_limits_for_pool(self, pool):
    """
    meterer.get_limits_for_pool(pool) -> dict

    Returns the limits for the given pool. If the pool does not have
    limits set, {} is returned.

    The resulting dict has the following format. Each item is optional and
    indicates no limit for the specified time period.
    {
        "year": int,
        "month": int,
        "week": int,
        "day": int,
        "hour": int,
    }
    """
    pool_limits = self.cache.get("LIMIT:%s" % pool)
    if pool_limits is None:
        return {}

    return json_loads(pool_limits)
[ "def", "get_limits_for_pool", "(", "self", ",", "pool", ")", ":", "pool_limits", "=", "self", ".", "cache", ".", "get", "(", "\"LIMIT:%s\"", "%", "pool", ")", "if", "pool_limits", "is", "None", ":", "return", "{", "}", "return", "json_loads", "(", "pool_limits", ")" ]
28.727273
19.363636
def _update(self):
    '''
    Given degree, minute, and second information, clean up the variables and
    make them consistent (for example, if minutes > 60, add extra to degrees,
    or if degrees is a decimal, add extra to minutes).
    '''
    self.decimal_degree = self._calc_decimaldegree(self.degree, self.minute, self.second)
    self.degree, self.minute, self.decimal_minute, self.second = self._calc_degreeminutes(self.decimal_degree)
[ "def", "_update", "(", "self", ")", ":", "self", ".", "decimal_degree", "=", "self", ".", "_calc_decimaldegree", "(", "self", ".", "degree", ",", "self", ".", "minute", ",", "self", ".", "second", ")", "self", ".", "degree", ",", "self", ".", "minute", ",", "self", ".", "decimal_minute", ",", "self", ".", "second", "=", "self", ".", "_calc_degreeminutes", "(", "self", ".", "decimal_degree", ")" ]
58.25
38.25
def create_serv_obj(self, tenant_id):
    """Creates and stores the service object associated with a tenant. """
    self.service_attr[tenant_id] = ServiceIpSegTenantMap()
    self.store_tenant_obj(tenant_id, self.service_attr[tenant_id])
[ "def", "create_serv_obj", "(", "self", ",", "tenant_id", ")", ":", "self", ".", "service_attr", "[", "tenant_id", "]", "=", "ServiceIpSegTenantMap", "(", ")", "self", ".", "store_tenant_obj", "(", "tenant_id", ",", "self", ".", "service_attr", "[", "tenant_id", "]", ")" ]
61.75
13.75
def simulate(self, ts_length, init=None, num_reps=None, random_state=None):
    """
    Simulate time series of state transitions, where the states are
    annotated with their values (if `state_values` is not None).

    Parameters
    ----------
    ts_length : scalar(int)
        Length of each simulation.

    init : scalar or array_like, optional(default=None)
        Initial state value(s). If None, the initial state is randomly
        drawn.

    num_reps : scalar(int), optional(default=None)
        Number of repetitions of simulation.

    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState is
        used.

    Returns
    -------
    X : ndarray(ndim=1 or 2)
        Array containing the sample path(s), of shape (ts_length,) if
        init is a scalar (integer) or None and num_reps is None; of
        shape (k, ts_length) otherwise, where k = len(init) if
        (init, num_reps) = (array, None), k = num_reps if
        (init, num_reps) = (int or None, int), and k = len(init)*num_reps
        if (init, num_reps) = (array, int).

    """
    if init is not None:
        init_idx = self.get_index(init)
    else:
        init_idx = None
    X = self.simulate_indices(ts_length, init=init_idx, num_reps=num_reps,
                              random_state=random_state)

    # Annotate states
    if self.state_values is not None:
        X = self.state_values[X]

    return X
[ "def", "simulate", "(", "self", ",", "ts_length", ",", "init", "=", "None", ",", "num_reps", "=", "None", ",", "random_state", "=", "None", ")", ":", "if", "init", "is", "not", "None", ":", "init_idx", "=", "self", ".", "get_index", "(", "init", ")", "else", ":", "init_idx", "=", "None", "X", "=", "self", ".", "simulate_indices", "(", "ts_length", ",", "init", "=", "init_idx", ",", "num_reps", "=", "num_reps", ",", "random_state", "=", "random_state", ")", "# Annotate states", "if", "self", ".", "state_values", "is", "not", "None", ":", "X", "=", "self", ".", "state_values", "[", "X", "]", "return", "X" ]
37.173913
22.826087
def is_up_url(url, allow_redirects=False, timeout=5):
    r""" Check URL to see if it is a valid web page, return the redirected location if it is

    Returns:
      None if ConnectionError
      False if url is invalid (any HTTP error code)
      cleaned up URL (following redirects and possibly adding HTTP schema "http://")

    >>> is_up_url("duckduckgo.com")  # a more private, less manipulative search engine
    'https://duckduckgo.com/'
    >>> urlisup = is_up_url("totalgood.org")
    >>> not urlisup or str(urlisup).startswith('http')
    True
    >>> urlisup = is_up_url("wikipedia.org")
    >>> str(urlisup).startswith('http')
    True
    >>> 'wikipedia.org' in str(urlisup)
    True
    >>> bool(is_up_url('8158989668202919656'))
    False
    >>> is_up_url('invalidurlwithoutadomain')
    False
    """
    if not isinstance(url, basestring) or '.' not in url:
        return False
    normalized_url = prepend_http(url)
    session = requests.Session()
    session.mount(url, HTTPAdapter(max_retries=2))
    try:
        resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout)
    except ConnectionError:
        return None
    except:
        return None
    if resp.status_code in (301, 302, 307) or resp.headers.get('location', None):
        return resp.headers.get('location', None)  # return redirected URL
    elif 100 <= resp.status_code < 400:
        return normalized_url  # return the original URL that was requested/visited
    else:
        return False
[ "def", "is_up_url", "(", "url", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "5", ")", ":", "if", "not", "isinstance", "(", "url", ",", "basestring", ")", "or", "'.'", "not", "in", "url", ":", "return", "False", "normalized_url", "=", "prepend_http", "(", "url", ")", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "mount", "(", "url", ",", "HTTPAdapter", "(", "max_retries", "=", "2", ")", ")", "try", ":", "resp", "=", "session", ".", "get", "(", "normalized_url", ",", "allow_redirects", "=", "allow_redirects", ",", "timeout", "=", "timeout", ")", "except", "ConnectionError", ":", "return", "None", "except", ":", "return", "None", "if", "resp", ".", "status_code", "in", "(", "301", ",", "302", ",", "307", ")", "or", "resp", ".", "headers", ".", "get", "(", "'location'", ",", "None", ")", ":", "return", "resp", ".", "headers", ".", "get", "(", "'location'", ",", "None", ")", "# return redirected URL", "elif", "100", "<=", "resp", ".", "status_code", "<", "400", ":", "return", "normalized_url", "# return the original URL that was requested/visited", "else", ":", "return", "False" ]
36.875
20.8
def format_command(
    command_args,  # type: List[str]
    command_output,  # type: str
):
    # type: (...) -> str
    """
    Format command information for logging.
    """
    text = 'Command arguments: {}\n'.format(command_args)

    if not command_output:
        text += 'Command output: None'
    elif logger.getEffectiveLevel() > logging.DEBUG:
        text += 'Command output: [use --verbose to show]'
    else:
        if not command_output.endswith('\n'):
            command_output += '\n'
        text += (
            'Command output:\n{}'
            '-----------------------------------------'
        ).format(command_output)

    return text
[ "def", "format_command", "(", "command_args", ",", "# type: List[str]", "command_output", ",", "# type: str", ")", ":", "# type: (...) -> str", "text", "=", "'Command arguments: {}\\n'", ".", "format", "(", "command_args", ")", "if", "not", "command_output", ":", "text", "+=", "'Command output: None'", "elif", "logger", ".", "getEffectiveLevel", "(", ")", ">", "logging", ".", "DEBUG", ":", "text", "+=", "'Command output: [use --verbose to show]'", "else", ":", "if", "not", "command_output", ".", "endswith", "(", "'\\n'", ")", ":", "command_output", "+=", "'\\n'", "text", "+=", "(", "'Command output:\\n{}'", "'-----------------------------------------'", ")", ".", "format", "(", "command_output", ")", "return", "text" ]
27.826087
15.304348
def model(self, inputs, mode='train'):
    """Build a simple convnet (BN before ReLU).

    Args:
        inputs: a tensor of size [batch_size, height, width, channels]
        mode: string in ['train', 'test']

    Returns:
        the last op containing the predictions

    Note:
        Best score
        Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
        Worst score
        Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
    """
    # Extract features
    training = (mode == 'train')
    with tf.variable_scope('conv1') as scope:
        conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)

    with tf.variable_scope('conv2') as scope:
        conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)

    with tf.variable_scope('conv3') as scope:
        conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
        bn = tf.layers.batch_normalization(inputs=conv, training=training)
        bn = tf.nn.relu(bn)
        pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)

    # Classify
    with tf.variable_scope('fc') as scope:
        flat = tf.layers.flatten(pool)
        fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)
        softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)

    return softmax
[ "def", "model", "(", "self", ",", "inputs", ",", "mode", "=", "'train'", ")", ":", "# Extract features", "training", "=", "(", "mode", "==", "'train'", ")", "with", "tf", ".", "variable_scope", "(", "'conv1'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "inputs", ",", "filters", "=", "16", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "16", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "with", "tf", ".", "variable_scope", "(", "'conv2'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "pool", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "with", "tf", ".", "variable_scope", "(", "'conv3'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "pool", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "# Classify", "with", "tf", ".", "variable_scope", "(", "'fc'", ")", "as", "scope", ":", "flat", "=", "tf", ".", "layers", ".", "flatten", "(", "pool", ")", "fc", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "flat", ",", "units", "=", "32", ",", 
"activation", "=", "tf", ".", "nn", ".", "relu", ")", "softmax", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "fc", ",", "units", "=", "self", ".", "num_classes", ",", "activation", "=", "tf", ".", "nn", ".", "softmax", ")", "return", "softmax" ]
52.55102
28.714286
def plfit_lsq(x, y):
    """
    Returns A and B in y=Ax^B
    http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    """
    n = len(x)
    btop = n * (log(x) * log(y)).sum() - (log(x)).sum() * (log(y)).sum()
    bbottom = n * (log(x) ** 2).sum() - (log(x).sum()) ** 2
    b = btop / bbottom
    a = (log(y).sum() - b * log(x).sum()) / n

    A = exp(a)
    return A, b
[ "def", "plfit_lsq", "(", "x", ",", "y", ")", ":", "n", "=", "len", "(", "x", ")", "btop", "=", "n", "*", "(", "log", "(", "x", ")", "*", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ")", ".", "sum", "(", ")", "*", "(", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "bbottom", "=", "n", "*", "(", "log", "(", "x", ")", "**", "2", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "**", "2", "b", "=", "btop", "/", "bbottom", "a", "=", "(", "log", "(", "y", ")", ".", "sum", "(", ")", "-", "b", "*", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "/", "n", "A", "=", "exp", "(", "a", ")", "return", "A", ",", "b" ]
27.615385
18.538462
def _get_encrypted_masterpassword(self):
    """ Obtain the encrypted masterkey

        .. note:: The encrypted masterkey is checksummed, so that we can
            figure out that a provided password is correct or not. The
            checksum is only 4 bytes long!
    """
    if not self.unlocked():
        raise WalletLocked
    aes = AESCipher(self.password)
    return "{}${}".format(
        self._derive_checksum(self.masterkey),
        aes.encrypt(self.masterkey)
    )
[ "def", "_get_encrypted_masterpassword", "(", "self", ")", ":", "if", "not", "self", ".", "unlocked", "(", ")", ":", "raise", "WalletLocked", "aes", "=", "AESCipher", "(", "self", ".", "password", ")", "return", "\"{}${}\"", ".", "format", "(", "self", ".", "_derive_checksum", "(", "self", ".", "masterkey", ")", ",", "aes", ".", "encrypt", "(", "self", ".", "masterkey", ")", ")" ]
38.846154
16.615385
def echo_event(data):
    """Echo a json dump of an object using click"""
    return click.echo(json.dumps(data, sort_keys=True, indent=2))
[ "def", "echo_event", "(", "data", ")", ":", "return", "click", ".", "echo", "(", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ")", ")" ]
45.666667
14.666667
def create_refobj(self, ):
    """Create and return a new reftrack node

    :returns: the new reftrack node
    :rtype: str
    :raises: None
    """
    n = cmds.createNode("jb_reftrack")
    cmds.lockNode(n, lock=True)
    return n
[ "def", "create_refobj", "(", "self", ",", ")", ":", "n", "=", "cmds", ".", "createNode", "(", "\"jb_reftrack\"", ")", "cmds", ".", "lockNode", "(", "n", ",", "lock", "=", "True", ")", "return", "n" ]
25.7
12.6
def _extract_actions_unique_topics(self, movement_counts, max_movements, cluster_topology, max_movement_size):
    """Extract actions limiting to given max value such that
    the resultant has the minimum possible number of duplicate topics.

    Algorithm:
    1. Group actions by topic-name: {topic: action-list}
    2. Iterate through the dictionary in circular fashion and keep
       extracting actions until max_partition_movements are reached.

    :param movement_counts: list of tuple ((topic, partition), movement count)
    :param max_movements: max number of movements to extract
    :param cluster_topology: cluster topology containing the new proposed assignment for the cluster
    :param max_movement_size: maximum size of data to move at a time in extracted actions
    :return: list of tuple (topic, partitions) to include in the reduced plan
    """
    # Group actions by topic
    topic_actions = defaultdict(list)
    for t_p, replica_change_cnt in movement_counts:
        topic_actions[t_p[0]].append((t_p, replica_change_cnt))

    # Create reduced assignment minimizing duplication of topics
    extracted_actions = []
    curr_movements = 0
    curr_size = 0
    action_available = True
    while curr_movements < max_movements and curr_size <= max_movement_size and action_available:
        action_available = False
        for topic, actions in six.iteritems(topic_actions):
            for action in actions:
                action_size = cluster_topology.partitions[action[0]].size
                if curr_movements + action[1] > max_movements or curr_size + action_size > max_movement_size:
                    # Remove action since it won't be possible to use it
                    actions.remove(action)
                else:
                    # Append (topic, partition) to the list of movements
                    action_available = True
                    extracted_actions.append(action[0])
                    curr_movements += action[1]
                    curr_size += action_size
                    actions.remove(action)
                    break
    return extracted_actions
[ "def", "_extract_actions_unique_topics", "(", "self", ",", "movement_counts", ",", "max_movements", ",", "cluster_topology", ",", "max_movement_size", ")", ":", "# Group actions by topic", "topic_actions", "=", "defaultdict", "(", "list", ")", "for", "t_p", ",", "replica_change_cnt", "in", "movement_counts", ":", "topic_actions", "[", "t_p", "[", "0", "]", "]", ".", "append", "(", "(", "t_p", ",", "replica_change_cnt", ")", ")", "# Create reduced assignment minimizing duplication of topics", "extracted_actions", "=", "[", "]", "curr_movements", "=", "0", "curr_size", "=", "0", "action_available", "=", "True", "while", "curr_movements", "<", "max_movements", "and", "curr_size", "<=", "max_movement_size", "and", "action_available", ":", "action_available", "=", "False", "for", "topic", ",", "actions", "in", "six", ".", "iteritems", "(", "topic_actions", ")", ":", "for", "action", "in", "actions", ":", "action_size", "=", "cluster_topology", ".", "partitions", "[", "action", "[", "0", "]", "]", ".", "size", "if", "curr_movements", "+", "action", "[", "1", "]", ">", "max_movements", "or", "curr_size", "+", "action_size", ">", "max_movement_size", ":", "# Remove action since it won't be possible to use it", "actions", ".", "remove", "(", "action", ")", "else", ":", "# Append (topic, partition) to the list of movements", "action_available", "=", "True", "extracted_actions", ".", "append", "(", "action", "[", "0", "]", ")", "curr_movements", "+=", "action", "[", "1", "]", "curr_size", "+=", "action_size", "actions", ".", "remove", "(", "action", ")", "break", "return", "extracted_actions" ]
54.47619
24.738095
def Emulation_setCPUThrottlingRate(self, rate):
    """
    Function path: Emulation.setCPUThrottlingRate
        Domain: Emulation
        Method name: setCPUThrottlingRate

        WARNING: This function is marked 'Experimental'!

        Parameters:
            Required arguments:
                'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
        No return value.

        Description: Enables CPU throttling to emulate slow CPUs.
    """
    assert isinstance(rate, (float, int)
        ), "Argument 'rate' must be of type '['float', 'int']'. Received type: '%s'" % type(
        rate)
    subdom_funcs = self.synchronous_command('Emulation.setCPUThrottlingRate',
        rate=rate)
    return subdom_funcs
[ "def", "Emulation_setCPUThrottlingRate", "(", "self", ",", "rate", ")", ":", "assert", "isinstance", "(", "rate", ",", "(", "float", ",", "int", ")", ")", ",", "\"Argument 'rate' must be of type '['float', 'int']'. Received type: '%s'\"", "%", "type", "(", "rate", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Emulation.setCPUThrottlingRate'", ",", "rate", "=", "rate", ")", "return", "subdom_funcs" ]
33
22.619048
def fetch_items(self, category, **kwargs):
    """Fetch the pages

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    reviews_api = kwargs['reviews_api']

    mediawiki_version = self.client.get_version()
    logger.info("MediaWiki version: %s", mediawiki_version)

    if reviews_api:
        if ((mediawiki_version[0] == 1 and mediawiki_version[1] >= 27) or
                mediawiki_version[0] > 1):
            fetcher = self.__fetch_1_27(from_date)
        else:
            logger.warning("Reviews API only available in MediaWiki >= 1.27")
            logger.warning("Using the Pages API instead")
            fetcher = self.__fetch_pre1_27(from_date)
    else:
        fetcher = self.__fetch_pre1_27(from_date)

    for page_reviews in fetcher:
        yield page_reviews
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", "[", "'from_date'", "]", "reviews_api", "=", "kwargs", "[", "'reviews_api'", "]", "mediawiki_version", "=", "self", ".", "client", ".", "get_version", "(", ")", "logger", ".", "info", "(", "\"MediaWiki version: %s\"", ",", "mediawiki_version", ")", "if", "reviews_api", ":", "if", "(", "(", "mediawiki_version", "[", "0", "]", "==", "1", "and", "mediawiki_version", "[", "1", "]", ">=", "27", ")", "or", "mediawiki_version", "[", "0", "]", ">", "1", ")", ":", "fetcher", "=", "self", ".", "__fetch_1_27", "(", "from_date", ")", "else", ":", "logger", ".", "warning", "(", "\"Reviews API only available in MediaWiki >= 1.27\"", ")", "logger", ".", "warning", "(", "\"Using the Pages API instead\"", ")", "fetcher", "=", "self", ".", "__fetch_pre1_27", "(", "from_date", ")", "else", ":", "fetcher", "=", "self", ".", "__fetch_pre1_27", "(", "from_date", ")", "for", "page_reviews", "in", "fetcher", ":", "yield", "page_reviews" ]
36.115385
19.615385
def actuator_off(self, service_location_id, actuator_id, duration=None):
    """
    Turn actuator off

    Parameters
    ----------
    service_location_id : int
    actuator_id : int
    duration : int, optional
        300, 900, 1800 or 3600, specifying the time in seconds the actuator
        should be turned on. Any other value results in turning on for an
        undetermined period of time.

    Returns
    -------
    requests.Response
    """
    return self._actuator_on_off(
        on_off='off', service_location_id=service_location_id,
        actuator_id=actuator_id, duration=duration)
[ "def", "actuator_off", "(", "self", ",", "service_location_id", ",", "actuator_id", ",", "duration", "=", "None", ")", ":", "return", "self", ".", "_actuator_on_off", "(", "on_off", "=", "'off'", ",", "service_location_id", "=", "service_location_id", ",", "actuator_id", "=", "actuator_id", ",", "duration", "=", "duration", ")" ]
32.65
19.25
def get_name(self):
    """Return client name"""
    if self.given_name is None:
        # Name according to host
        if self.hostname is None:
            name = _("Console")
        else:
            name = self.hostname
        # Adding id to name
        client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
        name = name + u' ' + client_id
    elif self.given_name in ["Pylab", "SymPy", "Cython"]:
        client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
        name = self.given_name + u' ' + client_id
    else:
        name = self.given_name + u'/' + self.id_['str_id']
    return name
[ "def", "get_name", "(", "self", ")", ":", "if", "self", ".", "given_name", "is", "None", ":", "# Name according to host\r", "if", "self", ".", "hostname", "is", "None", ":", "name", "=", "_", "(", "\"Console\"", ")", "else", ":", "name", "=", "self", ".", "hostname", "# Adding id to name\r", "client_id", "=", "self", ".", "id_", "[", "'int_id'", "]", "+", "u'/'", "+", "self", ".", "id_", "[", "'str_id'", "]", "name", "=", "name", "+", "u' '", "+", "client_id", "elif", "self", ".", "given_name", "in", "[", "\"Pylab\"", ",", "\"SymPy\"", ",", "\"Cython\"", "]", ":", "client_id", "=", "self", ".", "id_", "[", "'int_id'", "]", "+", "u'/'", "+", "self", ".", "id_", "[", "'str_id'", "]", "name", "=", "self", ".", "given_name", "+", "u' '", "+", "client_id", "else", ":", "name", "=", "self", ".", "given_name", "+", "u'/'", "+", "self", ".", "id_", "[", "'str_id'", "]", "return", "name" ]
40.235294
13.941176
def sun_ra_dec(utc_time):
    """Right ascension and declination of the sun at *utc_time*.
    """
    jdate = jdays2000(utc_time) / 36525.0
    eps = np.deg2rad(23.0 + 26.0 / 60.0 + 21.448 / 3600.0 -
                     (46.8150 * jdate + 0.00059 * jdate * jdate -
                      0.001813 * jdate * jdate * jdate) / 3600)
    eclon = sun_ecliptic_longitude(utc_time)
    x__ = np.cos(eclon)
    y__ = np.cos(eps) * np.sin(eclon)
    z__ = np.sin(eps) * np.sin(eclon)
    r__ = np.sqrt(1.0 - z__ * z__)
    # sun declination
    declination = np.arctan2(z__, r__)
    # right ascension
    right_ascension = 2 * np.arctan2(y__, (x__ + r__))
    return right_ascension, declination
[ "def", "sun_ra_dec", "(", "utc_time", ")", ":", "jdate", "=", "jdays2000", "(", "utc_time", ")", "/", "36525.0", "eps", "=", "np", ".", "deg2rad", "(", "23.0", "+", "26.0", "/", "60.0", "+", "21.448", "/", "3600.0", "-", "(", "46.8150", "*", "jdate", "+", "0.00059", "*", "jdate", "*", "jdate", "-", "0.001813", "*", "jdate", "*", "jdate", "*", "jdate", ")", "/", "3600", ")", "eclon", "=", "sun_ecliptic_longitude", "(", "utc_time", ")", "x__", "=", "np", ".", "cos", "(", "eclon", ")", "y__", "=", "np", ".", "cos", "(", "eps", ")", "*", "np", ".", "sin", "(", "eclon", ")", "z__", "=", "np", ".", "sin", "(", "eps", ")", "*", "np", ".", "sin", "(", "eclon", ")", "r__", "=", "np", ".", "sqrt", "(", "1.0", "-", "z__", "*", "z__", ")", "# sun declination", "declination", "=", "np", ".", "arctan2", "(", "z__", ",", "r__", ")", "# right ascension", "right_ascension", "=", "2", "*", "np", ".", "arctan2", "(", "y__", ",", "(", "x__", "+", "r__", ")", ")", "return", "right_ascension", ",", "declination" ]
39.529412
10.058824
def is_valid_short_number_for_region(short_numobj, region_dialing_from):
    """Tests whether a short number matches a valid pattern in a region.

    Note that this doesn't verify the number is actually in use, which is
    impossible to tell by just looking at the number itself.

    Arguments:
    short_numobj -- the short number to check as a PhoneNumber object.
    region_dialing_from -- the region from which the number is dialed

    Return whether the short number matches a valid pattern
    """
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:  # pragma no cover
        return False
    short_number = national_significant_number(short_numobj)
    general_desc = metadata.general_desc
    if not _matches_possible_number_and_national_number(short_number, general_desc):
        return False
    short_number_desc = metadata.short_code
    if short_number_desc.national_number_pattern is None:  # pragma no cover
        return False
    return _matches_possible_number_and_national_number(short_number, short_number_desc)
[ "def", "is_valid_short_number_for_region", "(", "short_numobj", ",", "region_dialing_from", ")", ":", "if", "not", "_region_dialing_from_matches_number", "(", "short_numobj", ",", "region_dialing_from", ")", ":", "return", "False", "metadata", "=", "PhoneMetadata", ".", "short_metadata_for_region", "(", "region_dialing_from", ")", "if", "metadata", "is", "None", ":", "# pragma no cover", "return", "False", "short_number", "=", "national_significant_number", "(", "short_numobj", ")", "general_desc", "=", "metadata", ".", "general_desc", "if", "not", "_matches_possible_number_and_national_number", "(", "short_number", ",", "general_desc", ")", ":", "return", "False", "short_number_desc", "=", "metadata", ".", "short_code", "if", "short_number_desc", ".", "national_number_pattern", "is", "None", ":", "# pragma no cover", "return", "False", "return", "_matches_possible_number_and_national_number", "(", "short_number", ",", "short_number_desc", ")" ]
46.68
24.8
def get_tag_users(self, tag_id, first_user_id=None):
    """
    Get the list of followers under a tag

    :param tag_id: tag ID
    :param first_user_id: optional; the first OPENID to pull. If omitted, pulling starts from the beginning
    :return: the returned JSON data packet
    """
    data = {
        'tagid': tag_id,
    }
    if first_user_id:
        data['next_openid'] = first_user_id

    return self._post(
        'user/tag/get',
        data=data
    )
[ "def", "get_tag_users", "(", "self", ",", "tag_id", ",", "first_user_id", "=", "None", ")", ":", "data", "=", "{", "'tagid'", ":", "tag_id", ",", "}", "if", "first_user_id", ":", "data", "[", "'next_openid'", "]", "=", "first_user_id", "return", "self", ".", "_post", "(", "'user/tag/get'", ",", "data", "=", "data", ")" ]
24.294118
16.529412
def resample_nn_1d(a, centers):
    """Return one-dimensional nearest-neighbor indexes based on user-specified centers.

    Parameters
    ----------
    a : array-like
        1-dimensional array of numeric values from which to extract indexes of nearest-neighbors
    centers : array-like
        1-dimensional array of numeric values representing a subset of values to approximate

    Returns
    -------
        An array of indexes representing values closest to given array values
    """
    ix = []
    for center in centers:
        index = (np.abs(a - center)).argmin()
        if index not in ix:
            ix.append(index)
    return ix
[ "def", "resample_nn_1d", "(", "a", ",", "centers", ")", ":", "ix", "=", "[", "]", "for", "center", "in", "centers", ":", "index", "=", "(", "np", ".", "abs", "(", "a", "-", "center", ")", ")", ".", "argmin", "(", ")", "if", "index", "not", "in", "ix", ":", "ix", ".", "append", "(", "index", ")", "return", "ix" ]
29.045455
22.227273
def insert_many(self, it):
    """Inserts a collection of objects into the table."""
    unique_indexes = self._uniqueIndexes  # [ind for ind in self._indexes.values() if ind.is_unique]
    NO_SUCH_ATTR = object()
    new_objs = list(it)
    if unique_indexes:
        for ind in unique_indexes:
            ind_attr = ind.attr
            new_keys = dict((getattr(obj, ind_attr, NO_SUCH_ATTR), obj) for obj in new_objs)
            if not ind.accept_none and (None in new_keys or NO_SUCH_ATTR in new_keys):
                raise KeyError("unique key cannot be None or blank for index %s" % ind_attr,
                               [ob for ob in new_objs if getattr(ob, ind_attr, NO_SUCH_ATTR) is None])
            if len(new_keys) < len(new_objs):
                raise KeyError("given sequence contains duplicate keys for index %s" % ind_attr)
            for key in new_keys:
                if key in ind:
                    obj = new_keys[key]
                    raise KeyError("duplicate unique key value '%s' for index %s" %
                                   (getattr(obj, ind_attr), ind_attr), new_keys[key])

    for obj in new_objs:
        self.obs.append(obj)
        for attr, ind in self._indexes.items():
            obval = getattr(obj, attr)
            ind[obval] = obj

    return self
[ "def", "insert_many", "(", "self", ",", "it", ")", ":", "unique_indexes", "=", "self", ".", "_uniqueIndexes", "# [ind for ind in self._indexes.values() if ind.is_unique]", "NO_SUCH_ATTR", "=", "object", "(", ")", "new_objs", "=", "list", "(", "it", ")", "if", "unique_indexes", ":", "for", "ind", "in", "unique_indexes", ":", "ind_attr", "=", "ind", ".", "attr", "new_keys", "=", "dict", "(", "(", "getattr", "(", "obj", ",", "ind_attr", ",", "NO_SUCH_ATTR", ")", ",", "obj", ")", "for", "obj", "in", "new_objs", ")", "if", "not", "ind", ".", "accept_none", "and", "(", "None", "in", "new_keys", "or", "NO_SUCH_ATTR", "in", "new_keys", ")", ":", "raise", "KeyError", "(", "\"unique key cannot be None or blank for index %s\"", "%", "ind_attr", ",", "[", "ob", "for", "ob", "in", "new_objs", "if", "getattr", "(", "ob", ",", "ind_attr", ",", "NO_SUCH_ATTR", ")", "is", "None", "]", ")", "if", "len", "(", "new_keys", ")", "<", "len", "(", "new_objs", ")", ":", "raise", "KeyError", "(", "\"given sequence contains duplicate keys for index %s\"", "%", "ind_attr", ")", "for", "key", "in", "new_keys", ":", "if", "key", "in", "ind", ":", "obj", "=", "new_keys", "[", "key", "]", "raise", "KeyError", "(", "\"duplicate unique key value '%s' for index %s\"", "%", "(", "getattr", "(", "obj", ",", "ind_attr", ")", ",", "ind_attr", ")", ",", "new_keys", "[", "key", "]", ")", "for", "obj", "in", "new_objs", ":", "self", ".", "obs", ".", "append", "(", "obj", ")", "for", "attr", ",", "ind", "in", "self", ".", "_indexes", ".", "items", "(", ")", ":", "obval", "=", "getattr", "(", "obj", ",", "attr", ")", "ind", "[", "obval", "]", "=", "obj", "return", "self" ]
51.888889
24.148148
def get_hierarchy_traversal_session_for_hierarchy(self, hierarchy_id, proxy):
    """Gets the ``OsidSession`` associated with the hierarchy traversal service for the given hierarchy.

    arg:    hierarchy_id (osid.id.Id): the ``Id`` of the hierarchy
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.hierarchy.HierarchyTraversalSession) - a
            ``HierarchyTraversalSession``
    raise:  NotFound - ``hierarchyid`` not found
    raise:  NullArgument - ``hierarchy_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_hierarchy_traversal()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_hierarchy_traversal()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if not self.supports_hierarchy_traversal():
        raise errors.Unimplemented()
    ##
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    ##
    # pylint: disable=no-member
    return sessions.HierarchyTraversalSession(hierarchy_id, proxy, self._runtime)
[ "def", "get_hierarchy_traversal_session_for_hierarchy", "(", "self", ",", "hierarchy_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_hierarchy_traversal", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found otherwise raise errors.NotFound", "##", "# pylint: disable=no-member", "return", "sessions", ".", "HierarchyTraversalSession", "(", "hierarchy_id", ",", "proxy", ",", "self", ".", "_runtime", ")" ]
51.458333
21.916667
def parse_exponent(source, start):
    """returns end of exponential, raises SyntaxError if failed"""
    if not source[start] in {'e', 'E'}:
        if source[start] in IDENTIFIER_PART:
            raise SyntaxError('Invalid number literal!')
        return start
    start += 1
    if source[start] in {'-', '+'}:
        start += 1
    FOUND = False
    # we need at least one dig after exponent
    while source[start] in NUMS:
        FOUND = True
        start += 1
    if not FOUND or source[start] in IDENTIFIER_PART:
        raise SyntaxError('Invalid number literal!')
    return start
[ "def", "parse_exponent", "(", "source", ",", "start", ")", ":", "if", "not", "source", "[", "start", "]", "in", "{", "'e'", ",", "'E'", "}", ":", "if", "source", "[", "start", "]", "in", "IDENTIFIER_PART", ":", "raise", "SyntaxError", "(", "'Invalid number literal!'", ")", "return", "start", "start", "+=", "1", "if", "source", "[", "start", "]", "in", "{", "'-'", ",", "'+'", "}", ":", "start", "+=", "1", "FOUND", "=", "False", "# we need at least one dig after exponent", "while", "source", "[", "start", "]", "in", "NUMS", ":", "FOUND", "=", "True", "start", "+=", "1", "if", "not", "FOUND", "or", "source", "[", "start", "]", "in", "IDENTIFIER_PART", ":", "raise", "SyntaxError", "(", "'Invalid number literal!'", ")", "return", "start" ]
34.058824
13.352941
def container_unfreeze(name, remote_addr=None, cert=None, key=None, verify_cert=True):
    '''
    Unfreeze a container

    name :
        Name of the container to unfreeze

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and it's a TCP Address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert; this is by default True but in most
        cases you want to set it off as LXD normally uses self-signed
        certificates.
    '''
    container = container_get(
        name, remote_addr, cert, key, verify_cert, _raw=True
    )
    container.unfreeze(wait=True)
    return _pylxd_model_to_dict(container)
[ "def", "container_unfreeze", "(", "name", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "container", "=", "container_get", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "container", ".", "unfreeze", "(", "wait", "=", "True", ")", "return", "_pylxd_model_to_dict", "(", "container", ")" ]
25.736842
21.736842
def get_hosting_device_driver(self, context, id):
    """Returns device driver for hosting device template with <id>."""
    if id is None:
        return
    try:
        return self._hosting_device_drivers[id]
    except KeyError:
        try:
            template = self._get_hosting_device_template(context, id)
            self._hosting_device_drivers[id] = importutils.import_object(
                template['device_driver'])
        except (ImportError, TypeError, n_exc.NeutronException):
            LOG.exception("Error loading hosting device driver for "
                          "hosting device template %s", id)
        return self._hosting_device_drivers.get(id)
[ "def", "get_hosting_device_driver", "(", "self", ",", "context", ",", "id", ")", ":", "if", "id", "is", "None", ":", "return", "try", ":", "return", "self", ".", "_hosting_device_drivers", "[", "id", "]", "except", "KeyError", ":", "try", ":", "template", "=", "self", ".", "_get_hosting_device_template", "(", "context", ",", "id", ")", "self", ".", "_hosting_device_drivers", "[", "id", "]", "=", "importutils", ".", "import_object", "(", "template", "[", "'device_driver'", "]", ")", "except", "(", "ImportError", ",", "TypeError", ",", "n_exc", ".", "NeutronException", ")", ":", "LOG", ".", "exception", "(", "\"Error loading hosting device driver for \"", "\"hosting device template %s\"", ",", "id", ")", "return", "self", ".", "_hosting_device_drivers", ".", "get", "(", "id", ")" ]
48
20.133333
def next(self):
    """
    Return the next row of a query result set, respecting if cursor was
    closed.
    """
    if self.rows is None:
        raise ProgrammingError(
            "No result available. " +
            "execute() or executemany() must be called first."
        )
    elif not self._closed:
        return next(self.rows)
    else:
        raise ProgrammingError("Cursor closed")
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "rows", "is", "None", ":", "raise", "ProgrammingError", "(", "\"No result available. \"", "+", "\"execute() or executemany() must be called first.\"", ")", "elif", "not", "self", ".", "_closed", ":", "return", "next", "(", "self", ".", "rows", ")", "else", ":", "raise", "ProgrammingError", "(", "\"Cursor closed\"", ")" ]
31.357143
14.928571
def read_line(csv_contents, options, prop_indices, mol, ensemble_list=None):
    """ read csv line """
    if not ensemble_list:
        score_field = options.score_field
    status_field = options.status_field
    active_label = options.active_label
    decoy_label = options.decoy_label

    # do the active/decoy labels have appropriate values?
    active_value_matcher = re.compile(active_label)
    decoy_value_matcher = re.compile(decoy_label)
    status_label_index = prop_indices[status_field]
    if not active_value_matcher.match(csv_contents[status_label_index]) and not decoy_value_matcher.match(
            csv_contents[status_label_index]):
        print("\n molecule lacks appropriate status label")
        return 1

    # are the score field values defined?
    score_field_indices = []
    if ensemble_list:
        queryList = ensemble_list
    else:
        queryList = [x for x in prop_indices.keys() if score_field in x]
    for query in queryList:
        score_field_indices.append(prop_indices[query])
    for value in [csv_contents[x] for x in score_field_indices]:
        if value in ('', 'n/a', 'N/A', None):
            print("\n molecule lacks appropriate score field value")
            return 1

    # loop over property values
    for label in prop_indices.keys():
        # get property value
        value_index = prop_indices[label]
        value = csv_contents[value_index]

        # set corresponding molecule attribute
        if label in queryList:
            mol.SetProp(label, value, 'score')
        else:
            mol.SetProp(label, value)

    # return mol
    return mol
[ "def", "read_line", "(", "csv_contents", ",", "options", ",", "prop_indices", ",", "mol", ",", "ensemble_list", "=", "None", ")", ":", "if", "not", "ensemble_list", ":", "score_field", "=", "options", ".", "score_field", "status_field", "=", "options", ".", "status_field", "active_label", "=", "options", ".", "active_label", "decoy_label", "=", "options", ".", "decoy_label", "# do the active/decoy labels have appropriate values?", "active_value_matcher", "=", "re", ".", "compile", "(", "active_label", ")", "decoy_value_matcher", "=", "re", ".", "compile", "(", "decoy_label", ")", "status_label_index", "=", "prop_indices", "[", "status_field", "]", "if", "not", "active_value_matcher", ".", "match", "(", "csv_contents", "[", "status_label_index", "]", ")", "and", "not", "decoy_value_matcher", ".", "match", "(", "csv_contents", "[", "status_label_index", "]", ")", ":", "print", "(", "\"\\n molecule lacks appropriate status label\"", ")", "return", "1", "# are the score field values defined?", "score_field_indices", "=", "[", "]", "if", "ensemble_list", ":", "queryList", "=", "ensemble_list", "else", ":", "queryList", "=", "[", "x", "for", "x", "in", "prop_indices", ".", "keys", "(", ")", "if", "score_field", "in", "x", "]", "for", "query", "in", "queryList", ":", "score_field_indices", ".", "append", "(", "prop_indices", "[", "query", "]", ")", "for", "value", "in", "[", "csv_contents", "[", "x", "]", "for", "x", "in", "score_field_indices", "]", ":", "if", "value", "in", "(", "''", ",", "'n/a'", ",", "'N/A'", ",", "None", ")", ":", "print", "(", "\"\\n molecule lacks appropriate score field value\"", ")", "return", "1", "# loop over property values", "for", "label", "in", "prop_indices", ".", "keys", "(", ")", ":", "# get property value", "value_index", "=", "prop_indices", "[", "label", "]", "value", "=", "csv_contents", "[", "value_index", "]", "# set corresponding molecule attribute", "if", "label", "in", "queryList", ":", "mol", ".", "SetProp", "(", "label", ",", "value", ",", "'score'", ")", "else", ":", "mol", ".", "SetProp", "(", "label", ",", "value", ")", "# return mol", "return", "mol" ]
32.916667
17.9375
def create_hit(self, hit_type=None, question=None,
               lifetime=datetime.timedelta(days=7),
               max_assignments=1,
               title=None, description=None, keywords=None,
               reward=None, duration=datetime.timedelta(days=7),
               approval_delay=None, annotation=None,
               questions=None, qualifications=None,
               response_groups=None):
    """
    Creates a new HIT.
    Returns a ResultSet
    See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html
    """

    # handle single or multiple questions
    neither = question is None and questions is None
    both = question is not None and questions is not None
    if neither or both:
        raise ValueError("Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both")

    if question:
        questions = [question]
    question_param = QuestionForm(questions)
    if isinstance(question, QuestionForm):
        question_param = question
    elif isinstance(question, ExternalQuestion):
        question_param = question

    # Handle basic required arguments and set up params dict
    params = {'Question': question_param.get_as_xml(),
              'LifetimeInSeconds': self.duration_as_seconds(lifetime),
              'MaxAssignments': max_assignments,
              }

    # if hit type specified then add it
    # else add the additional required parameters
    if hit_type:
        params['HITTypeId'] = hit_type
    else:
        # Handle keywords
        final_keywords = MTurkConnection.get_keywords_as_string(keywords)

        # Handle price argument
        final_price = MTurkConnection.get_price_as_price(reward)

        final_duration = self.duration_as_seconds(duration)

        additional_params = dict(
            Title=title,
            Description=description,
            Keywords=final_keywords,
            AssignmentDurationInSeconds=final_duration,
        )
        additional_params.update(final_price.get_as_params('Reward'))

        if approval_delay is not None:
            d = self.duration_as_seconds(approval_delay)
            additional_params['AutoApprovalDelayInSeconds'] = d

        # add these params to the others
        params.update(additional_params)

    # add the annotation if specified
    if annotation is not None:
        params['RequesterAnnotation'] = annotation

    # Add the Qualifications if specified
    if qualifications is not None:
        params.update(qualifications.get_as_params())

    # Handle optional response groups argument
    if response_groups:
        self.build_list_params(params, response_groups, 'ResponseGroup')

    # Submit
    return self._process_request('CreateHIT', params, [('HIT', HIT), ])
[ "def", "create_hit", "(", "self", ",", "hit_type", "=", "None", ",", "question", "=", "None", ",", "lifetime", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", ",", "max_assignments", "=", "1", ",", "title", "=", "None", ",", "description", "=", "None", ",", "keywords", "=", "None", ",", "reward", "=", "None", ",", "duration", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", ",", "approval_delay", "=", "None", ",", "annotation", "=", "None", ",", "questions", "=", "None", ",", "qualifications", "=", "None", ",", "response_groups", "=", "None", ")", ":", "# handle single or multiple questions", "neither", "=", "question", "is", "None", "and", "questions", "is", "None", "both", "=", "question", "is", "not", "None", "and", "questions", "is", "not", "None", "if", "neither", "or", "both", ":", "raise", "ValueError", "(", "\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\"", ")", "if", "question", ":", "questions", "=", "[", "question", "]", "question_param", "=", "QuestionForm", "(", "questions", ")", "if", "isinstance", "(", "question", ",", "QuestionForm", ")", ":", "question_param", "=", "question", "elif", "isinstance", "(", "question", ",", "ExternalQuestion", ")", ":", "question_param", "=", "question", "# Handle basic required arguments and set up params dict", "params", "=", "{", "'Question'", ":", "question_param", ".", "get_as_xml", "(", ")", ",", "'LifetimeInSeconds'", ":", "self", ".", "duration_as_seconds", "(", "lifetime", ")", ",", "'MaxAssignments'", ":", "max_assignments", ",", "}", "# if hit type specified then add it", "# else add the additional required parameters", "if", "hit_type", ":", "params", "[", "'HITTypeId'", "]", "=", "hit_type", "else", ":", "# Handle keywords", "final_keywords", "=", "MTurkConnection", ".", "get_keywords_as_string", "(", "keywords", ")", "# Handle price argument", "final_price", "=", "MTurkConnection", ".", "get_price_as_price", "(", "reward", ")", "final_duration", "=", "self", ".", "duration_as_seconds", "(", "duration", ")", "additional_params", "=", "dict", "(", "Title", "=", "title", ",", "Description", "=", "description", ",", "Keywords", "=", "final_keywords", ",", "AssignmentDurationInSeconds", "=", "final_duration", ",", ")", "additional_params", ".", "update", "(", "final_price", ".", "get_as_params", "(", "'Reward'", ")", ")", "if", "approval_delay", "is", "not", "None", ":", "d", "=", "self", ".", "duration_as_seconds", "(", "approval_delay", ")", "additional_params", "[", "'AutoApprovalDelayInSeconds'", "]", "=", "d", "# add these params to the others", "params", ".", "update", "(", "additional_params", ")", "# add the annotation if specified", "if", "annotation", "is", "not", "None", ":", "params", "[", "'RequesterAnnotation'", "]", "=", "annotation", "# Add the Qualifications if specified", "if", "qualifications", "is", "not", "None", ":", "params", ".", "update", "(", "qualifications", ".", "get_as_params", "(", ")", ")", "# Handle optional response groups argument", "if", "response_groups", ":", "self", ".", "build_list_params", "(", "params", ",", "response_groups", ",", "'ResponseGroup'", ")", "# Submit", "return", "self", ".", "_process_request", "(", "'CreateHIT'", ",", "params", ",", "[", "(", "'HIT'", ",", "HIT", ")", ",", "]", ")" ]
40.233766
18.935065
def queue(self, name, value, quality=None, timestamp=None, attributes=None):
    """
    To reduce network traffic, you can buffer datapoints and then flush()
    anything in the queue.

    :param name: the name / label / tag for sensor data
    :param value: the sensor reading or value to record
    :param quality: the quality value, use the constants BAD, GOOD, etc.
        (optional and defaults to UNCERTAIN)
    :param timestamp: the time the reading was recorded in epoch milliseconds
        (optional and defaults to now)
    :param attributes: dictionary for any key-value pairs to store with the
        reading (optional)
    """
    # Get timestamp first in case delay opening websocket connection
    # and it must have millisecond accuracy
    if not timestamp:
        timestamp = int(round(time.time() * 1000))
    else:
        # Coerce datetime objects to epoch
        if isinstance(timestamp, datetime.datetime):
            timestamp = int(round(int(timestamp.strftime('%s')) * 1000))

    # Only specific quality values supported
    if quality not in [self.BAD, self.GOOD, self.NA, self.UNCERTAIN]:
        quality = self.UNCERTAIN

    # Check if adding to queue of an existing tag and add second datapoint
    for point in self._queue:
        if point['name'] == name:
            point['datapoints'].append([timestamp, value, quality])
            return

    # If adding new tag, initialize and set any attributes
    datapoint = {
        "name": name,
        "datapoints": [[timestamp, value, quality]]
    }

    # Attributes are extra details for a datapoint
    if attributes is not None:
        if not isinstance(attributes, dict):
            raise ValueError("Attributes are expected to be a dictionary.")

        # Validate rules for attribute keys to provide guidance.
        invalid_value = ':;= '
        has_invalid_value = re.compile(r'[%s]' % (invalid_value)).search
        has_valid_key = re.compile(r'^[\w\.\/\-]+$').search

        for (key, val) in list(attributes.items()):
            # Values cannot be empty
            if (val == '') or (val is None):
                raise ValueError("Attribute (%s) must have a non-empty value." % (key))

            # Values should be treated as a string for regex validation
            val = str(val)

            # Values cannot contain certain arbitrary characters
            if bool(has_invalid_value(val)):
                raise ValueError("Attribute (%s) cannot contain (%s)." % (key, invalid_value))

            # Attributes have to be alphanumeric-ish
            if not bool(has_valid_key):
                raise ValueError("Key (%s) not alphanumeric-ish." % (key))

        datapoint['attributes'] = attributes

    self._queue.append(datapoint)
    logging.debug("QUEUE: " + str(len(self._queue)))
[ "def", "queue", "(", "self", ",", "name", ",", "value", ",", "quality", "=", "None", ",", "timestamp", "=", "None", ",", "attributes", "=", "None", ")", ":", "# Get timestamp first in case delay opening websocket connection", "# and it must have millisecond accuracy", "if", "not", "timestamp", ":", "timestamp", "=", "int", "(", "round", "(", "time", ".", "time", "(", ")", "*", "1000", ")", ")", "else", ":", "# Coerce datetime objects to epoch", "if", "isinstance", "(", "timestamp", ",", "datetime", ".", "datetime", ")", ":", "timestamp", "=", "int", "(", "round", "(", "int", "(", "timestamp", ".", "strftime", "(", "'%s'", ")", ")", "*", "1000", ")", ")", "# Only specific quality values supported", "if", "quality", "not", "in", "[", "self", ".", "BAD", ",", "self", ".", "GOOD", ",", "self", ".", "NA", ",", "self", ".", "UNCERTAIN", "]", ":", "quality", "=", "self", ".", "UNCERTAIN", "# Check if adding to queue of an existing tag and add second datapoint", "for", "point", "in", "self", ".", "_queue", ":", "if", "point", "[", "'name'", "]", "==", "name", ":", "point", "[", "'datapoints'", "]", ".", "append", "(", "[", "timestamp", ",", "value", ",", "quality", "]", ")", "return", "# If adding new tag, initialize and set any attributes", "datapoint", "=", "{", "\"name\"", ":", "name", ",", "\"datapoints\"", ":", "[", "[", "timestamp", ",", "value", ",", "quality", "]", "]", "}", "# Attributes are extra details for a datapoint", "if", "attributes", "is", "not", "None", ":", "if", "not", "isinstance", "(", "attributes", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Attributes are expected to be a dictionary.\"", ")", "# Validate rules for attribute keys to provide guidance.", "invalid_value", "=", "':;= '", "has_invalid_value", "=", "re", ".", "compile", "(", "r'[%s]'", "%", "(", "invalid_value", ")", ")", ".", "search", "has_valid_key", "=", "re", ".", "compile", "(", "r'^[\\w\\.\\/\\-]+$'", ")", ".", "search", "for", "(", "key", ",", "val", ")", "in", "list", "(", "attributes", ".", "items", "(", ")", ")", ":", "# Values cannot be empty", "if", "(", "val", "==", "''", ")", "or", "(", "val", "is", "None", ")", ":", "raise", "ValueError", "(", "\"Attribute (%s) must have a non-empty value.\"", "%", "(", "key", ")", ")", "# Values should be treated as a string for regex validation", "val", "=", "str", "(", "val", ")", "# Values cannot contain certain arbitrary characters", "if", "bool", "(", "has_invalid_value", "(", "val", ")", ")", ":", "raise", "ValueError", "(", "\"Attribute (%s) cannot contain (%s).\"", "%", "(", "key", ",", "invalid_value", ")", ")", "# Attributes have to be alphanumeric-ish", "if", "not", "bool", "(", "has_valid_key", ")", ":", "raise", "ValueError", "(", "\"Key (%s) not alphanumeric-ish.\"", "%", "(", "key", ")", ")", "datapoint", "[", "'attributes'", "]", "=", "attributes", "self", ".", "_queue", ".", "append", "(", "datapoint", ")", "logging", ".", "debug", "(", "\"QUEUE: \"", "+", "str", "(", "len", "(", "self", ".", "_queue", ")", ")", ")" ]
39
23.25974
def _Main(self): """The main loop.""" # We need a resolver context per process to prevent multi processing # issues with file objects stored in images. resolver_context = context.Context() for credential_configuration in self._processing_configuration.credentials: resolver.Resolver.key_chain.SetCredential( credential_configuration.path_spec, credential_configuration.credential_type, credential_configuration.credential_data) self._parser_mediator = parsers_mediator.ParserMediator( None, self._knowledge_base, artifacts_filter_helper=self._artifacts_filter_helper, preferred_year=self._processing_configuration.preferred_year, resolver_context=resolver_context, temporary_directory=self._processing_configuration.temporary_directory) self._parser_mediator.SetEventExtractionConfiguration( self._processing_configuration.event_extraction) self._parser_mediator.SetInputSourceConfiguration( self._processing_configuration.input_source) # We need to initialize the parser and hasher objects after the process # has forked otherwise on Windows the "fork" will fail with # a PickleError for Python modules that cannot be pickled. self._extraction_worker = worker.EventExtractionWorker( parser_filter_expression=( self._processing_configuration.parser_filter_expression)) self._extraction_worker.SetExtractionConfiguration( self._processing_configuration.extraction) self._parser_mediator.StartProfiling( self._processing_configuration.profiling, self._name, self._process_information) self._StartProfiling(self._processing_configuration.profiling) if self._processing_profiler: self._extraction_worker.SetProcessingProfiler(self._processing_profiler) if self._serializers_profiler: self._storage_writer.SetSerializersProfiler(self._serializers_profiler) if self._storage_profiler: self._storage_writer.SetStorageProfiler(self._storage_profiler) logger.debug('Worker: {0!s} (PID: {1:d}) started.'.format( self._name, self._pid)) self._status = definitions.STATUS_INDICATOR_RUNNING try: logger.debug('{0!s} (PID: {1:d}) started monitoring task queue.'.format( self._name, self._pid)) while not self._abort: try: task = self._task_queue.PopItem() except (errors.QueueClose, errors.QueueEmpty) as exception: logger.debug('ConsumeItems exiting with exception {0:s}.'.format( type(exception))) break if isinstance(task, plaso_queue.QueueAbort): logger.debug('ConsumeItems exiting, dequeued QueueAbort object.') break self._ProcessTask(task) logger.debug('{0!s} (PID: {1:d}) stopped monitoring task queue.'.format( self._name, self._pid)) # All exceptions need to be caught here to prevent the process # from being killed by an uncaught exception. 
except Exception as exception: # pylint: disable=broad-except logger.warning( 'Unhandled exception in process: {0!s} (PID: {1:d}).'.format( self._name, self._pid)) logger.exception(exception) self._abort = True if self._processing_profiler: self._extraction_worker.SetProcessingProfiler(None) if self._serializers_profiler: self._storage_writer.SetSerializersProfiler(None) if self._storage_profiler: self._storage_writer.SetStorageProfiler(None) self._StopProfiling() self._parser_mediator.StopProfiling() self._extraction_worker = None self._parser_mediator = None self._storage_writer = None if self._abort: self._status = definitions.STATUS_INDICATOR_ABORTED else: self._status = definitions.STATUS_INDICATOR_COMPLETED logger.debug('Worker: {0!s} (PID: {1:d}) stopped.'.format( self._name, self._pid)) try: self._task_queue.Close(abort=self._abort) except errors.QueueAlreadyClosed: logger.error('Queue for {0:s} was already closed.'.format(self.name))
[ "def", "_Main", "(", "self", ")", ":", "# We need a resolver context per process to prevent multi processing", "# issues with file objects stored in images.", "resolver_context", "=", "context", ".", "Context", "(", ")", "for", "credential_configuration", "in", "self", ".", "_processing_configuration", ".", "credentials", ":", "resolver", ".", "Resolver", ".", "key_chain", ".", "SetCredential", "(", "credential_configuration", ".", "path_spec", ",", "credential_configuration", ".", "credential_type", ",", "credential_configuration", ".", "credential_data", ")", "self", ".", "_parser_mediator", "=", "parsers_mediator", ".", "ParserMediator", "(", "None", ",", "self", ".", "_knowledge_base", ",", "artifacts_filter_helper", "=", "self", ".", "_artifacts_filter_helper", ",", "preferred_year", "=", "self", ".", "_processing_configuration", ".", "preferred_year", ",", "resolver_context", "=", "resolver_context", ",", "temporary_directory", "=", "self", ".", "_processing_configuration", ".", "temporary_directory", ")", "self", ".", "_parser_mediator", ".", "SetEventExtractionConfiguration", "(", "self", ".", "_processing_configuration", ".", "event_extraction", ")", "self", ".", "_parser_mediator", ".", "SetInputSourceConfiguration", "(", "self", ".", "_processing_configuration", ".", "input_source", ")", "# We need to initialize the parser and hasher objects after the process", "# has forked otherwise on Windows the \"fork\" will fail with", "# a PickleError for Python modules that cannot be pickled.", "self", ".", "_extraction_worker", "=", "worker", ".", "EventExtractionWorker", "(", "parser_filter_expression", "=", "(", "self", ".", "_processing_configuration", ".", "parser_filter_expression", ")", ")", "self", ".", "_extraction_worker", ".", "SetExtractionConfiguration", "(", "self", ".", "_processing_configuration", ".", "extraction", ")", "self", ".", "_parser_mediator", ".", "StartProfiling", "(", "self", ".", "_processing_configuration", ".", "profiling", ",", "self", ".", "_name", ",", "self", ".", "_process_information", ")", "self", ".", "_StartProfiling", "(", "self", ".", "_processing_configuration", ".", "profiling", ")", "if", "self", ".", "_processing_profiler", ":", "self", ".", "_extraction_worker", ".", "SetProcessingProfiler", "(", "self", ".", "_processing_profiler", ")", "if", "self", ".", "_serializers_profiler", ":", "self", ".", "_storage_writer", ".", "SetSerializersProfiler", "(", "self", ".", "_serializers_profiler", ")", "if", "self", ".", "_storage_profiler", ":", "self", ".", "_storage_writer", ".", "SetStorageProfiler", "(", "self", ".", "_storage_profiler", ")", "logger", ".", "debug", "(", "'Worker: {0!s} (PID: {1:d}) started.'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "self", ".", "_status", "=", "definitions", ".", "STATUS_INDICATOR_RUNNING", "try", ":", "logger", ".", "debug", "(", "'{0!s} (PID: {1:d}) started monitoring task queue.'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "while", "not", "self", ".", "_abort", ":", "try", ":", "task", "=", "self", ".", "_task_queue", ".", "PopItem", "(", ")", "except", "(", "errors", ".", "QueueClose", ",", "errors", ".", "QueueEmpty", ")", "as", "exception", ":", "logger", ".", "debug", "(", "'ConsumeItems exiting with exception {0:s}.'", ".", "format", "(", "type", "(", "exception", ")", ")", ")", "break", "if", "isinstance", "(", "task", ",", "plaso_queue", ".", "QueueAbort", ")", ":", "logger", ".", "debug", "(", "'ConsumeItems exiting, 
dequeued QueueAbort object.'", ")", "break", "self", ".", "_ProcessTask", "(", "task", ")", "logger", ".", "debug", "(", "'{0!s} (PID: {1:d}) stopped monitoring task queue.'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "# All exceptions need to be caught here to prevent the process", "# from being killed by an uncaught exception.", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "logger", ".", "warning", "(", "'Unhandled exception in process: {0!s} (PID: {1:d}).'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "logger", ".", "exception", "(", "exception", ")", "self", ".", "_abort", "=", "True", "if", "self", ".", "_processing_profiler", ":", "self", ".", "_extraction_worker", ".", "SetProcessingProfiler", "(", "None", ")", "if", "self", ".", "_serializers_profiler", ":", "self", ".", "_storage_writer", ".", "SetSerializersProfiler", "(", "None", ")", "if", "self", ".", "_storage_profiler", ":", "self", ".", "_storage_writer", ".", "SetStorageProfiler", "(", "None", ")", "self", ".", "_StopProfiling", "(", ")", "self", ".", "_parser_mediator", ".", "StopProfiling", "(", ")", "self", ".", "_extraction_worker", "=", "None", "self", ".", "_parser_mediator", "=", "None", "self", ".", "_storage_writer", "=", "None", "if", "self", ".", "_abort", ":", "self", ".", "_status", "=", "definitions", ".", "STATUS_INDICATOR_ABORTED", "else", ":", "self", ".", "_status", "=", "definitions", ".", "STATUS_INDICATOR_COMPLETED", "logger", ".", "debug", "(", "'Worker: {0!s} (PID: {1:d}) stopped.'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "try", ":", "self", ".", "_task_queue", ".", "Close", "(", "abort", "=", "self", ".", "_abort", ")", "except", "errors", ".", "QueueAlreadyClosed", ":", "logger", ".", "error", "(", "'Queue for {0:s} was already closed.'", ".", "format", "(", "self", ".", "name", ")", ")" ]
35.814159
22.663717
def set_conn(self, **kwargs): """ Takes connection parameters as keyword arguments and creates the connection. """ # log = logging.getLogger("%s.%s" % (self.log, inspect.stack()[0][3])) log.setLevel(kwargs.get('log_level',self.log_level)) conn_name = kwargs.get("name") if not conn_name: raise NameError("a connection requires a 'name': %s" % kwargs) elif self.conns.get(conn_name): raise KeyError("connection '%s' has already been set" % conn_name) if not kwargs.get("active", True): log.warning("Connection '%s' is set as inactive" % conn_name) return conn_type = kwargs.get("conn_type") if not conn_type or conn_type not in self.conn_mapping.nested: err_msg = ["a connection requires a valid 'conn_type':\n", "%s"] raise NameError("".join(err_msg) % (list(self.conn_mapping.nested))) log.info("Setting '%s' connection", conn_name) if conn_type == "triplestore": conn = make_tstore_conn(kwargs) else: conn = RdfwConnections[conn_type][kwargs['vendor']](**kwargs) self.conns[conn_name] = conn self.__is_initialized__ = True
[ "def", "set_conn", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# log = logging.getLogger(\"%s.%s\" % (self.log, inspect.stack()[0][3]))", "log", ".", "setLevel", "(", "kwargs", ".", "get", "(", "'log_level'", ",", "self", ".", "log_level", ")", ")", "conn_name", "=", "kwargs", ".", "get", "(", "\"name\"", ")", "if", "not", "conn_name", ":", "raise", "NameError", "(", "\"a connection requires a 'name': %s\"", "%", "kwargs", ")", "elif", "self", ".", "conns", ".", "get", "(", "conn_name", ")", ":", "raise", "KeyError", "(", "\"connection '%s' has already been set\"", "%", "conn_name", ")", "if", "not", "kwargs", ".", "get", "(", "\"active\"", ",", "True", ")", ":", "log", ".", "warning", "(", "\"Connection '%s' is set as inactive\"", "%", "conn_name", ")", "return", "conn_type", "=", "kwargs", ".", "get", "(", "\"conn_type\"", ")", "if", "not", "conn_type", "or", "conn_type", "not", "in", "self", ".", "conn_mapping", ".", "nested", ":", "err_msg", "=", "[", "\"a connection requires a valid 'conn_type':\\n\"", ",", "\"%s\"", "]", "raise", "NameError", "(", "\"\"", ".", "join", "(", "err_msg", ")", "%", "(", "list", "(", "self", ".", "conn_mapping", ".", "nested", ")", ")", ")", "log", ".", "info", "(", "\"Setting '%s' connection\"", ",", "conn_name", ")", "if", "conn_type", "==", "\"triplestore\"", ":", "conn", "=", "make_tstore_conn", "(", "kwargs", ")", "else", ":", "conn", "=", "RdfwConnections", "[", "conn_type", "]", "[", "kwargs", "[", "'vendor'", "]", "]", "(", "*", "*", "kwargs", ")", "self", ".", "conns", "[", "conn_name", "]", "=", "conn", "self", ".", "__is_initialized__", "=", "True" ]
44.481481
19.851852
def print_build_help(build_path, default_build_path): """ Print help text after configuration step is done. """ print(' configure step is done') print(' now you need to compile the sources:') if (build_path == default_build_path): print(' $ cd build') else: print(' $ cd ' + build_path) print(' $ make')
[ "def", "print_build_help", "(", "build_path", ",", "default_build_path", ")", ":", "print", "(", "' configure step is done'", ")", "print", "(", "' now you need to compile the sources:'", ")", "if", "(", "build_path", "==", "default_build_path", ")", ":", "print", "(", "' $ cd build'", ")", "else", ":", "print", "(", "' $ cd '", "+", "build_path", ")", "print", "(", "' $ make'", ")" ]
31.909091
9.363636
async def _playnow(self, ctx, *, query: str): """ Immediately plays a song. """ player = self.bot.lavalink.players.get(ctx.guild.id) if not player.queue and not player.is_playing: return await ctx.invoke(self._play, query=query) query = query.strip('<>') if not url_rx.match(query): query = f'ytsearch:{query}' results = await self.bot.lavalink.get_tracks(query) if not results or not results['tracks']: return await ctx.send('Nothing found!') tracks = results['tracks'] track = tracks.pop(0) if results['loadType'] == 'PLAYLIST_LOADED': for _track in tracks: player.add(requester=ctx.author.id, track=_track) await player.play_now(requester=ctx.author.id, track=track)
[ "async", "def", "_playnow", "(", "self", ",", "ctx", ",", "*", ",", "query", ":", "str", ")", ":", "player", "=", "self", ".", "bot", ".", "lavalink", ".", "players", ".", "get", "(", "ctx", ".", "guild", ".", "id", ")", "if", "not", "player", ".", "queue", "and", "not", "player", ".", "is_playing", ":", "return", "await", "ctx", ".", "invoke", "(", "self", ".", "_play", ",", "query", "=", "query", ")", "query", "=", "query", ".", "strip", "(", "'<>'", ")", "if", "not", "url_rx", ".", "match", "(", "query", ")", ":", "query", "=", "f'ytsearch:{query}'", "results", "=", "await", "self", ".", "bot", ".", "lavalink", ".", "get_tracks", "(", "query", ")", "if", "not", "results", "or", "not", "results", "[", "'tracks'", "]", ":", "return", "await", "ctx", ".", "send", "(", "'Nothing found!'", ")", "tracks", "=", "results", "[", "'tracks'", "]", "track", "=", "tracks", ".", "pop", "(", "0", ")", "if", "results", "[", "'loadType'", "]", "==", "'PLAYLIST_LOADED'", ":", "for", "_track", "in", "tracks", ":", "player", ".", "add", "(", "requester", "=", "ctx", ".", "author", ".", "id", ",", "track", "=", "_track", ")", "await", "player", ".", "play_now", "(", "requester", "=", "ctx", ".", "author", ".", "id", ",", "track", "=", "track", ")" ]
33.16
20.52
def _sin_to_angle(result, deriv, side=1): """Convert a sine and its derivatives to an angle and its derivatives""" v = np.arcsin(np.clip(result[0], -1, 1)) sign = side if sign == -1: if v < 0: offset = -np.pi else: offset = np.pi else: offset = 0.0 if deriv == 0: return v*sign + offset, if abs(result[0]) >= 1: factor1 = 0 else: factor1 = 1.0/np.sqrt(1-result[0]**2) d = factor1*result[1] if deriv == 1: return v*sign + offset, d*sign factor2 = result[0]*factor1**3 dd = factor2*np.outer(result[1], result[1]) + factor1*result[2] if deriv == 2: return v*sign + offset, d*sign, dd*sign raise ValueError("deriv must be 0, 1 or 2.")
[ "def", "_sin_to_angle", "(", "result", ",", "deriv", ",", "side", "=", "1", ")", ":", "v", "=", "np", ".", "arcsin", "(", "np", ".", "clip", "(", "result", "[", "0", "]", ",", "-", "1", ",", "1", ")", ")", "sign", "=", "side", "if", "sign", "==", "-", "1", ":", "if", "v", "<", "0", ":", "offset", "=", "-", "np", ".", "pi", "else", ":", "offset", "=", "np", ".", "pi", "else", ":", "offset", "=", "0.0", "if", "deriv", "==", "0", ":", "return", "v", "*", "sign", "+", "offset", ",", "if", "abs", "(", "result", "[", "0", "]", ")", ">=", "1", ":", "factor1", "=", "0", "else", ":", "factor1", "=", "1.0", "/", "np", ".", "sqrt", "(", "1", "-", "result", "[", "0", "]", "**", "2", ")", "d", "=", "factor1", "*", "result", "[", "1", "]", "if", "deriv", "==", "1", ":", "return", "v", "*", "sign", "+", "offset", ",", "d", "*", "sign", "factor2", "=", "result", "[", "0", "]", "*", "factor1", "**", "3", "dd", "=", "factor2", "*", "np", ".", "outer", "(", "result", "[", "1", "]", ",", "result", "[", "1", "]", ")", "+", "factor1", "*", "result", "[", "2", "]", "if", "deriv", "==", "2", ":", "return", "v", "*", "sign", "+", "offset", ",", "d", "*", "sign", ",", "dd", "*", "sign", "raise", "ValueError", "(", "\"deriv must be 0, 1 or 2.\"", ")" ]
30
15.6
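A minimal usage sketch for _sin_to_angle above, assuming the function is in scope and that the input tuple is laid out as (value, gradient, hessian) as the code implies; the numbers are illustrative only.

import numpy as np

value = np.sin(0.3)
(theta,) = _sin_to_angle((value,), deriv=0)                 # ~0.3 on the principal branch
(theta_other,) = _sin_to_angle((value,), deriv=0, side=-1)  # ~pi - 0.3 on the mirrored branch

grad = np.array([0.1, -0.2])                                # assumed gradient of sin(theta) w.r.t. two inputs
theta, dtheta = _sin_to_angle((value, grad), deriv=1)       # chain rule: dtheta = grad / sqrt(1 - value**2)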
def from_dict(data, ctx): """ Instantiate a new ClientConfigureTransaction from a dict (generally from loading a JSON response). The data used to instantiate the ClientConfigureTransaction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number( data.get('marginRate') ) return ClientConfigureTransaction(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'marginRate'", ")", "is", "not", "None", ":", "data", "[", "'marginRate'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginRate'", ")", ")", "return", "ClientConfigureTransaction", "(", "*", "*", "data", ")" ]
35.25
21.125
def f_lock_parameters(self): """Locks all non-empty parameters""" for par in self._parameters.values(): if not par.f_is_empty(): par.f_lock()
[ "def", "f_lock_parameters", "(", "self", ")", ":", "for", "par", "in", "self", ".", "_parameters", ".", "values", "(", ")", ":", "if", "not", "par", ".", "f_is_empty", "(", ")", ":", "par", ".", "f_lock", "(", ")" ]
36.2
6.6
def fit(self, df, duration_col, event_col=None, weights_col=None, show_progress=False): """ Parameters ---------- Fit the Aalen Additive model to a dataset. Parameters ---------- df: DataFrame a Pandas DataFrame with necessary columns `duration_col` and `event_col` (see below), covariates columns, and special columns (weights). `duration_col` refers to the lifetimes of the subjects. `event_col` refers to whether the 'death' events was observed: 1 if observed, 0 else (censored). duration_col: string the name of the column in DataFrame that contains the subjects' lifetimes. event_col: string, optional the name of the column in DataFrame that contains the subjects' death observation. If left as None, assume all individuals are uncensored. weights_col: string, optional an optional column in the DataFrame, df, that denotes the weight per subject. This column is expelled and not used as a covariate, but as a weight in the final regression. Default weight is 1. This can be used for case-weights. For example, a weight of 2 means there were two subjects with identical observations. This can be used for sampling weights. show_progress: boolean, optional (default=False) Since the fitter is iterative, show iteration number. Returns ------- self: AalenAdditiveFitter self with additional new properties: ``cumulative_hazards_``, etc. Examples -------- >>> from lifelines import AalenAdditiveFitter >>> >>> df = pd.DataFrame({ >>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7], >>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0], >>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2], >>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7], >>> }) >>> >>> aaf = AalenAdditiveFitter() >>> aaf.fit(df, 'T', 'E') >>> aaf.predict_median(df) >>> aaf.print_summary() """ self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC" self._censoring_type = CensoringType.RIGHT df = df.copy() self.duration_col = duration_col self.event_col = event_col self.weights_col = weights_col self._n_examples = df.shape[0] X, T, E, weights = self._preprocess_dataframe(df) self.durations = T.copy() self.event_observed = E.copy() self.weights = weights.copy() self._norm_std = X.std(0) # if we included an intercept, we need to fix not divide by zero. if self.fit_intercept: self._norm_std["_intercept"] = 1.0 else: # a _intercept was provided self._norm_std[self._norm_std < 1e-8] = 1.0 self.hazards_, self.cumulative_hazards_, self.cumulative_variance_ = self._fit_model( normalize(X, 0, self._norm_std), T, E, weights, show_progress ) self.hazards_ /= self._norm_std self.cumulative_hazards_ /= self._norm_std self.cumulative_variance_ /= self._norm_std self.confidence_intervals_ = self._compute_confidence_intervals() self._index = self.hazards_.index self._predicted_hazards_ = self.predict_cumulative_hazard(X).iloc[-1].values.ravel() return self
[ "def", "fit", "(", "self", ",", "df", ",", "duration_col", ",", "event_col", "=", "None", ",", "weights_col", "=", "None", ",", "show_progress", "=", "False", ")", ":", "self", ".", "_time_fit_was_called", "=", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "+", "\" UTC\"", "self", ".", "_censoring_type", "=", "CensoringType", ".", "RIGHT", "df", "=", "df", ".", "copy", "(", ")", "self", ".", "duration_col", "=", "duration_col", "self", ".", "event_col", "=", "event_col", "self", ".", "weights_col", "=", "weights_col", "self", ".", "_n_examples", "=", "df", ".", "shape", "[", "0", "]", "X", ",", "T", ",", "E", ",", "weights", "=", "self", ".", "_preprocess_dataframe", "(", "df", ")", "self", ".", "durations", "=", "T", ".", "copy", "(", ")", "self", ".", "event_observed", "=", "E", ".", "copy", "(", ")", "self", ".", "weights", "=", "weights", ".", "copy", "(", ")", "self", ".", "_norm_std", "=", "X", ".", "std", "(", "0", ")", "# if we included an intercept, we need to fix not divide by zero.", "if", "self", ".", "fit_intercept", ":", "self", ".", "_norm_std", "[", "\"_intercept\"", "]", "=", "1.0", "else", ":", "# a _intercept was provided", "self", ".", "_norm_std", "[", "self", ".", "_norm_std", "<", "1e-8", "]", "=", "1.0", "self", ".", "hazards_", ",", "self", ".", "cumulative_hazards_", ",", "self", ".", "cumulative_variance_", "=", "self", ".", "_fit_model", "(", "normalize", "(", "X", ",", "0", ",", "self", ".", "_norm_std", ")", ",", "T", ",", "E", ",", "weights", ",", "show_progress", ")", "self", ".", "hazards_", "/=", "self", ".", "_norm_std", "self", ".", "cumulative_hazards_", "/=", "self", ".", "_norm_std", "self", ".", "cumulative_variance_", "/=", "self", ".", "_norm_std", "self", ".", "confidence_intervals_", "=", "self", ".", "_compute_confidence_intervals", "(", ")", "self", ".", "_index", "=", "self", ".", "hazards_", ".", "index", "self", ".", "_predicted_hazards_", "=", "self", ".", "predict_cumulative_hazard", "(", "X", ")", ".", "iloc", "[", "-", "1", "]", ".", "values", ".", "ravel", "(", ")", "return", "self" ]
36.526316
23.936842
def expand(fn, col, inputtype=pd.DataFrame): """ Wrap a function applying to a single column to make a function applying to a multi-dimensional dataframe or ndarray Parameters ---------- fn : function Function that applies to a series or vector. col : str or int Index of column to which to apply `fn`. inputtype : class or type Type of input to be expected by the wrapped function. Normally pd.DataFrame or np.ndarray. Defaults to pd.DataFrame. Returns ---------- wrapped : function Function that takes an input of type `inputtype` and applies `fn` to the specified `col`. """ if inputtype == pd.DataFrame: if isinstance(col, int): def _wrapper(*args, **kwargs): return fn(args[0].iloc[:, col], *args[1:], **kwargs) return _wrapper def _wrapper(*args, **kwargs): return fn(args[0].loc[:, col], *args[1:], **kwargs) return _wrapper elif inputtype == np.ndarray: def _wrapper(*args, **kwargs): return fn(args[0][:, col], *args[1:], **kwargs) return _wrapper raise TypeError("invalid input type")
[ "def", "expand", "(", "fn", ",", "col", ",", "inputtype", "=", "pd", ".", "DataFrame", ")", ":", "if", "inputtype", "==", "pd", ".", "DataFrame", ":", "if", "isinstance", "(", "col", ",", "int", ")", ":", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "fn", "(", "args", "[", "0", "]", ".", "iloc", "[", ":", ",", "col", "]", ",", "*", "args", "[", "1", ":", "]", ",", "*", "*", "kwargs", ")", "return", "_wrapper", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "fn", "(", "args", "[", "0", "]", ".", "loc", "[", ":", ",", "col", "]", ",", "*", "args", "[", "1", ":", "]", ",", "*", "*", "kwargs", ")", "return", "_wrapper", "elif", "inputtype", "==", "np", ".", "ndarray", ":", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "fn", "(", "args", "[", "0", "]", "[", ":", ",", "col", "]", ",", "*", "args", "[", "1", ":", "]", ",", "*", "*", "kwargs", ")", "return", "_wrapper", "raise", "TypeError", "(", "\"invalid input type\"", ")" ]
32.638889
17.583333
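A short usage sketch for expand above, assuming the function is importable; the DataFrame contents and column names are illustrative.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})

mean_of_a = expand(np.mean, "a")                        # wraps np.mean to act on column "a" of a DataFrame
print(mean_of_a(df))                                    # 2.0

mean_of_col0 = expand(np.mean, 0, inputtype=np.ndarray)
print(mean_of_col0(df.to_numpy()))                      # 2.0, selecting column 0 of the ndarray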
def _CaptureExpression(self, frame, expression): """Evaluates the expression and captures it into a Variable object. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: Variable object (which will have error status if the expression fails to evaluate). """ rc, value = _EvaluateExpression(frame, expression) if not rc: return {'name': expression, 'status': value} return self.CaptureNamedVariable(expression, value, 0, self.expression_capture_limits)
[ "def", "_CaptureExpression", "(", "self", ",", "frame", ",", "expression", ")", ":", "rc", ",", "value", "=", "_EvaluateExpression", "(", "frame", ",", "expression", ")", "if", "not", "rc", ":", "return", "{", "'name'", ":", "expression", ",", "'status'", ":", "value", "}", "return", "self", ".", "CaptureNamedVariable", "(", "expression", ",", "value", ",", "0", ",", "self", ".", "expression_capture_limits", ")" ]
33.941176
21.647059
def expandvars(text, environ=None): """Expand shell variables of form $var and ${var}. Unknown variables are left unchanged. Args: text (str): String to expand. environ (dict): Environ dict to use for expansions, defaults to os.environ. Returns: The expanded string. """ if '$' not in text: return text i = 0 if environ is None: environ = os.environ while True: m = ENV_VAR_REGEX.search(text, i) if not m: break i, j = m.span(0) name = m.group(1) if name.startswith('{') and name.endswith('}'): name = name[1:-1] if name in environ: tail = text[j:] text = text[:i] + environ[name] i = len(text) text += tail else: i = j return text
[ "def", "expandvars", "(", "text", ",", "environ", "=", "None", ")", ":", "if", "'$'", "not", "in", "text", ":", "return", "text", "i", "=", "0", "if", "environ", "is", "None", ":", "environ", "=", "os", ".", "environ", "while", "True", ":", "m", "=", "ENV_VAR_REGEX", ".", "search", "(", "text", ",", "i", ")", "if", "not", "m", ":", "break", "i", ",", "j", "=", "m", ".", "span", "(", "0", ")", "name", "=", "m", ".", "group", "(", "1", ")", "if", "name", ".", "startswith", "(", "'{'", ")", "and", "name", ".", "endswith", "(", "'}'", ")", ":", "name", "=", "name", "[", "1", ":", "-", "1", "]", "if", "name", "in", "environ", ":", "tail", "=", "text", "[", "j", ":", "]", "text", "=", "text", "[", ":", "i", "]", "+", "environ", "[", "name", "]", "i", "=", "len", "(", "text", ")", "text", "+=", "tail", "else", ":", "i", "=", "j", "return", "text" ]
23.138889
19.166667
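A usage sketch for expandvars above; it assumes the module-level ENV_VAR_REGEX (not shown in the record) matches $VAR and ${VAR} forms and that the function is in scope.

env = {"HOME": "/home/alice", "SHELL": "/bin/bash"}

print(expandvars("$HOME/bin", environ=env))          # /home/alice/bin
print(expandvars("${SHELL} -l", environ=env))        # /bin/bash -l
print(expandvars("$UNDEFINED/x", environ=env))       # $UNDEFINED/x  (unknown variables left unchanged)
print(expandvars("no variables here", environ=env))  # returned unchanged without scanning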
def to_output_script(address): ''' str -> bytes There's probably a better way to do this ''' parsed = parse(address) parsed_hash = b'' try: if (parsed.find(riemann.network.P2WPKH_PREFIX) == 0 and len(parsed) == 22): return parsed except TypeError: pass try: if (parsed.find(riemann.network.P2WSH_PREFIX) == 0 and len(parsed) == 34): return parsed except TypeError: pass try: if (parsed.find(riemann.network.CASHADDR_P2SH) == 0 and len(parsed) == len(riemann.network.CASHADDR_P2SH) + 20): prefix = b'\xa9\x14' # OP_HASH160 PUSH14 parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):] suffix = b'\x87' # OP_EQUAL except TypeError: pass try: if (parsed.find(riemann.network.CASHADDR_P2PKH) == 0 and len(parsed) == len(riemann.network.CASHADDR_P2PKH) + 20): prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14 parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):] suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG except TypeError: pass if (parsed.find(riemann.network.P2PKH_PREFIX) == 0 and len(parsed) == len(riemann.network.P2PKH_PREFIX) + 20): prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14 parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):] suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG if (parsed.find(riemann.network.P2SH_PREFIX) == 0 and len(parsed) == len(riemann.network.P2SH_PREFIX) + 20): prefix = b'\xa9\x14' # OP_HASH160 PUSH14 parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):] suffix = b'\x87' # OP_EQUAL if parsed_hash == b'': raise ValueError('Cannot parse output script from address.') output_script = prefix + parsed_hash + suffix return output_script
[ "def", "to_output_script", "(", "address", ")", ":", "parsed", "=", "parse", "(", "address", ")", "parsed_hash", "=", "b''", "try", ":", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "P2WPKH_PREFIX", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "22", ")", ":", "return", "parsed", "except", "TypeError", ":", "pass", "try", ":", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "P2WSH_PREFIX", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "34", ")", ":", "return", "parsed", "except", "TypeError", ":", "pass", "try", ":", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "CASHADDR_P2SH", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "len", "(", "riemann", ".", "network", ".", "CASHADDR_P2SH", ")", "+", "20", ")", ":", "prefix", "=", "b'\\xa9\\x14'", "# OP_HASH160 PUSH14", "parsed_hash", "=", "parsed", "[", "len", "(", "riemann", ".", "network", ".", "P2SH_PREFIX", ")", ":", "]", "suffix", "=", "b'\\x87'", "# OP_EQUAL", "except", "TypeError", ":", "pass", "try", ":", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "CASHADDR_P2PKH", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "len", "(", "riemann", ".", "network", ".", "CASHADDR_P2PKH", ")", "+", "20", ")", ":", "prefix", "=", "b'\\x76\\xa9\\x14'", "# OP_DUP OP_HASH160 PUSH14", "parsed_hash", "=", "parsed", "[", "len", "(", "riemann", ".", "network", ".", "P2PKH_PREFIX", ")", ":", "]", "suffix", "=", "b'\\x88\\xac'", "# OP_EQUALVERIFY OP_CHECKSIG", "except", "TypeError", ":", "pass", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "P2PKH_PREFIX", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "len", "(", "riemann", ".", "network", ".", "P2PKH_PREFIX", ")", "+", "20", ")", ":", "prefix", "=", "b'\\x76\\xa9\\x14'", "# OP_DUP OP_HASH160 PUSH14", "parsed_hash", "=", "parsed", "[", "len", "(", "riemann", ".", "network", ".", "P2PKH_PREFIX", ")", ":", "]", "suffix", "=", "b'\\x88\\xac'", "# OP_EQUALVERIFY OP_CHECKSIG", "if", "(", "parsed", ".", "find", "(", "riemann", ".", "network", ".", "P2SH_PREFIX", ")", "==", "0", "and", "len", "(", "parsed", ")", "==", "len", "(", "riemann", ".", "network", ".", "P2SH_PREFIX", ")", "+", "20", ")", ":", "prefix", "=", "b'\\xa9\\x14'", "# OP_HASH160 PUSH14", "parsed_hash", "=", "parsed", "[", "len", "(", "riemann", ".", "network", ".", "P2SH_PREFIX", ")", ":", "]", "suffix", "=", "b'\\x87'", "# OP_EQUAL", "if", "parsed_hash", "==", "b''", ":", "raise", "ValueError", "(", "'Cannot parse output script from address.'", ")", "output_script", "=", "prefix", "+", "parsed_hash", "+", "suffix", "return", "output_script" ]
33.894737
23.157895
def get(self, action, version=None): """Get the method class handing the given action and version.""" by_version = self._by_action[action] if version in by_version: return by_version[version] else: return by_version[None]
[ "def", "get", "(", "self", ",", "action", ",", "version", "=", "None", ")", ":", "by_version", "=", "self", ".", "_by_action", "[", "action", "]", "if", "version", "in", "by_version", ":", "return", "by_version", "[", "version", "]", "else", ":", "return", "by_version", "[", "None", "]" ]
38.714286
7
def delta_stoichiometry( reactants, products ): """ Calculate the change in stoichiometry for reactants --> products. Args: reactants (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The initial state. products (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The final state. Returns: (dict): The change in stoichiometry. """ totals = Counter() for r in reactants: totals.update( ( r * -1.0 ).stoichiometry ) for p in products: totals.update( p.stoichiometry ) to_return = {} for c in totals: if totals[c] != 0: to_return[c] = totals[c] return to_return
[ "def", "delta_stoichiometry", "(", "reactants", ",", "products", ")", ":", "totals", "=", "Counter", "(", ")", "for", "r", "in", "reactants", ":", "totals", ".", "update", "(", "(", "r", "*", "-", "1.0", ")", ".", "stoichiometry", ")", "for", "p", "in", "products", ":", "totals", ".", "update", "(", "p", ".", "stoichiometry", ")", "to_return", "=", "{", "}", "for", "c", "in", "totals", ":", "if", "totals", "[", "c", "]", "!=", "0", ":", "to_return", "[", "c", "]", "=", "totals", "[", "c", "]", "return", "to_return" ]
32.238095
21.238095
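An illustrative sketch for delta_stoichiometry above; vasppy.Calculation is replaced by a tiny stand-in class (an assumption) that provides only what the function touches: a stoichiometry Counter and scaling via * -1.0.

from collections import Counter

class FakeCalc:
    """Minimal stand-in for vasppy.Calculation, used only for this sketch."""
    def __init__(self, stoich):
        self.stoichiometry = Counter(stoich)

    def __mul__(self, scale):
        return FakeCalc({k: v * scale for k, v in self.stoichiometry.items()})

tio2 = FakeCalc({"Ti": 1, "O": 2})
ti = FakeCalc({"Ti": 1})

print(delta_stoichiometry([tio2], [ti]))   # {'O': -2.0}: two oxygen atoms leave, titanium is unchanged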
def create(self, **kwargs): """Create a new instance of this resource type. As a general rule, the identifier should have been provided, but in some subclasses the identifier is server-side-generated. Those classes have to overload this method to deal with that scenario. """ self.method = 'post' if self.primary_key in kwargs: del kwargs[self.primary_key] data = self._generate_input_dict(**kwargs) self.load(self.client.post(self.url, data=data)) return self
[ "def", "create", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "method", "=", "'post'", "if", "self", ".", "primary_key", "in", "kwargs", ":", "del", "kwargs", "[", "self", ".", "primary_key", "]", "data", "=", "self", ".", "_generate_input_dict", "(", "*", "*", "kwargs", ")", "self", ".", "load", "(", "self", ".", "client", ".", "post", "(", "self", ".", "url", ",", "data", "=", "data", ")", ")", "return", "self" ]
41.692308
16.307692
def signed_to_float(hex: str) -> float: """Convert signed hexadecimal to floating value.""" if int(hex, 16) & 0x8000: return -(int(hex, 16) & 0x7FFF) / 10 else: return int(hex, 16) / 10
[ "def", "signed_to_float", "(", "hex", ":", "str", ")", "->", "float", ":", "if", "int", "(", "hex", ",", "16", ")", "&", "0x8000", ":", "return", "-", "(", "int", "(", "hex", ",", "16", ")", "&", "0x7FFF", ")", "/", "10", "else", ":", "return", "int", "(", "hex", ",", "16", ")", "/", "10" ]
34.666667
9.166667
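Worked values for signed_to_float above, which follow directly from the code: the top bit of the 16-bit word is the sign and the remaining 15 bits are tenths.

print(signed_to_float("00EB"))   # 23.5   (0x00EB = 235 tenths)
print(signed_to_float("8005"))   # -0.5   (sign bit set, low bits 0x0005 = 5 tenths)
print(signed_to_float("0000"))   # 0.0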
def merge_in_place(self, others): """ Add the models present other predictors into the current predictor. Parameters ---------- others : list of Class1AffinityPredictor Other predictors to merge into the current predictor. Returns ------- list of string : names of newly added models """ new_model_names = [] for predictor in others: for model in predictor.class1_pan_allele_models: model_name = self.model_name( "pan-class1", len(self.class1_pan_allele_models)) self.class1_pan_allele_models.append(model) row = pandas.Series(collections.OrderedDict([ ("model_name", model_name), ("allele", "pan-class1"), ("config_json", json.dumps(model.get_config())), ("model", model), ])).to_frame().T self._manifest_df = pandas.concat( [self.manifest_df, row], ignore_index=True) new_model_names.append(model_name) for allele in predictor.allele_to_allele_specific_models: if allele not in self.allele_to_allele_specific_models: self.allele_to_allele_specific_models[allele] = [] current_models = self.allele_to_allele_specific_models[allele] for model in predictor.allele_to_allele_specific_models[allele]: model_name = self.model_name(allele, len(current_models)) row = pandas.Series(collections.OrderedDict([ ("model_name", model_name), ("allele", allele), ("config_json", json.dumps(model.get_config())), ("model", model), ])).to_frame().T self._manifest_df = pandas.concat( [self.manifest_df, row], ignore_index=True) current_models.append(model) new_model_names.append(model_name) self.clear_cache() return new_model_names
[ "def", "merge_in_place", "(", "self", ",", "others", ")", ":", "new_model_names", "=", "[", "]", "for", "predictor", "in", "others", ":", "for", "model", "in", "predictor", ".", "class1_pan_allele_models", ":", "model_name", "=", "self", ".", "model_name", "(", "\"pan-class1\"", ",", "len", "(", "self", ".", "class1_pan_allele_models", ")", ")", "self", ".", "class1_pan_allele_models", ".", "append", "(", "model", ")", "row", "=", "pandas", ".", "Series", "(", "collections", ".", "OrderedDict", "(", "[", "(", "\"model_name\"", ",", "model_name", ")", ",", "(", "\"allele\"", ",", "\"pan-class1\"", ")", ",", "(", "\"config_json\"", ",", "json", ".", "dumps", "(", "model", ".", "get_config", "(", ")", ")", ")", ",", "(", "\"model\"", ",", "model", ")", ",", "]", ")", ")", ".", "to_frame", "(", ")", ".", "T", "self", ".", "_manifest_df", "=", "pandas", ".", "concat", "(", "[", "self", ".", "manifest_df", ",", "row", "]", ",", "ignore_index", "=", "True", ")", "new_model_names", ".", "append", "(", "model_name", ")", "for", "allele", "in", "predictor", ".", "allele_to_allele_specific_models", ":", "if", "allele", "not", "in", "self", ".", "allele_to_allele_specific_models", ":", "self", ".", "allele_to_allele_specific_models", "[", "allele", "]", "=", "[", "]", "current_models", "=", "self", ".", "allele_to_allele_specific_models", "[", "allele", "]", "for", "model", "in", "predictor", ".", "allele_to_allele_specific_models", "[", "allele", "]", ":", "model_name", "=", "self", ".", "model_name", "(", "allele", ",", "len", "(", "current_models", ")", ")", "row", "=", "pandas", ".", "Series", "(", "collections", ".", "OrderedDict", "(", "[", "(", "\"model_name\"", ",", "model_name", ")", ",", "(", "\"allele\"", ",", "allele", ")", ",", "(", "\"config_json\"", ",", "json", ".", "dumps", "(", "model", ".", "get_config", "(", ")", ")", ")", ",", "(", "\"model\"", ",", "model", ")", ",", "]", ")", ")", ".", "to_frame", "(", ")", ".", "T", "self", ".", "_manifest_df", "=", "pandas", ".", "concat", "(", "[", "self", ".", "manifest_df", ",", "row", "]", ",", "ignore_index", "=", "True", ")", "current_models", ".", "append", "(", "model", ")", "new_model_names", ".", "append", "(", "model_name", ")", "self", ".", "clear_cache", "(", ")", "return", "new_model_names" ]
43.16
19
def dlogpdf_df(self, f, y, Y_metadata=None): """ Evaluates the link function link(f) then computes the derivative of log likelihood using it Uses the Faa di Bruno's formula for the chain rule .. math:: \\frac{d\\log p(y|\\lambda(f))}{df} = \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d\\lambda(f)}{df} :param f: latent variables f :type f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in student t distribution - not used :returns: derivative of log likelihood evaluated for this point :rtype: 1xN array """ if isinstance(self.gp_link, link_functions.Identity): return self.dlogpdf_dlink(f, y, Y_metadata=Y_metadata) else: inv_link_f = self.gp_link.transf(f) dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) dlink_df = self.gp_link.dtransf_df(f) return chain_1(dlogpdf_dlink, dlink_df)
[ "def", "dlogpdf_df", "(", "self", ",", "f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "if", "isinstance", "(", "self", ".", "gp_link", ",", "link_functions", ".", "Identity", ")", ":", "return", "self", ".", "dlogpdf_dlink", "(", "f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "else", ":", "inv_link_f", "=", "self", ".", "gp_link", ".", "transf", "(", "f", ")", "dlogpdf_dlink", "=", "self", ".", "dlogpdf_dlink", "(", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "dlink_df", "=", "self", ".", "gp_link", ".", "dtransf_df", "(", "f", ")", "return", "chain_1", "(", "dlogpdf_dlink", ",", "dlink_df", ")" ]
44.695652
24.173913
def randomSplit(self, weights, seed=None): """ Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1) >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17) >>> len(rdd1.collect() + rdd2.collect()) 500 >>> 150 < rdd1.count() < 250 True >>> 250 < rdd2.count() < 350 True """ s = float(sum(weights)) cweights = [0.0] for w in weights: cweights.append(cweights[-1] + w / s) if seed is None: seed = random.randint(0, 2 ** 32 - 1) return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True) for lb, ub in zip(cweights, cweights[1:])]
[ "def", "randomSplit", "(", "self", ",", "weights", ",", "seed", "=", "None", ")", ":", "s", "=", "float", "(", "sum", "(", "weights", ")", ")", "cweights", "=", "[", "0.0", "]", "for", "w", "in", "weights", ":", "cweights", ".", "append", "(", "cweights", "[", "-", "1", "]", "+", "w", "/", "s", ")", "if", "seed", "is", "None", ":", "seed", "=", "random", ".", "randint", "(", "0", ",", "2", "**", "32", "-", "1", ")", "return", "[", "self", ".", "mapPartitionsWithIndex", "(", "RDDRangeSampler", "(", "lb", ",", "ub", ",", "seed", ")", ".", "func", ",", "True", ")", "for", "lb", ",", "ub", "in", "zip", "(", "cweights", ",", "cweights", "[", "1", ":", "]", ")", "]" ]
35.04
16.56
def company(self, **kwargs): """ Search for companies by name. Args: query: CGI escaped string. page: (optional) Minimum value of 1. Expected value is an integer. Returns: A dict representation of the JSON returned from the API. """ path = self._get_path('company') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "company", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'company'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
28.0625
17.3125
def config_string(self): """ See the class documentation. """ # Note: _write_to_conf is determined when the value is calculated. This # is a hidden function call due to property magic. val = self.str_value if not self._write_to_conf: return "" if self.orig_type in _BOOL_TRISTATE: return "{}{}={}\n" \ .format(self.kconfig.config_prefix, self.name, val) \ if val != "n" else \ "# {}{} is not set\n" \ .format(self.kconfig.config_prefix, self.name) if self.orig_type in _INT_HEX: return "{}{}={}\n" \ .format(self.kconfig.config_prefix, self.name, val) # sym.orig_type is STRING return '{}{}="{}"\n' \ .format(self.kconfig.config_prefix, self.name, escape(val))
[ "def", "config_string", "(", "self", ")", ":", "# Note: _write_to_conf is determined when the value is calculated. This", "# is a hidden function call due to property magic.", "val", "=", "self", ".", "str_value", "if", "not", "self", ".", "_write_to_conf", ":", "return", "\"\"", "if", "self", ".", "orig_type", "in", "_BOOL_TRISTATE", ":", "return", "\"{}{}={}\\n\"", ".", "format", "(", "self", ".", "kconfig", ".", "config_prefix", ",", "self", ".", "name", ",", "val", ")", "if", "val", "!=", "\"n\"", "else", "\"# {}{} is not set\\n\"", ".", "format", "(", "self", ".", "kconfig", ".", "config_prefix", ",", "self", ".", "name", ")", "if", "self", ".", "orig_type", "in", "_INT_HEX", ":", "return", "\"{}{}={}\\n\"", ".", "format", "(", "self", ".", "kconfig", ".", "config_prefix", ",", "self", ".", "name", ",", "val", ")", "# sym.orig_type is STRING", "return", "'{}{}=\"{}\"\\n'", ".", "format", "(", "self", ".", "kconfig", ".", "config_prefix", ",", "self", ".", "name", ",", "escape", "(", "val", ")", ")" ]
36.416667
16.5
def check_hex_chain(chain): """Verify a merkle chain, with hashes hex encoded, to see if the Merkle root can be reproduced. """ return codecs.encode(check_chain([(codecs.decode(i[0], 'hex_codec'), i[1]) for i in chain]), 'hex_codec')
[ "def", "check_hex_chain", "(", "chain", ")", ":", "return", "codecs", ".", "encode", "(", "check_chain", "(", "[", "(", "codecs", ".", "decode", "(", "i", "[", "0", "]", ",", "'hex_codec'", ")", ",", "i", "[", "1", "]", ")", "for", "i", "in", "chain", "]", ")", ",", "'hex_codec'", ")" ]
60.5
20.5
def create_position(self, params={}): """ Creates a position http://dev.wheniwork.com/#create-update-position """ url = "/2/positions/" body = params data = self._post_resource(url, body) return self.position_from_json(data["position"])
[ "def", "create_position", "(", "self", ",", "params", "=", "{", "}", ")", ":", "url", "=", "\"/2/positions/\"", "body", "=", "params", "data", "=", "self", ".", "_post_resource", "(", "url", ",", "body", ")", "return", "self", ".", "position_from_json", "(", "data", "[", "\"position\"", "]", ")" ]
26.545455
14.909091
def add_service_certificate(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Add a new service certificate CLI Example: .. code-block:: bash salt-cloud -f add_service_certificate my-azure name=my_service_certificate \\ data='...CERT_DATA...' certificate_format=sha1 password=verybadpass ''' if call != 'function': raise SaltCloudSystemExit( 'The add_service_certificate function must be called with -f or --function.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'data' not in kwargs: raise SaltCloudSystemExit('Certificate data must be specified as "data"') if 'certificate_format' not in kwargs: raise SaltCloudSystemExit('A certificate_format must be specified as "certificate_format"') if 'password' not in kwargs: raise SaltCloudSystemExit('A password must be specified as "password"') try: data = conn.add_service_certificate( kwargs['name'], kwargs['data'], kwargs['certificate_format'], kwargs['password'], ) return {'Success': 'The service certificate was successfully added'} except AzureConflictHttpError: raise SaltCloudSystemExit('There was a conflict. This usually means that the ' 'service certificate already exists.')
[ "def", "add_service_certificate", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The add_service_certificate function must be called with -f or --function.'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A name must be specified as \"name\"'", ")", "if", "'data'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'Certificate data must be specified as \"data\"'", ")", "if", "'certificate_format'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A certificate_format must be specified as \"certificate_format\"'", ")", "if", "'password'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A password must be specified as \"password\"'", ")", "try", ":", "data", "=", "conn", ".", "add_service_certificate", "(", "kwargs", "[", "'name'", "]", ",", "kwargs", "[", "'data'", "]", ",", "kwargs", "[", "'certificate_format'", "]", ",", "kwargs", "[", "'password'", "]", ",", ")", "return", "{", "'Success'", ":", "'The service certificate was successfully added'", "}", "except", "AzureConflictHttpError", ":", "raise", "SaltCloudSystemExit", "(", "'There was a conflict. This usually means that the '", "'service certificate already exists.'", ")" ]
31.87234
27.106383
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.rds.RDSConnection` :return: A connection to RDS """ from boto.rds import RDSConnection return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_rds", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "boto", ".", "rds", "import", "RDSConnection", "return", "RDSConnection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "*", "*", "kwargs", ")" ]
35.923077
15.769231
def event_first_of(*events: _AbstractLinkable) -> Event: """ Waits until one of `events` is set. The returned event is /not/ cleared together with any of the `events`; it must not be reused if the clearing behavior is needed. """ first_finished = Event() if not all(isinstance(e, _AbstractLinkable) for e in events): raise ValueError('all events must be linkable') for event in events: event.rawlink(lambda _: first_finished.set()) return first_finished
[ "def", "event_first_of", "(", "*", "events", ":", "_AbstractLinkable", ")", "->", "Event", ":", "first_finished", "=", "Event", "(", ")", "if", "not", "all", "(", "isinstance", "(", "e", ",", "_AbstractLinkable", ")", "for", "e", "in", "events", ")", ":", "raise", "ValueError", "(", "'all events must be linkable'", ")", "for", "event", "in", "events", ":", "event", ".", "rawlink", "(", "lambda", "_", ":", "first_finished", ".", "set", "(", ")", ")", "return", "first_finished" ]
32.533333
21.6
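A hedged usage sketch for event_first_of above; it assumes gevent's Event satisfies the _AbstractLinkable check and exposes rawlink, which holds for recent gevent versions but is not stated in the record.

from gevent.event import Event

stop = Event()
data_ready = Event()

first = event_first_of(stop, data_ready)   # set as soon as either source event is set

data_ready.set()
print(first.wait(timeout=1.0))             # True once the rawlink callback has fired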
def setup(cls, configuration=None, **kwargs): # type: (Optional['Configuration'], Any) -> None """ Set up the HDX configuration Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: None """ if configuration is None: cls._configuration = Configuration(**kwargs) else: cls._configuration = configuration
[ "def", "setup", "(", "cls", ",", "configuration", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional['Configuration'], Any) -> None", "if", "configuration", "is", "None", ":", "cls", ".", "_configuration", "=", "Configuration", "(", "*", "*", "kwargs", ")", "else", ":", "cls", ".", "_configuration", "=", "configuration" ]
57.636364
34.060606
def is_name_expired( self, name, block_number ): """ Given a name and block number, determine if it is expired at that block. * names in revealed but not ready namespaces are never expired, unless the namespace itself is expired; * names in ready namespaces expire once max(ready_block, renew_block) + lifetime blocks passes Return True if so Return False if not, or if the name doesn't exist """ cur = self.db.cursor() return namedb_get_name( cur, name, block_number ) is None
[ "def", "is_name_expired", "(", "self", ",", "name", ",", "block_number", ")", ":", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "return", "namedb_get_name", "(", "cur", ",", "name", ",", "block_number", ")", "is", "None" ]
49.090909
26.181818
def conflicting_events(self): """ conflicting_events() This will return a list of conflicting events. **Example**:: event = service.calendar().get_event(id='<event_id>') for conflict in event.conflicting_events(): print conflict.subject """ if not self.conflicting_event_ids: return [] body = soap_request.get_item(exchange_id=self.conflicting_event_ids, format="AllProperties") response_xml = self.service.send(body) items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES) events = [] for item in items: event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item)) if event.id: events.append(event) return events
[ "def", "conflicting_events", "(", "self", ")", ":", "if", "not", "self", ".", "conflicting_event_ids", ":", "return", "[", "]", "body", "=", "soap_request", ".", "get_item", "(", "exchange_id", "=", "self", ".", "conflicting_event_ids", ",", "format", "=", "\"AllProperties\"", ")", "response_xml", "=", "self", ".", "service", ".", "send", "(", "body", ")", "items", "=", "response_xml", ".", "xpath", "(", "u'//m:GetItemResponseMessage/m:Items'", ",", "namespaces", "=", "soap_request", ".", "NAMESPACES", ")", "events", "=", "[", "]", "for", "item", "in", "items", ":", "event", "=", "Exchange2010CalendarEvent", "(", "service", "=", "self", ".", "service", ",", "xml", "=", "deepcopy", "(", "item", ")", ")", "if", "event", ".", "id", ":", "events", ".", "append", "(", "event", ")", "return", "events" ]
27.178571
25.321429
def rolling_max(self, window_start, window_end, min_observations=None): """ Calculate a new SArray of the maximum value of different subsets over this SArray. The subset that the maximum is calculated over is defined as an inclusive range relative to the position to each value in the SArray, using `window_start` and `window_end`. For a better understanding of this, see the examples below. Parameters ---------- window_start : int The start of the subset to calculate the maximum relative to the current value. window_end : int The end of the subset to calculate the maximum relative to the current value. Must be greater than `window_start`. min_observations : int Minimum number of non-missing observations in window required to calculate the maximum (otherwise result is None). None signifies that the entire window must not include a missing value. A negative number throws an error. Returns ------- out : SArray Examples -------- >>> import pandas >>> sa = SArray([1,2,3,4,5]) >>> series = pandas.Series([1,2,3,4,5]) A rolling max with a window including the previous 2 entries including the current: >>> sa.rolling_max(-2,0) dtype: int Rows: 5 [None, None, 3, 4, 5] Pandas equivalent: >>> pandas.rolling_max(series, 3) 0 NaN 1 NaN 2 3 3 4 4 5 dtype: float64 Same rolling max operation, but 2 minimum observations: >>> sa.rolling_max(-2,0,min_observations=2) dtype: int Rows: 5 [None, 2, 3, 4, 5] Pandas equivalent: >>> pandas.rolling_max(series, 3, min_periods=2) 0 NaN 1 2 2 3 3 4 4 5 dtype: float64 A rolling max with a size of 3, centered around the current: >>> sa.rolling_max(-1,1) dtype: int Rows: 5 [None, 3, 4, 5, None] Pandas equivalent: >>> pandas.rolling_max(series, 3, center=True) 0 NaN 1 3 2 4 3 5 4 NaN dtype: float64 A rolling max with a window including the current and the 2 entries following: >>> sa.rolling_max(0,2) dtype: int Rows: 5 [3, 4, 5, None, None] A rolling max with a window including the previous 2 entries NOT including the current: >>> sa.rolling_max(-2,-1) dtype: int Rows: 5 [None, None, 2, 3, 4] """ min_observations = self.__check_min_observations(min_observations) agg_op = '__builtin__max__' return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
[ "def", "rolling_max", "(", "self", ",", "window_start", ",", "window_end", ",", "min_observations", "=", "None", ")", ":", "min_observations", "=", "self", ".", "__check_min_observations", "(", "min_observations", ")", "agg_op", "=", "'__builtin__max__'", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "builtin_rolling_apply", "(", "agg_op", ",", "window_start", ",", "window_end", ",", "min_observations", ")", ")" ]
29.535354
23.878788
def __yield_handlers(self, event_type): """ Yield all the handlers registered for the given event type. """ if event_type not in self.event_types: raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types)) # Search handler stack for matching event handlers for frame in list(self._event_stack): if event_type in frame: yield frame[event_type] # Check instance for an event handler if hasattr(self, event_type): yield getattr(self, event_type)
[ "def", "__yield_handlers", "(", "self", ",", "event_type", ")", ":", "if", "event_type", "not", "in", "self", ".", "event_types", ":", "raise", "ValueError", "(", "\"%r not found in %r.event_types == %r\"", "%", "(", "event_type", ",", "self", ",", "self", ".", "event_types", ")", ")", "# Search handler stack for matching event handlers", "for", "frame", "in", "list", "(", "self", ".", "_event_stack", ")", ":", "if", "event_type", "in", "frame", ":", "yield", "frame", "[", "event_type", "]", "# Check instance for an event handler", "if", "hasattr", "(", "self", ",", "event_type", ")", ":", "yield", "getattr", "(", "self", ",", "event_type", ")" ]
38.866667
14.733333
def l(*members, meta=None) -> List: """Creates a new list from members.""" return List( # pylint: disable=abstract-class-instantiated plist(iterable=members), meta=meta )
[ "def", "l", "(", "*", "members", ",", "meta", "=", "None", ")", "->", "List", ":", "return", "List", "(", "# pylint: disable=abstract-class-instantiated", "plist", "(", "iterable", "=", "members", ")", ",", "meta", "=", "meta", ")" ]
37.4
13
def _init_map(self):
        """Call each parent record's _init_map manually because these record classes are non-cooperative (no super() chaining)"""
        DecimalValuesFormRecord._init_map(self)
        IntegerValuesFormRecord._init_map(self)
        TextAnswerFormRecord._init_map(self)
        FilesAnswerFormRecord._init_map(self)
        FeedbackAnswerFormRecord._init_map(self)
        super(CalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_map()
        self.my_osid_object_form._my_map['toleranceMode'] = \
            self._tolerance_mode_metadata['default_string_values'][0]
[ "def", "_init_map", "(", "self", ")", ":", "DecimalValuesFormRecord", ".", "_init_map", "(", "self", ")", "IntegerValuesFormRecord", ".", "_init_map", "(", "self", ")", "TextAnswerFormRecord", ".", "_init_map", "(", "self", ")", "FilesAnswerFormRecord", ".", "_init_map", "(", "self", ")", "FeedbackAnswerFormRecord", ".", "_init_map", "(", "self", ")", "super", "(", "CalculationInteractionFeedbackAndFilesAnswerFormRecord", ",", "self", ")", ".", "_init_map", "(", ")", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'toleranceMode'", "]", "=", "self", ".", "_tolerance_mode_metadata", "[", "'default_string_values'", "]", "[", "0", "]" ]
52.9
14.8
def position(parser, token):
    """
    Render a given position for a category.

    If the position is not defined for the first category, the position from its
    parent category is used unless nofallback is specified.

    Syntax::

        {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %}

        {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %}

    Example usage::

        {% position top_left for category %}{% endposition %}
    """
    bits = token.split_contents()
    nodelist = parser.parse(('end' + bits[0],))
    parser.delete_first_token()
    return _parse_position_tag(bits, nodelist)
[ "def", "position", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "nodelist", "=", "parser", ".", "parse", "(", "(", "'end'", "+", "bits", "[", "0", "]", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "_parse_position_tag", "(", "bits", ",", "nodelist", ")" ]
33.526316
21.947368
def scan(self, match="*", count=1000, cursor=0):
        """ :see: :meth:`RedisMap.scan` """
        cursor, results = self._client.hscan(
            self.key_prefix, cursor=cursor, match=match, count=count)
        return (cursor, list(map(self._decode, results)))
[ "def", "scan", "(", "self", ",", "match", "=", "\"*\"", ",", "count", "=", "1000", ",", "cursor", "=", "0", ")", ":", "cursor", ",", "results", "=", "self", ".", "_client", ".", "hscan", "(", "self", ".", "key_prefix", ",", "cursor", "=", "cursor", ",", "match", "=", "match", ",", "count", "=", "count", ")", "return", "(", "cursor", ",", "list", "(", "map", "(", "self", ".", "_decode", ",", "results", ")", ")", ")" ]
51.8
11.8
def get_recipe_env(self, arch, with_flags_in_cc=True):
        """
        Adds the openssl recipe's include and library paths to the build environment.
        """
        env = super(ScryptRecipe, self).get_recipe_env(arch, with_flags_in_cc)
        openssl_recipe = self.get_recipe('openssl', self.ctx)
        env['CFLAGS'] += openssl_recipe.include_flags(arch)
        env['LDFLAGS'] += ' -L{}'.format(self.ctx.get_libs_dir(arch.arch))
        env['LDFLAGS'] += ' -L{}'.format(self.ctx.libs_dir)
        env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch)
        env['LIBS'] = env.get('LIBS', '') + openssl_recipe.link_libs_flags()
        return env
[ "def", "get_recipe_env", "(", "self", ",", "arch", ",", "with_flags_in_cc", "=", "True", ")", ":", "env", "=", "super", "(", "ScryptRecipe", ",", "self", ")", ".", "get_recipe_env", "(", "arch", ",", "with_flags_in_cc", ")", "openssl_recipe", "=", "self", ".", "get_recipe", "(", "'openssl'", ",", "self", ".", "ctx", ")", "env", "[", "'CFLAGS'", "]", "+=", "openssl_recipe", ".", "include_flags", "(", "arch", ")", "env", "[", "'LDFLAGS'", "]", "+=", "' -L{}'", ".", "format", "(", "self", ".", "ctx", ".", "get_libs_dir", "(", "arch", ".", "arch", ")", ")", "env", "[", "'LDFLAGS'", "]", "+=", "' -L{}'", ".", "format", "(", "self", ".", "ctx", ".", "libs_dir", ")", "env", "[", "'LDFLAGS'", "]", "+=", "openssl_recipe", ".", "link_dirs_flags", "(", "arch", ")", "env", "[", "'LIBS'", "]", "=", "env", ".", "get", "(", "'LIBS'", ",", "''", ")", "+", "openssl_recipe", ".", "link_libs_flags", "(", ")", "return", "env" ]
51.583333
20.083333
def predict(self, X, raw_score=False, num_iteration=None, pred_leaf=False, pred_contrib=False, **kwargs): """Return the predicted value for each sample. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] Input features matrix. raw_score : bool, optional (default=False) Whether to predict raw scores. num_iteration : int or None, optional (default=None) Limit number of iterations in the prediction. If None, if the best iteration exists, it is used; otherwise, all trees are used. If <= 0, all trees are used (no limits). pred_leaf : bool, optional (default=False) Whether to predict leaf index. pred_contrib : bool, optional (default=False) Whether to predict feature contributions. Note ---- If you want to get more explanations for your model's predictions using SHAP values, like SHAP interaction values, you can install the shap package (https://github.com/slundberg/shap). Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra column, where the last column is the expected value. **kwargs Other parameters for the prediction. Returns ------- predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes] The predicted values. X_leaves : array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes] If ``pred_leaf=True``, the predicted leaf of every tree for each sample. X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] If ``pred_contrib=True``, the feature contributions for each sample. """ if self._n_features is None: raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.") if not isinstance(X, (DataFrame, DataTable)): X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False) n_features = X.shape[1] if self._n_features != n_features: raise ValueError("Number of features of the model must " "match the input. Model n_features_ is %s and " "input n_features is %s " % (self._n_features, n_features)) return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration, pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)
[ "def", "predict", "(", "self", ",", "X", ",", "raw_score", "=", "False", ",", "num_iteration", "=", "None", ",", "pred_leaf", "=", "False", ",", "pred_contrib", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_n_features", "is", "None", ":", "raise", "LGBMNotFittedError", "(", "\"Estimator not fitted, call `fit` before exploiting the model.\"", ")", "if", "not", "isinstance", "(", "X", ",", "(", "DataFrame", ",", "DataTable", ")", ")", ":", "X", "=", "_LGBMCheckArray", "(", "X", ",", "accept_sparse", "=", "True", ",", "force_all_finite", "=", "False", ")", "n_features", "=", "X", ".", "shape", "[", "1", "]", "if", "self", ".", "_n_features", "!=", "n_features", ":", "raise", "ValueError", "(", "\"Number of features of the model must \"", "\"match the input. Model n_features_ is %s and \"", "\"input n_features is %s \"", "%", "(", "self", ".", "_n_features", ",", "n_features", ")", ")", "return", "self", ".", "booster_", ".", "predict", "(", "X", ",", "raw_score", "=", "raw_score", ",", "num_iteration", "=", "num_iteration", ",", "pred_leaf", "=", "pred_leaf", ",", "pred_contrib", "=", "pred_contrib", ",", "*", "*", "kwargs", ")" ]
52.862745
26.960784
def run_steps(node: Node, pipeline: RenderingPipeline, **args): """Runs instance node rendering steps""" for step in pipeline.steps: result = step(node, pipeline=pipeline, **args) if isinstance(result, dict): args = {**args, **result}
[ "def", "run_steps", "(", "node", ":", "Node", ",", "pipeline", ":", "RenderingPipeline", ",", "*", "*", "args", ")", ":", "for", "step", "in", "pipeline", ".", "steps", ":", "result", "=", "step", "(", "node", ",", "pipeline", "=", "pipeline", ",", "*", "*", "args", ")", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "args", "=", "{", "*", "*", "args", ",", "*", "*", "result", "}" ]
44.166667
8.833333
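The run_steps helper above treats any dict returned by a step as extra keyword arguments for the steps that follow it. A minimal, self-contained sketch of that contract; Node and RenderingPipeline here are simplified stand-ins for illustration, not the real classes:

class Node:
    pass

class RenderingPipeline:
    def __init__(self, steps):
        self.steps = steps

def resolve_styles(node, pipeline=None, **args):
    # whatever a step returns as a dict becomes a keyword argument for later steps
    return {'styles': {'color': 'red'}}

def apply_styles(node, pipeline=None, styles=None, **args):
    # consumes the value produced by resolve_styles
    node.styles = styles

node = Node()
run_steps(node, RenderingPipeline([resolve_styles, apply_styles]))
assert node.styles == {'color': 'red'}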
def get_or_create_shared_key(cls, force_new=False): """ Create a shared public/private key pair for certificate pushing, if the settings allow. """ if force_new: with transaction.atomic(): SharedKey.objects.filter(current=True).update(current=False) key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True) # create a new shared key if one doesn't exist try: return SharedKey.objects.get(current=True) except SharedKey.DoesNotExist: key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True)
[ "def", "get_or_create_shared_key", "(", "cls", ",", "force_new", "=", "False", ")", ":", "if", "force_new", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "SharedKey", ".", "objects", ".", "filter", "(", "current", "=", "True", ")", ".", "update", "(", "current", "=", "False", ")", "key", "=", "Key", "(", ")", "return", "SharedKey", ".", "objects", ".", "create", "(", "public_key", "=", "key", ",", "private_key", "=", "key", ",", "current", "=", "True", ")", "# create a new shared key if one doesn't exist", "try", ":", "return", "SharedKey", ".", "objects", ".", "get", "(", "current", "=", "True", ")", "except", "SharedKey", ".", "DoesNotExist", ":", "key", "=", "Key", "(", ")", "return", "SharedKey", ".", "objects", ".", "create", "(", "public_key", "=", "key", ",", "private_key", "=", "key", ",", "current", "=", "True", ")" ]
44.1
16.1
def makeCubicxFunc(self,mLvl,pLvl,MedShk,xLvl): ''' Constructs the (unconstrained) expenditure function for this period using bilinear interpolation (over permanent income and the medical shock) among an array of cubic interpolations over market resources. Parameters ---------- mLvl : np.array Corresponding market resource points for interpolation. pLvl : np.array Corresponding permanent income level points for interpolation. MedShk : np.array Corresponding medical need shocks for interpolation. xLvl : np.array Expenditure points for interpolation, corresponding to those in mLvl, pLvl, and MedShk. Returns ------- xFuncUnc : BilinearInterpOnInterp1D Unconstrained total expenditure function for this period. ''' # Get state dimensions pCount = mLvl.shape[1] MedCount = mLvl.shape[0] # Calculate the MPC and MPM at each gridpoint EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,\ self.pLvlNext)*self.ShkPrbs_temp,axis=0) EndOfPrdvPP = np.tile(np.reshape(EndOfPrdvPP,(1,pCount,EndOfPrdvPP.shape[1])),(MedCount,1,1)) dcda = EndOfPrdvPP/self.uPP(np.array(self.cLvlNow)) dMedda = EndOfPrdvPP/(self.MedShkVals_tiled*self.uMedPP(self.MedLvlNow)) dMedda[0,:,:] = 0.0 # dMedda goes crazy when MedShk=0 MPC = dcda/(1.0 + dcda + self.MedPrice*dMedda) MPM = dMedda/(1.0 + dcda + self.MedPrice*dMedda) # Convert to marginal propensity to spend MPX = MPC + self.MedPrice*MPM MPX = np.concatenate((np.reshape(MPX[:,:,0],(MedCount,pCount,1)),MPX),axis=2) # NEED TO CALCULATE MPM AT NATURAL BORROWING CONSTRAINT MPX[0,:,0] = self.MPCmaxNow # Loop over each permanent income level and medical shock and make a cubic xFunc xFunc_by_pLvl_and_MedShk = [] # Initialize the empty list of lists of 1D xFuncs for i in range(pCount): temp_list = [] pLvl_i = pLvl[0,i,0] mLvlMin_i = self.BoroCnstNat(pLvl_i) for j in range(MedCount): m_temp = mLvl[j,i,:] - mLvlMin_i x_temp = xLvl[j,i,:] MPX_temp = MPX[j,i,:] temp_list.append(CubicInterp(m_temp,x_temp,MPX_temp)) xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list)) # Combine the nested list of cubic xFuncs into a single function pLvl_temp = pLvl[0,:,0] MedShk_temp = MedShk[:,0,0] xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk,pLvl_temp,MedShk_temp) xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase,self.BoroCnstNat) return xFuncUnc
[ "def", "makeCubicxFunc", "(", "self", ",", "mLvl", ",", "pLvl", ",", "MedShk", ",", "xLvl", ")", ":", "# Get state dimensions", "pCount", "=", "mLvl", ".", "shape", "[", "1", "]", "MedCount", "=", "mLvl", ".", "shape", "[", "0", "]", "# Calculate the MPC and MPM at each gridpoint", "EndOfPrdvPP", "=", "self", ".", "DiscFacEff", "*", "self", ".", "Rfree", "*", "self", ".", "Rfree", "*", "np", ".", "sum", "(", "self", ".", "vPPfuncNext", "(", "self", ".", "mLvlNext", ",", "self", ".", "pLvlNext", ")", "*", "self", ".", "ShkPrbs_temp", ",", "axis", "=", "0", ")", "EndOfPrdvPP", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "EndOfPrdvPP", ",", "(", "1", ",", "pCount", ",", "EndOfPrdvPP", ".", "shape", "[", "1", "]", ")", ")", ",", "(", "MedCount", ",", "1", ",", "1", ")", ")", "dcda", "=", "EndOfPrdvPP", "/", "self", ".", "uPP", "(", "np", ".", "array", "(", "self", ".", "cLvlNow", ")", ")", "dMedda", "=", "EndOfPrdvPP", "/", "(", "self", ".", "MedShkVals_tiled", "*", "self", ".", "uMedPP", "(", "self", ".", "MedLvlNow", ")", ")", "dMedda", "[", "0", ",", ":", ",", ":", "]", "=", "0.0", "# dMedda goes crazy when MedShk=0", "MPC", "=", "dcda", "/", "(", "1.0", "+", "dcda", "+", "self", ".", "MedPrice", "*", "dMedda", ")", "MPM", "=", "dMedda", "/", "(", "1.0", "+", "dcda", "+", "self", ".", "MedPrice", "*", "dMedda", ")", "# Convert to marginal propensity to spend", "MPX", "=", "MPC", "+", "self", ".", "MedPrice", "*", "MPM", "MPX", "=", "np", ".", "concatenate", "(", "(", "np", ".", "reshape", "(", "MPX", "[", ":", ",", ":", ",", "0", "]", ",", "(", "MedCount", ",", "pCount", ",", "1", ")", ")", ",", "MPX", ")", ",", "axis", "=", "2", ")", "# NEED TO CALCULATE MPM AT NATURAL BORROWING CONSTRAINT", "MPX", "[", "0", ",", ":", ",", "0", "]", "=", "self", ".", "MPCmaxNow", "# Loop over each permanent income level and medical shock and make a cubic xFunc", "xFunc_by_pLvl_and_MedShk", "=", "[", "]", "# Initialize the empty list of lists of 1D xFuncs", "for", "i", "in", "range", "(", "pCount", ")", ":", "temp_list", "=", "[", "]", "pLvl_i", "=", "pLvl", "[", "0", ",", "i", ",", "0", "]", "mLvlMin_i", "=", "self", ".", "BoroCnstNat", "(", "pLvl_i", ")", "for", "j", "in", "range", "(", "MedCount", ")", ":", "m_temp", "=", "mLvl", "[", "j", ",", "i", ",", ":", "]", "-", "mLvlMin_i", "x_temp", "=", "xLvl", "[", "j", ",", "i", ",", ":", "]", "MPX_temp", "=", "MPX", "[", "j", ",", "i", ",", ":", "]", "temp_list", ".", "append", "(", "CubicInterp", "(", "m_temp", ",", "x_temp", ",", "MPX_temp", ")", ")", "xFunc_by_pLvl_and_MedShk", ".", "append", "(", "deepcopy", "(", "temp_list", ")", ")", "# Combine the nested list of cubic xFuncs into a single function", "pLvl_temp", "=", "pLvl", "[", "0", ",", ":", ",", "0", "]", "MedShk_temp", "=", "MedShk", "[", ":", ",", "0", ",", "0", "]", "xFuncUncBase", "=", "BilinearInterpOnInterp1D", "(", "xFunc_by_pLvl_and_MedShk", ",", "pLvl_temp", ",", "MedShk_temp", ")", "xFuncUnc", "=", "VariableLowerBoundFunc3D", "(", "xFuncUncBase", ",", "self", ".", "BoroCnstNat", ")", "return", "xFuncUnc" ]
46.262295
25.180328
def associate_interface_environments(self, int_env_map):
        """
        Method to associate interfaces with environments.

        :param int_env_map: List containing the interface and environment ids to be associated.

        :return: Id.
        """
        data = {'interface_environments': int_env_map}
        return super(ApiInterfaceRequest, self).post('api/v3/interface/environments/', data)
[ "def", "associate_interface_environments", "(", "self", ",", "int_env_map", ")", ":", "data", "=", "{", "'interface_environments'", ":", "int_env_map", "}", "return", "super", "(", "ApiInterfaceRequest", ",", "self", ")", ".", "post", "(", "'api/v3/interface/environments/'", ",", "data", ")" ]
42.222222
23.111111
def td_type():
    '''Get the type of the tomodir (complex or DC) and whether it is FPI
    '''
    cfg = np.genfromtxt('exe/crtomo.cfg', skip_header=15, dtype='str', usecols=([0]))
    is_complex = False
    if cfg[0] == 'F':
        is_complex = True
    is_fpi = False
    if cfg[2] == 'T':
        is_fpi = True
    return is_complex, is_fpi
[ "def", "td_type", "(", ")", ":", "cfg", "=", "np", ".", "genfromtxt", "(", "'exe/crtomo.cfg'", ",", "skip_header", "=", "15", ",", "dtype", "=", "'str'", ",", "usecols", "=", "(", "[", "0", "]", ")", ")", "is_complex", "=", "False", "if", "cfg", "[", "0", "]", "==", "'F'", ":", "is_complex", "=", "True", "is_fpi", "=", "False", "if", "cfg", "[", "2", "]", "==", "'T'", ":", "is_fpi", "=", "True", "return", "is_complex", ",", "is_fpi" ]
26.266667
16.8
def setInverted(self, state): """ Sets whether or not to invert the check state for collapsing. :param state | <bool> """ collapsed = self.isCollapsed() self._inverted = state if self.isCollapsible(): self.setCollapsed(collapsed)
[ "def", "setInverted", "(", "self", ",", "state", ")", ":", "collapsed", "=", "self", ".", "isCollapsed", "(", ")", "self", ".", "_inverted", "=", "state", "if", "self", ".", "isCollapsible", "(", ")", ":", "self", ".", "setCollapsed", "(", "collapsed", ")" ]
31.1
9.3
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True):
    """
    Generate a list of .h5 files which have a similar file name.

    Parameters
    ----------
    scan_base : list, string
        List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically.
    filter_str : list, string
        List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter.
    sort_by_time : bool
        If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files.
    meta_data_v2 : bool
        True for new (v2) meta data format, False for the old (v1) format.

    Returns
    -------
    data_files : list
        List of file names matching the above conditions.
    """
    data_files = []
    if scan_base is None:
        return data_files
    if isinstance(scan_base, basestring):
        scan_base = [scan_base]
    for scan_base_str in scan_base:
        if '.h5' == os.path.splitext(scan_base_str)[1]:
            data_files.append(scan_base_str)
        else:
            data_files.extend(glob.glob(scan_base_str + '*.h5'))
    if filter_str:
        if isinstance(filter_str, basestring):
            filter_str = [filter_str]
        data_files = filter(lambda data_file: not any([(True if x in data_file else False) for x in filter_str]), data_files)
    if sort_by_time and len(data_files) > 1:
        f_list = {}
        for data_file in data_files:
            with tb.open_file(data_file, mode="r") as h5_file:
                try:
                    meta_data = h5_file.root.meta_data
                except tb.NoSuchNodeError:
                    logging.warning("File %s is missing meta_data" % h5_file.filename)
                else:
                    try:
                        if meta_data_v2:
                            timestamp = meta_data[0]["timestamp_start"]
                        else:
                            timestamp = meta_data[0]["timestamp"]
                    except IndexError:
                        logging.info("File %s has empty meta_data" % h5_file.filename)
                    else:
                        f_list[data_file] = timestamp
        data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False))
    return data_files
[ "def", "get_data_file_names_from_scan_base", "(", "scan_base", ",", "filter_str", "=", "[", "'_analyzed.h5'", ",", "'_interpreted.h5'", ",", "'_cut.h5'", ",", "'_result.h5'", ",", "'_hists.h5'", "]", ",", "sort_by_time", "=", "True", ",", "meta_data_v2", "=", "True", ")", ":", "data_files", "=", "[", "]", "if", "scan_base", "is", "None", ":", "return", "data_files", "if", "isinstance", "(", "scan_base", ",", "basestring", ")", ":", "scan_base", "=", "[", "scan_base", "]", "for", "scan_base_str", "in", "scan_base", ":", "if", "'.h5'", "==", "os", ".", "path", ".", "splitext", "(", "scan_base_str", ")", "[", "1", "]", ":", "data_files", ".", "append", "(", "scan_base_str", ")", "else", ":", "data_files", ".", "extend", "(", "glob", ".", "glob", "(", "scan_base_str", "+", "'*.h5'", ")", ")", "if", "filter_str", ":", "if", "isinstance", "(", "filter_str", ",", "basestring", ")", ":", "filter_str", "=", "[", "filter_str", "]", "data_files", "=", "filter", "(", "lambda", "data_file", ":", "not", "any", "(", "[", "(", "True", "if", "x", "in", "data_file", "else", "False", ")", "for", "x", "in", "filter_str", "]", ")", ",", "data_files", ")", "if", "sort_by_time", "and", "len", "(", "data_files", ")", ">", "1", ":", "f_list", "=", "{", "}", "for", "data_file", "in", "data_files", ":", "with", "tb", ".", "open_file", "(", "data_file", ",", "mode", "=", "\"r\"", ")", "as", "h5_file", ":", "try", ":", "meta_data", "=", "h5_file", ".", "root", ".", "meta_data", "except", "tb", ".", "NoSuchNodeError", ":", "logging", ".", "warning", "(", "\"File %s is missing meta_data\"", "%", "h5_file", ".", "filename", ")", "else", ":", "try", ":", "if", "meta_data_v2", ":", "timestamp", "=", "meta_data", "[", "0", "]", "[", "\"timestamp_start\"", "]", "else", ":", "timestamp", "=", "meta_data", "[", "0", "]", "[", "\"timestamp\"", "]", "except", "IndexError", ":", "logging", ".", "info", "(", "\"File %s has empty meta_data\"", "%", "h5_file", ".", "filename", ")", "else", ":", "f_list", "[", "data_file", "]", "=", "timestamp", "data_files", "=", "list", "(", "sorted", "(", "f_list", ",", "key", "=", "f_list", ".", "__getitem__", ",", "reverse", "=", "False", ")", ")", "return", "data_files" ]
45.196429
27.589286
def persistent_write(self, address, byte, refresh_config=False):
        '''
        Write a single byte to an address in persistent memory.

        Parameters
        ----------
        address : int
            Address in persistent memory (e.g., EEPROM).
        byte : int
            Value to write to address.
        refresh_config : bool, optional
            If ``True``, :meth:`load_config()` is called afterward to refresh
            the configuration settings.
        '''
        self._persistent_write(address, byte)
        if refresh_config:
            self.load_config(False)
[ "def", "persistent_write", "(", "self", ",", "address", ",", "byte", ",", "refresh_config", "=", "False", ")", ":", "self", ".", "_persistent_write", "(", "address", ",", "byte", ")", "if", "refresh_config", ":", "self", ".", "load_config", "(", "False", ")" ]
34.058824
18.294118
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None, clip_max=None): """Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such that the resulting adversarial example is between clip_min and clip_max, if applicable. """ if clip_min is None or clip_max is None: raise NotImplementedError("_project_perturbation currently has clipping " "hard-coded in.") # Ensure inputs are in the correct range with tf.control_dependencies([ utils_tf.assert_less_equal(input_image, tf.cast(clip_max, input_image.dtype)), utils_tf.assert_greater_equal(input_image, tf.cast(clip_min, input_image.dtype)) ]): clipped_perturbation = utils_tf.clip_by_value( perturbation, -epsilon, epsilon) new_image = utils_tf.clip_by_value( input_image + clipped_perturbation, clip_min, clip_max) return new_image - input_image
[ "def", "_project_perturbation", "(", "perturbation", ",", "epsilon", ",", "input_image", ",", "clip_min", "=", "None", ",", "clip_max", "=", "None", ")", ":", "if", "clip_min", "is", "None", "or", "clip_max", "is", "None", ":", "raise", "NotImplementedError", "(", "\"_project_perturbation currently has clipping \"", "\"hard-coded in.\"", ")", "# Ensure inputs are in the correct range", "with", "tf", ".", "control_dependencies", "(", "[", "utils_tf", ".", "assert_less_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_max", ",", "input_image", ".", "dtype", ")", ")", ",", "utils_tf", ".", "assert_greater_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_min", ",", "input_image", ".", "dtype", ")", ")", "]", ")", ":", "clipped_perturbation", "=", "utils_tf", ".", "clip_by_value", "(", "perturbation", ",", "-", "epsilon", ",", "epsilon", ")", "new_image", "=", "utils_tf", ".", "clip_by_value", "(", "input_image", "+", "clipped_perturbation", ",", "clip_min", ",", "clip_max", ")", "return", "new_image", "-", "input_image" ]
44.434783
16
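The projection above amounts to two clips: the perturbation is clipped into the L-infinity ball of radius epsilon, and the perturbed image is clipped back into [clip_min, clip_max]. A NumPy sketch of the same operation, for illustration only (not the TensorFlow implementation):

import numpy as np

def project_perturbation_np(perturbation, epsilon, input_image, clip_min, clip_max):
    # clip the perturbation into the L-infinity ball of radius epsilon ...
    clipped = np.clip(perturbation, -epsilon, epsilon)
    # ... then keep the adversarial image inside the valid pixel range
    new_image = np.clip(input_image + clipped, clip_min, clip_max)
    return new_image - input_image

x = np.array([0.2, 0.5, 0.9])
delta = np.array([0.4, -0.3, 0.4])
print(project_perturbation_np(delta, 0.1, x, 0.0, 1.0))  # [ 0.1 -0.1  0.1]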
def _create_attach_record(self, id, timed):
        """
        Create a new pivot attachment record.
        """
        record = {}
        record[self._foreign_key] = self._parent.get_key()
        record[self._other_key] = id
        if timed:
            record = self._set_timestamps_on_attach(record)
        return record
[ "def", "_create_attach_record", "(", "self", ",", "id", ",", "timed", ")", ":", "record", "=", "{", "}", "record", "[", "self", ".", "_foreign_key", "]", "=", "self", ".", "_parent", ".", "get_key", "(", ")", "record", "[", "self", ".", "_other_key", "]", "=", "id", "if", "timed", ":", "record", "=", "self", ".", "_set_timestamps_on_attach", "(", "record", ")", "return", "record" ]
22.928571
19.5
def rotation(f, line = 'fast'): """ Find rotation of the survey Find the clock-wise rotation and origin of `line` as ``(rot, cdpx, cdpy)`` The clock-wise rotation is defined as the angle in radians between line given by the first and last trace of the first line and the axis that gives increasing CDP-Y, in the direction that gives increasing CDP-X. By default, the first line is the 'fast' direction, which is inlines if the file is inline sorted, and crossline if it's crossline sorted. Parameters ---------- f : SegyFile line : { 'fast', 'slow', 'iline', 'xline' } Returns ------- rotation : float cdpx : int cdpy : int Notes ----- .. versionadded:: 1.2 """ if f.unstructured: raise ValueError("Rotation requires a structured file") lines = { 'fast': f.fast, 'slow': f.slow, 'iline': f.iline, 'xline': f.xline, } if line not in lines: error = "Unknown line {}".format(line) solution = "Must be any of: {}".format(' '.join(lines.keys())) raise ValueError('{} {}'.format(error, solution)) l = lines[line] origin = f.header[0][segyio.su.cdpx, segyio.su.cdpy] cdpx, cdpy = origin[segyio.su.cdpx], origin[segyio.su.cdpy] rot = f.xfd.rotation( len(l), l.stride, len(f.offsets), np.fromiter(l.keys(), dtype = np.intc) ) return rot, cdpx, cdpy
[ "def", "rotation", "(", "f", ",", "line", "=", "'fast'", ")", ":", "if", "f", ".", "unstructured", ":", "raise", "ValueError", "(", "\"Rotation requires a structured file\"", ")", "lines", "=", "{", "'fast'", ":", "f", ".", "fast", ",", "'slow'", ":", "f", ".", "slow", ",", "'iline'", ":", "f", ".", "iline", ",", "'xline'", ":", "f", ".", "xline", ",", "}", "if", "line", "not", "in", "lines", ":", "error", "=", "\"Unknown line {}\"", ".", "format", "(", "line", ")", "solution", "=", "\"Must be any of: {}\"", ".", "format", "(", "' '", ".", "join", "(", "lines", ".", "keys", "(", ")", ")", ")", "raise", "ValueError", "(", "'{} {}'", ".", "format", "(", "error", ",", "solution", ")", ")", "l", "=", "lines", "[", "line", "]", "origin", "=", "f", ".", "header", "[", "0", "]", "[", "segyio", ".", "su", ".", "cdpx", ",", "segyio", ".", "su", ".", "cdpy", "]", "cdpx", ",", "cdpy", "=", "origin", "[", "segyio", ".", "su", ".", "cdpx", "]", ",", "origin", "[", "segyio", ".", "su", ".", "cdpy", "]", "rot", "=", "f", ".", "xfd", ".", "rotation", "(", "len", "(", "l", ")", ",", "l", ".", "stride", ",", "len", "(", "f", ".", "offsets", ")", ",", "np", ".", "fromiter", "(", "l", ".", "keys", "(", ")", ",", "dtype", "=", "np", ".", "intc", ")", ")", "return", "rot", ",", "cdpx", ",", "cdpy" ]
25.894737
25.824561
def post(self, url, data, headers={}): """ POST request for creating new objects. data should be a dictionary. """ response = self._run_method('POST', url, data=data, headers=headers) return self._handle_response(url, response)
[ "def", "post", "(", "self", ",", "url", ",", "data", ",", "headers", "=", "{", "}", ")", ":", "response", "=", "self", ".", "_run_method", "(", "'POST'", ",", "url", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "return", "self", ".", "_handle_response", "(", "url", ",", "response", ")" ]
38.428571
8.428571
def subclass(cls, vt_code, vt_args): """Return a dynamic subclass that has the extra parameters built in""" from geoid import get_class import geoid.census parser = get_class(geoid.census, vt_args.strip('/')).parse cls = type(vt_code.replace('/', '_'), (cls,), {'vt_code': vt_code, 'parser': parser}) globals()[cls.__name__] = cls assert cls.parser return cls
[ "def", "subclass", "(", "cls", ",", "vt_code", ",", "vt_args", ")", ":", "from", "geoid", "import", "get_class", "import", "geoid", ".", "census", "parser", "=", "get_class", "(", "geoid", ".", "census", ",", "vt_args", ".", "strip", "(", "'/'", ")", ")", ".", "parse", "cls", "=", "type", "(", "vt_code", ".", "replace", "(", "'/'", ",", "'_'", ")", ",", "(", "cls", ",", ")", ",", "{", "'vt_code'", ":", "vt_code", ",", "'parser'", ":", "parser", "}", ")", "globals", "(", ")", "[", "cls", ".", "__name__", "]", "=", "cls", "assert", "cls", ".", "parser", "return", "cls" ]
34.583333
21.75
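The classmethod above builds a named class at runtime with type() and registers it in the module globals. A self-contained sketch of the same pattern with made-up names, independent of the geoid package:

class ValueType:
    vt_code = None
    parser = None

def make_value_type(vt_code, parser):
    # type(name, bases, namespace) creates the class dynamically;
    # publishing it in globals() makes it reachable by name afterwards
    cls = type(vt_code.replace('/', '_'), (ValueType,),
               {'vt_code': vt_code, 'parser': staticmethod(parser)})
    globals()[cls.__name__] = cls
    return cls

TractType = make_value_type('geoid/tract', int)
assert TractType.__name__ == 'geoid_tract'
assert TractType.parser('42') == 42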
def deprecated(*args):
    """ Deprecation warning decorator. Takes an optional deprecation message;
    otherwise a generic warning is used. """
    def wrap(func):
        def wrapped_func(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return func(*args, **kwargs)
        return wrapped_func
    if len(args) == 1 and callable(args[0]):
        msg = "Function '%s' will be deprecated in future versions of " \
              "Neurosynth." % args[0].__name__
        return wrap(args[0])
    else:
        msg = args[0]
        return wrap
[ "def", "deprecated", "(", "*", "args", ")", ":", "def", "wrap", "(", "func", ")", ":", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "msg", ",", "category", "=", "DeprecationWarning", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_func", "if", "len", "(", "args", ")", "==", "1", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "msg", "=", "\"Function '%s' will be deprecated in future versions of \"", "\"Neurosynth.\"", "%", "args", "[", "0", "]", ".", "__name__", "return", "wrap", "(", "args", "[", "0", "]", ")", "else", ":", "msg", "=", "args", "[", "0", "]", "return", "wrap" ]
35.375
14.8125
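A usage sketch for the decorator above; both decoration forms are supported, with or without a custom message (the function names are invented):

import warnings

@deprecated("Use new_analyze() instead.")
def old_analyze():
    return 42

@deprecated
def legacy_analyze():
    return 0

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_analyze()      # warns with the custom message
    legacy_analyze()   # warns with the generic Neurosynth message
assert len(caught) == 2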
def _traverse(self, start_paths, traversal_path): """ Traverse a multi-hop traversal path from a list of start instance paths, and return the resulting list of instance paths. Parameters: start_paths (list of CIMInstanceName): Instance paths to start traversal from. traversal_path (list of string): Traversal hops, where the list contains pairs of items: association class name, far end class name. Example: a 2-hop traversal is represented as `['A1', 'C1', 'A2', 'C2']`. Returns: List of CIMInstanceName: Instances at the far end of the traversal. """ assert len(traversal_path) >= 2 assoc_class = traversal_path[0] far_class = traversal_path[1] total_next_paths = [] for path in start_paths: next_paths = self._conn.AssociatorNames( ObjectName=path, AssocClass=assoc_class, ResultClass=far_class) total_next_paths.extend(next_paths) traversal_path = traversal_path[2:] if traversal_path: total_next_paths = self._traverse(total_next_paths, traversal_path) return total_next_paths
[ "def", "_traverse", "(", "self", ",", "start_paths", ",", "traversal_path", ")", ":", "assert", "len", "(", "traversal_path", ")", ">=", "2", "assoc_class", "=", "traversal_path", "[", "0", "]", "far_class", "=", "traversal_path", "[", "1", "]", "total_next_paths", "=", "[", "]", "for", "path", "in", "start_paths", ":", "next_paths", "=", "self", ".", "_conn", ".", "AssociatorNames", "(", "ObjectName", "=", "path", ",", "AssocClass", "=", "assoc_class", ",", "ResultClass", "=", "far_class", ")", "total_next_paths", ".", "extend", "(", "next_paths", ")", "traversal_path", "=", "traversal_path", "[", "2", ":", "]", "if", "traversal_path", ":", "total_next_paths", "=", "self", ".", "_traverse", "(", "total_next_paths", ",", "traversal_path", ")", "return", "total_next_paths" ]
40.966667
16.033333
def delete_probes(probes, test=False, commit=True, **kwargs): # pylint: disable=unused-argument ''' Removes RPM/SLA probes from the network device. Calls the configuration template 'delete_probes' from the NAPALM library, providing as input a rich formatted dictionary with the configuration details of the probes to be removed from the configuration of the device. :param probes: Dictionary with a similar format as the output dictionary of the function config(), where the details are not necessary. :param test: Dry run? If set as True, will apply the config, discard and return the changes. Default: False :param commit: Commit? (default: True) Sometimes it is not needed to commit the config immediately after loading the changes. E.g.: a state loads a couple of parts (add / remove / update) and would not be optimal to commit after each operation. Also, from the CLI when the user needs to apply the similar changes before committing, can specify commit=False and will not discard the config. :raise MergeConfigException: If there is an error on the configuration sent. :return: A dictionary having the following keys: - result (bool): if the config was applied successfully. It is `False` only in case of failure. In case there are no changes to be applied and successfully performs all operations it is still `True` and so will be the `already_configured` flag (example below) - comment (str): a message for the user - already_configured (bool): flag to check if there were no changes applied - diff (str): returns the config changes applied Input example: .. code-block:: python probes = { 'existing_probe':{ 'existing_test1': {}, 'existing_test2': {} } } ''' return __salt__['net.load_template']('delete_probes', probes=probes, test=test, commit=commit, inherit_napalm_device=napalm_device)
[ "def", "delete_probes", "(", "probes", ",", "test", "=", "False", ",", "commit", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "return", "__salt__", "[", "'net.load_template'", "]", "(", "'delete_probes'", ",", "probes", "=", "probes", ",", "test", "=", "test", ",", "commit", "=", "commit", ",", "inherit_napalm_device", "=", "napalm_device", ")" ]
44.729167
28.020833
def clone(self): """Creates a clone of this aggregator.""" return type(self)(self.__cmp, self.__key, self.__reverse, name = self.name, dataFormat = self._dataFormat)
[ "def", "clone", "(", "self", ")", ":", "return", "type", "(", "self", ")", "(", "self", ".", "__cmp", ",", "self", ".", "__key", ",", "self", ".", "__reverse", ",", "name", "=", "self", ".", "name", ",", "dataFormat", "=", "self", ".", "_dataFormat", ")" ]
57
31.333333
def _rule_id(self, id: int) -> str: """ Convert an integer into a gorule key id. """ if id is None or id == 0 or id >= 10000000: return "other" return "gorule-{:0>7}".format(id)
[ "def", "_rule_id", "(", "self", ",", "id", ":", "int", ")", "->", "str", ":", "if", "id", "is", "None", "or", "id", "==", "0", "or", "id", ">=", "10000000", ":", "return", "\"other\"", "return", "\"gorule-{:0>7}\"", ".", "format", "(", "id", ")" ]
27.875
9.875
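The zero-padding in _rule_id above comes from the format spec {:0>7} (fill with '0', right-align, width 7); ids of None, 0, or >= 10000000 fall through to the "other" bucket. Two illustrative values:

assert "gorule-{:0>7}".format(6) == "gorule-0000006"
assert "gorule-{:0>7}".format(1234567) == "gorule-1234567"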
def _cli_check_role(role): '''Checks that a basis set role exists and if not, raises a helpful exception''' if role is None: return None role = role.lower() if not role in api.get_roles(): errstr = "Role format '" + role + "' does not exist.\n" errstr += "For a complete list of roles, use the 'bse list-roles' command" raise RuntimeError(errstr) return role
[ "def", "_cli_check_role", "(", "role", ")", ":", "if", "role", "is", "None", ":", "return", "None", "role", "=", "role", ".", "lower", "(", ")", "if", "not", "role", "in", "api", ".", "get_roles", "(", ")", ":", "errstr", "=", "\"Role format '\"", "+", "role", "+", "\"' does not exist.\\n\"", "errstr", "+=", "\"For a complete list of roles, use the 'bse list-roles' command\"", "raise", "RuntimeError", "(", "errstr", ")", "return", "role" ]
30.846154
25.923077
def _get_min_distance_to_volcanic_front(lons, lats): """ Compute and return minimum distance between volcanic front and points specified by 'lon' and 'lat'. Distance is negative if point is located east of the volcanic front, positive otherwise. The method uses the same approach as :meth:`_get_min_distance_to_sub_trench` but final distance is returned without taking the absolute value. """ vf = _construct_surface(VOLCANIC_FRONT_LONS, VOLCANIC_FRONT_LATS, 0., 10.) sites = Mesh(lons, lats, None) return vf.get_rx_distance(sites)
[ "def", "_get_min_distance_to_volcanic_front", "(", "lons", ",", "lats", ")", ":", "vf", "=", "_construct_surface", "(", "VOLCANIC_FRONT_LONS", ",", "VOLCANIC_FRONT_LATS", ",", "0.", ",", "10.", ")", "sites", "=", "Mesh", "(", "lons", ",", "lats", ",", "None", ")", "return", "vf", ".", "get_rx_distance", "(", "sites", ")" ]
40.285714
21.285714
def segwit_address(self):
        """The public segwit (nested in P2SH) address you share with others to receive funds."""
        # Only make segwit address if public key is compressed
        if self._segwit_address is None and self.is_compressed():
            self._segwit_address = public_key_to_segwit_address(
                self._public_key, version=self.version)
        return self._segwit_address
[ "def", "segwit_address", "(", "self", ")", ":", "# Only make segwit address if public key is compressed", "if", "self", ".", "_segwit_address", "is", "None", "and", "self", ".", "is_compressed", "(", ")", ":", "self", ".", "_segwit_address", "=", "public_key_to_segwit_address", "(", "self", ".", "_public_key", ",", "version", "=", "self", ".", "version", ")", "return", "self", ".", "_segwit_address" ]
50.875
13.25
def show_zoning_enabled_configuration_input_request_type_get_next_request_last_rcvd_zone_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_zoning_enabled_configuration = ET.Element("show_zoning_enabled_configuration") config = show_zoning_enabled_configuration input = ET.SubElement(show_zoning_enabled_configuration, "input") request_type = ET.SubElement(input, "request-type") get_next_request = ET.SubElement(request_type, "get-next-request") last_rcvd_zone_name = ET.SubElement(get_next_request, "last-rcvd-zone-name") last_rcvd_zone_name.text = kwargs.pop('last_rcvd_zone_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_zoning_enabled_configuration_input_request_type_get_next_request_last_rcvd_zone_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_zoning_enabled_configuration", "=", "ET", ".", "Element", "(", "\"show_zoning_enabled_configuration\"", ")", "config", "=", "show_zoning_enabled_configuration", "input", "=", "ET", ".", "SubElement", "(", "show_zoning_enabled_configuration", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_next_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-next-request\"", ")", "last_rcvd_zone_name", "=", "ET", ".", "SubElement", "(", "get_next_request", ",", "\"last-rcvd-zone-name\"", ")", "last_rcvd_zone_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'last_rcvd_zone_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
55.357143
25.571429
def subscribe( self, plan, charge_immediately=True, application_fee_percent=None, coupon=None, quantity=None, metadata=None, tax_percent=None, billing_cycle_anchor=None, trial_end=None, trial_from_plan=None, trial_period_days=None, ): """ Subscribes this customer to a plan. :param plan: The plan to which to subscribe the customer. :type plan: Plan or string (plan ID) :param application_fee_percent: This represents the percentage of the subscription invoice subtotal that will be transferred to the application owner's Stripe account. The request must be made with an OAuth key in order to set an application fee percentage. :type application_fee_percent: Decimal. Precision is 2; anything more will be ignored. A positive decimal between 1 and 100. :param coupon: The code of the coupon to apply to this subscription. A coupon applied to a subscription will only affect invoices created for that particular subscription. :type coupon: string :param quantity: The quantity applied to this subscription. Default is 1. :type quantity: integer :param metadata: A set of key/value pairs useful for storing additional information. :type metadata: dict :param tax_percent: This represents the percentage of the subscription invoice subtotal that will be calculated and added as tax to the final amount each billing period. :type tax_percent: Decimal. Precision is 2; anything more will be ignored. A positive decimal between 1 and 100. :param billing_cycle_anchor: A future timestamp to anchor the subscription’s billing cycle. This is used to determine the date of the first full invoice, and, for plans with month or year intervals, the day of the month for subsequent invoices. :type billing_cycle_anchor: datetime :param trial_end: The end datetime of the trial period the customer will get before being charged for the first time. If set, this will override the default trial period of the plan the customer is being subscribed to. The special value ``now`` can be provided to end the customer's trial immediately. :type trial_end: datetime :param charge_immediately: Whether or not to charge for the subscription upon creation. If False, an invoice will be created at the end of this period. :type charge_immediately: boolean :param trial_from_plan: Indicates if a plan’s trial_period_days should be applied to the subscription. Setting trial_end per subscription is preferred, and this defaults to false. Setting this flag to true together with trial_end is not allowed. :type trial_from_plan: boolean :param trial_period_days: Integer representing the number of trial period days before the customer is charged for the first time. This will always overwrite any trials that might apply via a subscribed plan. :type trial_period_days: integer .. Notes: .. ``charge_immediately`` is only available on ``Customer.subscribe()`` .. if you're using ``Customer.subscribe()`` instead of ``Customer.subscribe()``, ``plan`` \ can only be a string """ from .billing import Subscription # Convert Plan to id if isinstance(plan, StripeModel): plan = plan.id stripe_subscription = Subscription._api_create( plan=plan, customer=self.id, application_fee_percent=application_fee_percent, coupon=coupon, quantity=quantity, metadata=metadata, billing_cycle_anchor=billing_cycle_anchor, tax_percent=tax_percent, trial_end=trial_end, trial_from_plan=trial_from_plan, trial_period_days=trial_period_days, ) if charge_immediately: self.send_invoice() return Subscription.sync_from_stripe_data(stripe_subscription)
[ "def", "subscribe", "(", "self", ",", "plan", ",", "charge_immediately", "=", "True", ",", "application_fee_percent", "=", "None", ",", "coupon", "=", "None", ",", "quantity", "=", "None", ",", "metadata", "=", "None", ",", "tax_percent", "=", "None", ",", "billing_cycle_anchor", "=", "None", ",", "trial_end", "=", "None", ",", "trial_from_plan", "=", "None", ",", "trial_period_days", "=", "None", ",", ")", ":", "from", ".", "billing", "import", "Subscription", "# Convert Plan to id", "if", "isinstance", "(", "plan", ",", "StripeModel", ")", ":", "plan", "=", "plan", ".", "id", "stripe_subscription", "=", "Subscription", ".", "_api_create", "(", "plan", "=", "plan", ",", "customer", "=", "self", ".", "id", ",", "application_fee_percent", "=", "application_fee_percent", ",", "coupon", "=", "coupon", ",", "quantity", "=", "quantity", ",", "metadata", "=", "metadata", ",", "billing_cycle_anchor", "=", "billing_cycle_anchor", ",", "tax_percent", "=", "tax_percent", ",", "trial_end", "=", "trial_end", ",", "trial_from_plan", "=", "trial_from_plan", ",", "trial_period_days", "=", "trial_period_days", ",", ")", "if", "charge_immediately", ":", "self", ".", "send_invoice", "(", ")", "return", "Subscription", ".", "sync_from_stripe_data", "(", "stripe_subscription", ")" ]
41.436782
25.850575
def ystep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.""" AXU = self.AX + self.U self.Y[..., 0:-1] = sp.prox_l2(AXU[..., 0:-1], self.mu/self.rho) self.Y[..., -1] = sp.prox_l1(AXU[..., -1], (self.lmbda/self.rho) * self.Wl1)
[ "def", "ystep", "(", "self", ")", ":", "AXU", "=", "self", ".", "AX", "+", "self", ".", "U", "self", ".", "Y", "[", "...", ",", "0", ":", "-", "1", "]", "=", "sp", ".", "prox_l2", "(", "AXU", "[", "...", ",", "0", ":", "-", "1", "]", ",", "self", ".", "mu", "/", "self", ".", "rho", ")", "self", ".", "Y", "[", "...", ",", "-", "1", "]", "=", "sp", ".", "prox_l1", "(", "AXU", "[", "...", ",", "-", "1", "]", ",", "(", "self", ".", "lmbda", "/", "self", ".", "rho", ")", "*", "self", ".", "Wl1", ")" ]
40.625
18.25
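Written out, the two assignments in ystep above are blockwise proximal steps applied to AX + U (a sketch in the class's own symbols; Wl1 is the elementwise l1 weighting array):

Y_{\ldots,\,0:-1} = \operatorname{prox}_{(\mu/\rho)\,\lVert\cdot\rVert_2}\big((AX+U)_{\ldots,\,0:-1}\big),
\qquad
Y_{\ldots,\,-1} = \operatorname{prox}_{(\lambda/\rho)\,\lVert W_{\ell 1}\odot\cdot\rVert_1}\big((AX+U)_{\ldots,\,-1}\big)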
def maybe_download_and_extract(): """Download and extract the tarball from Alex's website.""" dest_directory = "/tmp/cifar" if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory)
[ "def", "maybe_download_and_extract", "(", ")", ":", "dest_directory", "=", "\"/tmp/cifar\"", "if", "not", "os", ".", "path", ".", "exists", "(", "dest_directory", ")", ":", "os", ".", "makedirs", "(", "dest_directory", ")", "filename", "=", "DATA_URL", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "dest_directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "def", "_progress", "(", "count", ",", "block_size", ",", "total_size", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r>> Downloading %s %.1f%%'", "%", "(", "filename", ",", "float", "(", "count", "*", "block_size", ")", "/", "float", "(", "total_size", ")", "*", "100.0", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "filepath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "DATA_URL", ",", "filepath", ",", "_progress", ")", "print", "(", ")", "statinfo", "=", "os", ".", "stat", "(", "filepath", ")", "print", "(", "'Successfully downloaded'", ",", "filename", ",", "statinfo", ".", "st_size", ",", "'bytes.'", ")", "tarfile", ".", "open", "(", "filepath", ",", "'r:gz'", ")", ".", "extractall", "(", "dest_directory", ")" ]
45.411765
14.529412
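The _progress closure above follows urllib's reporthook convention: it is called with the block count, block size, and total size after every chunk. A standalone sketch of the same idea (the URL below is only a placeholder):

import sys
import urllib.request

def make_progress_hook(filename):
    def hook(count, block_size, total_size):
        pct = min(100.0, float(count * block_size) / float(total_size) * 100.0)
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, pct))
        sys.stdout.flush()
    return hook

# urllib.request.urlretrieve("https://example.com/cifar-10-python.tar.gz",
#                            "/tmp/cifar-10-python.tar.gz",
#                            make_progress_hook("cifar-10-python.tar.gz"))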
def open_filezip(file_path, find_str):
    """
    Open the matching files inside a zip archive.

    Read directly from the zip without extracting its content.
    """
    if zipfile.is_zipfile(file_path):
        zipf = zipfile.ZipFile(file_path)
        interesting_files = [f for f in zipf.infolist() if find_str in f.filename]
        for inside_file in interesting_files:
            yield zipf.open(inside_file)
[ "def", "open_filezip", "(", "file_path", ",", "find_str", ")", ":", "if", "zipfile", ".", "is_zipfile", "(", "file_path", ")", ":", "zipf", "=", "zipfile", ".", "ZipFile", "(", "file_path", ")", "interesting_files", "=", "[", "f", "for", "f", "in", "zipf", ".", "infolist", "(", ")", "if", "find_str", "in", "f", "]", "for", "inside_file", "in", "interesting_files", ":", "yield", "zipf", ".", "open", "(", "inside_file", ")" ]
34.181818
10.909091
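A usage sketch for the open_filezip generator above: build a small archive, then stream only the members whose names contain the search string, without extracting anything to disk (the file names are invented):

import zipfile

with zipfile.ZipFile('demo.zip', 'w') as zf:
    zf.writestr('error_2024.log', b'boom\n')
    zf.writestr('access.log', b'ok\n')

for member in open_filezip('demo.zip', 'error'):
    print(member.read())  # only error_2024.log is yielded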
def convergence_criteria_small_relative_norm_weights_change( tolerance=1e-5, norm_order=2): """Returns Python `callable` which indicates fitting procedure has converged. Writing old, new `model_coefficients` as `w0`, `w1`, this function defines convergence as, ```python relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) / (1. + tf.norm(w0, ord=2, axis=-1))) reduce_all(relative_euclidean_norm < tolerance) ``` where `tf.norm(x, ord=2)` denotes the [Euclidean norm]( https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`. Args: tolerance: `float`-like `Tensor` indicating convergence, i.e., when max relative Euclidean norm weights difference < tolerance`. Default value: `1e-5`. norm_order: Order of the norm. Default value: `2` (i.e., "Euclidean norm".) Returns: convergence_criteria_fn: Python `callable` which returns `bool` `Tensor` indicated fitting procedure has converged. (See inner function specification for argument signature.) Default value: `1e-5`. """ def convergence_criteria_fn( is_converged_previous, # pylint: disable=unused-argument iter_, model_coefficients_previous, predicted_linear_response_previous, # pylint: disable=unused-argument model_coefficients_next, predicted_linear_response_next, # pylint: disable=unused-argument response, # pylint: disable=unused-argument model, # pylint: disable=unused-argument dispersion): # pylint: disable=unused-argument """Returns `bool` `Tensor` indicating if fitting procedure has converged. Args: is_converged_previous: "old" convergence results. iter_: Iteration number. model_coefficients_previous: "old" `model_coefficients`. predicted_linear_response_previous: "old" `predicted_linear_response`. model_coefficients_next: "new" `model_coefficients`. predicted_linear_response_next: "new: `predicted_linear_response`. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model: `tfp.glm.ExponentialFamily`-like instance used to construct the negative log-likelihood loss, gradient, and expected Hessian (i.e., the Fisher information matrix). dispersion: `Tensor` representing `response` dispersion, i.e., as in: `p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast with rows of `model_matrix`. Default value: `None` (i.e., "no dispersion"). Returns: is_converged: `bool` `Tensor`. """ relative_euclidean_norm = ( tf.norm( tensor=model_coefficients_previous - model_coefficients_next, ord=norm_order, axis=-1) / (1. + tf.norm(tensor=model_coefficients_previous, ord=norm_order, axis=-1))) return (iter_ > 0) & tf.reduce_all( input_tensor=relative_euclidean_norm < tolerance) return convergence_criteria_fn
[ "def", "convergence_criteria_small_relative_norm_weights_change", "(", "tolerance", "=", "1e-5", ",", "norm_order", "=", "2", ")", ":", "def", "convergence_criteria_fn", "(", "is_converged_previous", ",", "# pylint: disable=unused-argument", "iter_", ",", "model_coefficients_previous", ",", "predicted_linear_response_previous", ",", "# pylint: disable=unused-argument", "model_coefficients_next", ",", "predicted_linear_response_next", ",", "# pylint: disable=unused-argument", "response", ",", "# pylint: disable=unused-argument", "model", ",", "# pylint: disable=unused-argument", "dispersion", ")", ":", "# pylint: disable=unused-argument", "\"\"\"Returns `bool` `Tensor` indicating if fitting procedure has converged.\n\n Args:\n is_converged_previous: \"old\" convergence results.\n iter_: Iteration number.\n model_coefficients_previous: \"old\" `model_coefficients`.\n predicted_linear_response_previous: \"old\" `predicted_linear_response`.\n model_coefficients_next: \"new\" `model_coefficients`.\n predicted_linear_response_next: \"new: `predicted_linear_response`.\n response: (Batch of) vector-shaped `Tensor` where each element represents\n a sample's observed response (to the corresponding row of features).\n Must have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance used to construct the\n negative log-likelihood loss, gradient, and expected Hessian (i.e., the\n Fisher information matrix).\n dispersion: `Tensor` representing `response` dispersion, i.e., as in:\n `p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast\n with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n\n Returns:\n is_converged: `bool` `Tensor`.\n \"\"\"", "relative_euclidean_norm", "=", "(", "tf", ".", "norm", "(", "tensor", "=", "model_coefficients_previous", "-", "model_coefficients_next", ",", "ord", "=", "norm_order", ",", "axis", "=", "-", "1", ")", "/", "(", "1.", "+", "tf", ".", "norm", "(", "tensor", "=", "model_coefficients_previous", ",", "ord", "=", "norm_order", ",", "axis", "=", "-", "1", ")", ")", ")", "return", "(", "iter_", ">", "0", ")", "&", "tf", ".", "reduce_all", "(", "input_tensor", "=", "relative_euclidean_norm", "<", "tolerance", ")", "return", "convergence_criteria_fn" ]
41.931507
22.780822
def imap_unordered_async(self, func, iterable, chunksize=None, callback=None): """A variant of the imap_unordered() method which returns an ApplyResult object that provides an iterator (next method(timeout) available). If callback is specified then it should be a callable which accepts a single argument. When the resulting iterator becomes ready, callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.""" apply_result = ApplyResult(callback=callback) collector = UnorderedResultCollector(apply_result) self._create_sequences(func, iterable, chunksize, collector) return apply_result
[ "def", "imap_unordered_async", "(", "self", ",", "func", ",", "iterable", ",", "chunksize", "=", "None", ",", "callback", "=", "None", ")", ":", "apply_result", "=", "ApplyResult", "(", "callback", "=", "callback", ")", "collector", "=", "UnorderedResultCollector", "(", "apply_result", ")", "self", ".", "_create_sequences", "(", "func", ",", "iterable", ",", "chunksize", ",", "collector", ")", "return", "apply_result" ]
55.214286
19.714286
def extract(dset, *d_slices): """ :param dset: a D-dimensional dataset or array :param d_slices: D slice objects (or similar) :returns: a reduced D-dimensional array >>> a = numpy.array([[1, 2, 3], [4, 5, 6]]) # shape (2, 3) >>> extract(a, slice(None), 1) array([[2], [5]]) >>> extract(a, [0, 1], slice(1, 3)) array([[2, 3], [5, 6]]) """ shp = list(dset.shape) if len(shp) != len(d_slices): raise ValueError('Array with %d dimensions but %d slices' % (len(shp), len(d_slices))) sizes = [] slices = [] for i, slc in enumerate(d_slices): if slc == slice(None): size = shp[i] slices.append([slice(None)]) elif hasattr(slc, 'start'): size = slc.stop - slc.start slices.append([slice(slc.start, slc.stop, 0)]) elif isinstance(slc, list): size = len(slc) slices.append([slice(s, s + 1, j) for j, s in enumerate(slc)]) elif isinstance(slc, Number): size = 1 slices.append([slice(slc, slc + 1, 0)]) else: size = shp[i] slices.append([slc]) sizes.append(size) array = numpy.zeros(sizes, dset.dtype) for tup in itertools.product(*slices): aidx = tuple(s if s.step is None else slice(s.step, s.step + s.stop - s.start) for s in tup) sel = tuple(s if s.step is None else slice(s.start, s.stop) for s in tup) array[aidx] = dset[sel] return array
[ "def", "extract", "(", "dset", ",", "*", "d_slices", ")", ":", "shp", "=", "list", "(", "dset", ".", "shape", ")", "if", "len", "(", "shp", ")", "!=", "len", "(", "d_slices", ")", ":", "raise", "ValueError", "(", "'Array with %d dimensions but %d slices'", "%", "(", "len", "(", "shp", ")", ",", "len", "(", "d_slices", ")", ")", ")", "sizes", "=", "[", "]", "slices", "=", "[", "]", "for", "i", ",", "slc", "in", "enumerate", "(", "d_slices", ")", ":", "if", "slc", "==", "slice", "(", "None", ")", ":", "size", "=", "shp", "[", "i", "]", "slices", ".", "append", "(", "[", "slice", "(", "None", ")", "]", ")", "elif", "hasattr", "(", "slc", ",", "'start'", ")", ":", "size", "=", "slc", ".", "stop", "-", "slc", ".", "start", "slices", ".", "append", "(", "[", "slice", "(", "slc", ".", "start", ",", "slc", ".", "stop", ",", "0", ")", "]", ")", "elif", "isinstance", "(", "slc", ",", "list", ")", ":", "size", "=", "len", "(", "slc", ")", "slices", ".", "append", "(", "[", "slice", "(", "s", ",", "s", "+", "1", ",", "j", ")", "for", "j", ",", "s", "in", "enumerate", "(", "slc", ")", "]", ")", "elif", "isinstance", "(", "slc", ",", "Number", ")", ":", "size", "=", "1", "slices", ".", "append", "(", "[", "slice", "(", "slc", ",", "slc", "+", "1", ",", "0", ")", "]", ")", "else", ":", "size", "=", "shp", "[", "i", "]", "slices", ".", "append", "(", "[", "slc", "]", ")", "sizes", ".", "append", "(", "size", ")", "array", "=", "numpy", ".", "zeros", "(", "sizes", ",", "dset", ".", "dtype", ")", "for", "tup", "in", "itertools", ".", "product", "(", "*", "slices", ")", ":", "aidx", "=", "tuple", "(", "s", "if", "s", ".", "step", "is", "None", "else", "slice", "(", "s", ".", "step", ",", "s", ".", "step", "+", "s", ".", "stop", "-", "s", ".", "start", ")", "for", "s", "in", "tup", ")", "sel", "=", "tuple", "(", "s", "if", "s", ".", "step", "is", "None", "else", "slice", "(", "s", ".", "start", ",", "s", ".", "stop", ")", "for", "s", "in", "tup", ")", "array", "[", "aidx", "]", "=", "dset", "[", "sel", "]", "return", "array" ]
34.130435
13.217391
def _set_return(self): """Sets the return parameter with description and rtype if any""" # TODO: manage return retrieved from element code (external) # TODO: manage different in/out styles if type(self.docs['in']['return']) is list and self.dst.style['out'] not in ['groups', 'numpydoc', 'google']: # TODO: manage return names # manage not setting return if not mandatory for numpy lst = self.docs['in']['return'] if lst: if lst[0][0] is not None: self.docs['out']['return'] = "%s-> %s" % (lst[0][0], lst[0][1]) else: self.docs['out']['return'] = lst[0][1] self.docs['out']['rtype'] = lst[0][2] else: self.docs['out']['return'] = self.docs['in']['return'] self.docs['out']['rtype'] = self.docs['in']['rtype']
[ "def", "_set_return", "(", "self", ")", ":", "# TODO: manage return retrieved from element code (external)", "# TODO: manage different in/out styles", "if", "type", "(", "self", ".", "docs", "[", "'in'", "]", "[", "'return'", "]", ")", "is", "list", "and", "self", ".", "dst", ".", "style", "[", "'out'", "]", "not", "in", "[", "'groups'", ",", "'numpydoc'", ",", "'google'", "]", ":", "# TODO: manage return names", "# manage not setting return if not mandatory for numpy", "lst", "=", "self", ".", "docs", "[", "'in'", "]", "[", "'return'", "]", "if", "lst", ":", "if", "lst", "[", "0", "]", "[", "0", "]", "is", "not", "None", ":", "self", ".", "docs", "[", "'out'", "]", "[", "'return'", "]", "=", "\"%s-> %s\"", "%", "(", "lst", "[", "0", "]", "[", "0", "]", ",", "lst", "[", "0", "]", "[", "1", "]", ")", "else", ":", "self", ".", "docs", "[", "'out'", "]", "[", "'return'", "]", "=", "lst", "[", "0", "]", "[", "1", "]", "self", ".", "docs", "[", "'out'", "]", "[", "'rtype'", "]", "=", "lst", "[", "0", "]", "[", "2", "]", "else", ":", "self", ".", "docs", "[", "'out'", "]", "[", "'return'", "]", "=", "self", ".", "docs", "[", "'in'", "]", "[", "'return'", "]", "self", ".", "docs", "[", "'out'", "]", "[", "'rtype'", "]", "=", "self", ".", "docs", "[", "'in'", "]", "[", "'rtype'", "]" ]
52.470588
20.647059