def getfigs(*fig_nums):
    """Get a list of matplotlib figures by figure numbers.

    If no arguments are given, all available figures are returned. If the
    argument list contains references to invalid figures, a warning is
    printed but the function continues processing further figures.

    Parameters
    ----------
    fig_nums : tuple
        A tuple of ints giving the figure numbers of the figures to return.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        fig_managers = Gcf.get_all_fig_managers()
        return [fm.canvas.figure for fm in fig_managers]
    else:
        figs = []
        for num in fig_nums:
            f = Gcf.figs.get(num)
            if f is None:
                print('Warning: figure %s not available.' % num)
            else:
                figs.append(f.canvas.figure)
        return figs
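A quick smoke test; this is a sketch that assumes getfigs is in scope as above (in IPython it ships as IPython.core.pylabtools.getfigs) and uses the non-interactive Agg backend so no display is needed:

import matplotlib
matplotlib.use('Agg')  # headless backend, no window required
import matplotlib.pyplot as plt

plt.figure(1)
plt.figure(2)
figs = getfigs(1, 2, 99)  # figure 99 does not exist: a warning is printed
print(len(figs))          # -> 2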
[ "def", "getfigs", "(", "*", "fig_nums", ")", ":", "from", "matplotlib", ".", "_pylab_helpers", "import", "Gcf", "if", "not", "fig_nums", ":", "fig_managers", "=", "Gcf", ".", "get_all_fig_managers", "(", ")", "return", "[", "fm", ".", "canvas", ".", "figure", "for", "fm", "in", "fig_managers", "]", "else", ":", "figs", "=", "[", "]", "for", "num", "in", "fig_nums", ":", "f", "=", "Gcf", ".", "figs", ".", "get", "(", "num", ")", "if", "f", "is", "None", ":", "print", "(", "'Warning: figure %s not available.'", "%", "num", ")", "else", ":", "figs", ".", "append", "(", "f", ".", "canvas", ".", "figure", ")", "return", "figs" ]
33.6
20.2
def rectToArray(self, swapWH=False):
    """Convert the rectangle to an array of corner coordinates.

    @return: an array of rect points, e.g. (x1, y1, x2, y2)
    """
    if not swapWH:
        return [self.x, self.y, self.x + self.width, self.y + self.height]
    else:
        return [self.x, self.y, self.x + self.height, self.y + self.width]
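To exercise the method, a minimal hypothetical Rect host class carrying the four attributes it reads is enough:

class Rect:
    def __init__(self, x, y, width, height):
        self.x, self.y, self.width, self.height = x, y, width, height

    rectToArray = rectToArray  # reuse the function above as a method

r = Rect(10, 20, 30, 40)
print(r.rectToArray())      # [10, 20, 40, 60]
print(r.rectToArray(True))  # [10, 20, 50, 50] -- width/height swapped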
[ "def", "rectToArray", "(", "self", ",", "swapWH", "=", "False", ")", ":", "if", "swapWH", "==", "False", ":", "return", "[", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "x", "+", "self", ".", "width", ",", "self", ".", "y", "+", "self", ".", "height", "]", "else", ":", "return", "[", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "x", "+", "self", ".", "height", ",", "self", ".", "y", "+", "self", ".", "width", "]" ]
33.357143
18.428571
def set_prekeys_as_sent(self, prekeyIds):
    """Mark the given prekeys as sent in the underlying store.

    :param prekeyIds: the prekey objects whose ids are to be marked as sent
    :type prekeyIds: list
    :return: None
    :rtype: None
    """
    logger.debug("set_prekeys_as_sent(prekeyIds=[%d prekeyIds])" % len(prekeyIds))
    self._store.preKeyStore.setAsSent([prekey.getId() for prekey in prekeyIds])
[ "def", "set_prekeys_as_sent", "(", "self", ",", "prekeyIds", ")", ":", "logger", ".", "debug", "(", "\"set_prekeys_as_sent(prekeyIds=[%d prekeyIds])\"", "%", "len", "(", "prekeyIds", ")", ")", "self", ".", "_store", ".", "preKeyStore", ".", "setAsSent", "(", "[", "prekey", ".", "getId", "(", ")", "for", "prekey", "in", "prekeyIds", "]", ")" ]
35.222222
18.333333
def find_encrypt_data_assertion_list(self, _assertions):
    """Verify whether a list of assertions contains encrypted data in the
    advice element.

    :param _assertions: A list of assertions.
    :return: True if encrypted data exists, otherwise None.
    """
    for _assertion in _assertions:
        if _assertion.advice:
            if _assertion.advice.encrypted_assertion:
                res = self.find_encrypt_data_assertion(
                    _assertion.advice.encrypted_assertion)
                if res:
                    return True
[ "def", "find_encrypt_data_assertion_list", "(", "self", ",", "_assertions", ")", ":", "for", "_assertion", "in", "_assertions", ":", "if", "_assertion", ".", "advice", ":", "if", "_assertion", ".", "advice", ".", "encrypted_assertion", ":", "res", "=", "self", ".", "find_encrypt_data_assertion", "(", "_assertion", ".", "advice", ".", "encrypted_assertion", ")", "if", "res", ":", "return", "True" ]
41.785714
13.357143
def get_traffic_meter(self):
    """Return a dict of traffic meter stats.

    Returns None if an error occurred.
    """
    _LOGGER.info("Get traffic meter")

    def parse_text(text):
        """There are three kinds of values in the returned data.

        This function parses the different values and returns
        (total, avg), a timedelta, or a plain float.
        """
        def tofloats(lst):
            return (float(t) for t in lst)
        try:
            if "/" in text:  # "6.19/0.88" total/avg
                return tuple(tofloats(text.split('/')))
            elif ":" in text:  # 11:14 hr:mn
                hour, mins = tofloats(text.split(':'))
                return timedelta(hours=hour, minutes=mins)
            else:
                return float(text)
        except ValueError:
            return None

    success, response = self._make_request(SERVICE_DEVICE_CONFIG,
                                           "GetTrafficMeterStatistics")
    if not success:
        return None

    success, node = _find_node(
        response.text,
        ".//GetTrafficMeterStatisticsResponse")
    if not success:
        return None

    return {t.tag: parse_text(t.text) for t in node}
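The inner parse_text helper is self-contained, so its three branches can be checked in isolation:

from datetime import timedelta

def parse_text(text):
    def tofloats(lst):
        return (float(t) for t in lst)
    try:
        if "/" in text:
            return tuple(tofloats(text.split('/')))
        elif ":" in text:
            hour, mins = tofloats(text.split(':'))
            return timedelta(hours=hour, minutes=mins)
        else:
            return float(text)
    except ValueError:
        return None

print(parse_text("6.19/0.88"))     # (6.19, 0.88) -- total/avg pair
print(parse_text("11:14"))         # 11:14:00 as a timedelta
print(parse_text("not a number"))  # None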
[ "def", "get_traffic_meter", "(", "self", ")", ":", "_LOGGER", ".", "info", "(", "\"Get traffic meter\"", ")", "def", "parse_text", "(", "text", ")", ":", "\"\"\"\n there are three kinds of values in the returned data\n This function parses the different values and returns\n (total, avg), timedelta or a plain float\n \"\"\"", "def", "tofloats", "(", "lst", ")", ":", "return", "(", "float", "(", "t", ")", "for", "t", "in", "lst", ")", "try", ":", "if", "\"/\"", "in", "text", ":", "# \"6.19/0.88\" total/avg", "return", "tuple", "(", "tofloats", "(", "text", ".", "split", "(", "'/'", ")", ")", ")", "elif", "\":\"", "in", "text", ":", "# 11:14 hr:mn", "hour", ",", "mins", "=", "tofloats", "(", "text", ".", "split", "(", "':'", ")", ")", "return", "timedelta", "(", "hours", "=", "hour", ",", "minutes", "=", "mins", ")", "else", ":", "return", "float", "(", "text", ")", "except", "ValueError", ":", "return", "None", "success", ",", "response", "=", "self", ".", "_make_request", "(", "SERVICE_DEVICE_CONFIG", ",", "\"GetTrafficMeterStatistics\"", ")", "if", "not", "success", ":", "return", "None", "success", ",", "node", "=", "_find_node", "(", "response", ".", "text", ",", "\".//GetTrafficMeterStatisticsResponse\"", ")", "if", "not", "success", ":", "return", "None", "return", "{", "t", ".", "tag", ":", "parse_text", "(", "t", ".", "text", ")", "for", "t", "in", "node", "}" ]
34.342105
17.131579
def setup_statemachine(self):
    """Setup and start state machine"""
    machine = QtCore.QStateMachine()

    #  _______________
    # |               |
    # |               |
    # |               |
    # |_______________|
    #
    group = util.QState("group", QtCore.QState.ParallelStates, machine)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    visibility = util.QState("visibility", group)

    hidden = util.QState("hidden", visibility)
    visible = util.QState("visible", visibility)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    operation = util.QState("operation", group)

    ready = util.QState("ready", operation)
    collecting = util.QState("collecting", operation)
    validating = util.QState("validating", operation)
    extracting = util.QState("extracting", operation)
    integrating = util.QState("integrating", operation)
    finished = util.QState("finished", operation)
    repairing = util.QState("repairing", operation)
    initialising = util.QState("initialising", operation)
    stopping = util.QState("stopping", operation)
    stopped = util.QState("stopped", operation)
    saving = util.QState("saving", operation)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    errored = util.QState("errored", group)

    clean = util.QState("clean", errored)
    dirty = util.QState("dirty", errored)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    # States that block the underlying GUI
    suspended = util.QState("suspended", group)

    alive = util.QState("alive", suspended)
    acting = util.QState("acting", suspended)
    acted = QtCore.QHistoryState(operation)
    acted.setDefaultState(ready)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________|
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________|
    #
    hidden.addTransition(self.show, visible)
    visible.addTransition(self.hide, hidden)

    ready.addTransition(self.acting, acting)
    ready.addTransition(self.validating, validating)
    ready.addTransition(self.initialising, initialising)
    ready.addTransition(self.repairing, repairing)
    ready.addTransition(self.saving, saving)
    saving.addTransition(self.saved, ready)
    collecting.addTransition(self.initialised, ready)
    collecting.addTransition(self.stopping, stopping)
    validating.addTransition(self.stopping, stopping)
    validating.addTransition(self.finished, finished)
    validating.addTransition(self.extracting, extracting)
    extracting.addTransition(self.stopping, stopping)
    extracting.addTransition(self.finished, finished)
    extracting.addTransition(self.integrating, integrating)
    integrating.addTransition(self.stopping, stopping)
    integrating.addTransition(self.finished, finished)
    finished.addTransition(self.initialising, initialising)
    finished.addTransition(self.acting, acting)
    initialising.addTransition(self.collecting, collecting)
    stopping.addTransition(self.acted, acted)
    stopping.addTransition(self.finished, finished)

    dirty.addTransition(self.initialising, clean)
    clean.addTransition(self.changed, dirty)

    alive.addTransition(self.acting, acting)
    acting.addTransition(self.acted, acted)

    # Set initial states
    for compound, state in {machine: group,
                            visibility: hidden,
                            operation: ready,
                            errored: clean,
                            suspended: alive}.items():
        compound.setInitialState(state)

    # Make connections
    for state in (hidden,
                  visible,
                  ready,
                  collecting,
                  validating,
                  extracting,
                  integrating,
                  finished,
                  repairing,
                  initialising,
                  stopping,
                  saving,
                  stopped,
                  dirty,
                  clean,
                  acting,
                  alive,
                  acted):
        state.entered.connect(
            lambda state=state: self.state_changed.emit(state.name))

    machine.start()
    return machine
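The `state=state` default in the connect loop is the standard trick for binding the loop variable at lambda-definition time; a standalone sketch of why it matters:

callbacks = [lambda name=name: print(name)
             for name in ("hidden", "visible", "ready")]
for cb in callbacks:
    cb()   # prints hidden, visible, ready -- each lambda bound its own value

late = [lambda: print(name) for name in ("hidden", "visible", "ready")]
for cb in late:
    cb()   # prints ready three times -- the closures all see the final value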
[ "def", "setup_statemachine", "(", "self", ")", ":", "machine", "=", "QtCore", ".", "QStateMachine", "(", ")", "# _______________", "# | |", "# | |", "# | |", "# |_______________|", "#", "group", "=", "util", ".", "QState", "(", "\"group\"", ",", "QtCore", ".", "QState", ".", "ParallelStates", ",", "machine", ")", "# _______________", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________| - Parallell State", "#", "visibility", "=", "util", ".", "QState", "(", "\"visibility\"", ",", "group", ")", "hidden", "=", "util", ".", "QState", "(", "\"hidden\"", ",", "visibility", ")", "visible", "=", "util", ".", "QState", "(", "\"visible\"", ",", "visibility", ")", "# _______________", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________| - Parallell State", "#", "operation", "=", "util", ".", "QState", "(", "\"operation\"", ",", "group", ")", "ready", "=", "util", ".", "QState", "(", "\"ready\"", ",", "operation", ")", "collecting", "=", "util", ".", "QState", "(", "\"collecting\"", ",", "operation", ")", "validating", "=", "util", ".", "QState", "(", "\"validating\"", ",", "operation", ")", "extracting", "=", "util", ".", "QState", "(", "\"extracting\"", ",", "operation", ")", "integrating", "=", "util", ".", "QState", "(", "\"integrating\"", ",", "operation", ")", "finished", "=", "util", ".", "QState", "(", "\"finished\"", ",", "operation", ")", "repairing", "=", "util", ".", "QState", "(", "\"repairing\"", ",", "operation", ")", "initialising", "=", "util", ".", "QState", "(", "\"initialising\"", ",", "operation", ")", "stopping", "=", "util", ".", "QState", "(", "\"stopping\"", ",", "operation", ")", "stopped", "=", "util", ".", "QState", "(", "\"stopped\"", ",", "operation", ")", "saving", "=", "util", ".", "QState", "(", "\"saving\"", ",", "operation", ")", "# _______________", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________| - Parallell State", "#", "errored", "=", "util", ".", "QState", "(", "\"errored\"", ",", "group", ")", "clean", "=", "util", ".", "QState", "(", "\"clean\"", ",", "errored", ")", "dirty", "=", "util", ".", "QState", "(", "\"dirty\"", ",", "errored", ")", "# _______________", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________| - Parallell State", "# States that block the underlying GUI", "suspended", "=", "util", ".", "QState", "(", "\"suspended\"", ",", "group", ")", "alive", "=", "util", ".", "QState", "(", "\"alive\"", ",", "suspended", ")", "acting", "=", "util", ".", "QState", "(", "\"acting\"", ",", "suspended", ")", "acted", "=", "QtCore", ".", "QHistoryState", "(", "operation", ")", "acted", ".", "setDefaultState", "(", "ready", ")", "# _______________", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________|", "# | ____ ____ |", "# || |---| ||", "# ||____|---|____||", "# |_______________|", "#", "hidden", ".", "addTransition", "(", "self", ".", "show", ",", "visible", ")", "visible", ".", "addTransition", "(", "self", ".", "hide", ",", "hidden", ")", "ready", ".", "addTransition", "(", "self", ".", "acting", ",", "acting", ")", "ready", ".", "addTransition", "(", "self", ".", "validating", ",", "validating", ")", "ready", ".", "addTransition", "(", "self", ".", "initialising", ",", "initialising", ")", "ready", ".", "addTransition", "(", "self", ".", "repairing", ",", "repairing", ")", "ready", ".", "addTransition", "(", "self", ".", "saving", ",", "saving", ")", "saving", ".", "addTransition", "(", "self", ".", "saved", ",", 
"ready", ")", "collecting", ".", "addTransition", "(", "self", ".", "initialised", ",", "ready", ")", "collecting", ".", "addTransition", "(", "self", ".", "stopping", ",", "stopping", ")", "validating", ".", "addTransition", "(", "self", ".", "stopping", ",", "stopping", ")", "validating", ".", "addTransition", "(", "self", ".", "finished", ",", "finished", ")", "validating", ".", "addTransition", "(", "self", ".", "extracting", ",", "extracting", ")", "extracting", ".", "addTransition", "(", "self", ".", "stopping", ",", "stopping", ")", "extracting", ".", "addTransition", "(", "self", ".", "finished", ",", "finished", ")", "extracting", ".", "addTransition", "(", "self", ".", "integrating", ",", "integrating", ")", "integrating", ".", "addTransition", "(", "self", ".", "stopping", ",", "stopping", ")", "integrating", ".", "addTransition", "(", "self", ".", "finished", ",", "finished", ")", "finished", ".", "addTransition", "(", "self", ".", "initialising", ",", "initialising", ")", "finished", ".", "addTransition", "(", "self", ".", "acting", ",", "acting", ")", "initialising", ".", "addTransition", "(", "self", ".", "collecting", ",", "collecting", ")", "stopping", ".", "addTransition", "(", "self", ".", "acted", ",", "acted", ")", "stopping", ".", "addTransition", "(", "self", ".", "finished", ",", "finished", ")", "dirty", ".", "addTransition", "(", "self", ".", "initialising", ",", "clean", ")", "clean", ".", "addTransition", "(", "self", ".", "changed", ",", "dirty", ")", "alive", ".", "addTransition", "(", "self", ".", "acting", ",", "acting", ")", "acting", ".", "addTransition", "(", "self", ".", "acted", ",", "acted", ")", "# Set initial states", "for", "compound", ",", "state", "in", "{", "machine", ":", "group", ",", "visibility", ":", "hidden", ",", "operation", ":", "ready", ",", "errored", ":", "clean", ",", "suspended", ":", "alive", "}", ".", "items", "(", ")", ":", "compound", ".", "setInitialState", "(", "state", ")", "# Make connections", "for", "state", "in", "(", "hidden", ",", "visible", ",", "ready", ",", "collecting", ",", "validating", ",", "extracting", ",", "integrating", ",", "finished", ",", "repairing", ",", "initialising", ",", "stopping", ",", "saving", ",", "stopped", ",", "dirty", ",", "clean", ",", "acting", ",", "alive", ",", "acted", ")", ":", "state", ".", "entered", ".", "connect", "(", "lambda", "state", "=", "state", ":", "self", ".", "state_changed", ".", "emit", "(", "state", ".", "name", ")", ")", "machine", ".", "start", "(", ")", "return", "machine" ]
33.013333
17.566667
def _example_stock_basic(quote_ctx):
    """Get stock information and print the stock code, stock name, lot
    size, stock type, and the owner stock that a subtype belongs to.
    """
    ret_status, ret_data = quote_ctx.get_stock_basicinfo(ft.Market.HK, ft.SecurityType.STOCK)
    if ret_status != ft.RET_OK:
        print(ret_data)
        exit()
    print("stock_basic")
    print(ret_data)
[ "def", "_example_stock_basic", "(", "quote_ctx", ")", ":", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "get_stock_basicinfo", "(", "ft", ".", "Market", ".", "HK", ",", "ft", ".", "SecurityType", ".", "STOCK", ")", "if", "ret_status", "!=", "ft", ".", "RET_OK", ":", "print", "(", "ret_data", ")", "exit", "(", ")", "print", "(", "\"stock_basic\"", ")", "print", "(", "ret_data", ")" ]
29.4
14.6
def send(self, buf, flags=0):
    """Send data on the connection.

    NOTE: If you get one of the WantRead, WantWrite or WantX509Lookup
    exceptions on this, you have to call the method again with the SAME
    buffer.

    :param buf: The string, buffer or memoryview to send
    :param flags: (optional) Included for compatibility with the socket
                  API, the value is ignored
    :return: The number of bytes written
    """
    # Backward compatibility
    buf = _text_to_bytes_and_warn("buf", buf)

    if isinstance(buf, memoryview):
        buf = buf.tobytes()
    if isinstance(buf, _buffer):
        buf = str(buf)
    if not isinstance(buf, bytes):
        raise TypeError("data must be a memoryview, buffer or byte string")
    if len(buf) > 2147483647:
        raise ValueError("Cannot send more than 2**31-1 bytes at once.")

    result = _lib.SSL_write(self._ssl, buf, len(buf))
    self._raise_ssl_error(self._ssl, result)
    return result
[ "def", "send", "(", "self", ",", "buf", ",", "flags", "=", "0", ")", ":", "# Backward compatibility", "buf", "=", "_text_to_bytes_and_warn", "(", "\"buf\"", ",", "buf", ")", "if", "isinstance", "(", "buf", ",", "memoryview", ")", ":", "buf", "=", "buf", ".", "tobytes", "(", ")", "if", "isinstance", "(", "buf", ",", "_buffer", ")", ":", "buf", "=", "str", "(", "buf", ")", "if", "not", "isinstance", "(", "buf", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"data must be a memoryview, buffer or byte string\"", ")", "if", "len", "(", "buf", ")", ">", "2147483647", ":", "raise", "ValueError", "(", "\"Cannot send more than 2**31-1 bytes at once.\"", ")", "result", "=", "_lib", ".", "SSL_write", "(", "self", ".", "_ssl", ",", "buf", ",", "len", "(", "buf", ")", ")", "self", ".", "_raise_ssl_error", "(", "self", ".", "_ssl", ",", "result", ")", "return", "result" ]
39.769231
17
def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
    '''Plot series data from a MonitorTimeElapsed output text file.

    Args:
        filename (str): Path to a *.series.txt file produced by the
            :obj:`~nnabla.monitor.MonitorTimeElapsed` class.
        elapsed (bool): If ``True``, it plots the total elapsed time.
        unit (str): Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
        plot_kwargs (dict, optional): Keyword arguments passed to
            :func:`matplotlib.pyplot.plot`.

    Note:
        The matplotlib package is required.
    '''
    import numpy as np
    import matplotlib.pyplot as plt
    if plot_kwargs is None:
        plot_kwargs = {}
    data_column = 3 if elapsed else 1
    data = np.genfromtxt(filename, dtype='i8,f4',
                         usecols=(0, data_column), names=['k', 'v'])
    index = data['k']
    values = data['v']
    if unit == 's':
        pass
    elif unit == 'm':
        values /= 60
    elif unit == 'h':
        values /= 3600
    elif unit == 'd':
        values /= 3600 * 24
    else:
        raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')
    plt.plot(index, values, **plot_kwargs)
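A minimal self-contained run; this sketch assumes a headless backend and a synthetic two-column file in the layout the default elapsed=False path reads (iteration index, seconds), while real MonitorTimeElapsed files carry additional columns that the elapsed=True path uses:

import os, tempfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

path = os.path.join(tempfile.mkdtemp(), 'time.series.txt')
with open(path, 'w') as f:
    f.write('100 90.0\n200 84.0\n300 96.0\n')

plot_time_elapsed(path, unit='m')  # values are divided by 60
plt.savefig('time_elapsed.png')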
[ "def", "plot_time_elapsed", "(", "filename", ",", "elapsed", "=", "False", ",", "unit", "=", "'s'", ",", "plot_kwargs", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "plot_kwargs", "is", "None", ":", "plot_kwargs", "=", "{", "}", "data_column", "=", "3", "if", "elapsed", "else", "1", "data", "=", "np", ".", "genfromtxt", "(", "filename", ",", "dtype", "=", "'i8,f4'", ",", "usecols", "=", "(", "0", ",", "data_column", ")", ",", "names", "=", "[", "'k'", ",", "'v'", "]", ")", "index", "=", "data", "[", "'k'", "]", "values", "=", "data", "[", "'v'", "]", "if", "unit", "==", "'s'", ":", "pass", "elif", "unit", "==", "'m'", ":", "values", "/=", "60", "elif", "unit", "==", "'h'", ":", "values", "/=", "3600", "elif", "unit", "==", "'d'", ":", "values", "/=", "3600", "*", "24", "else", ":", "raise", "ValueError", "(", "'The argument `unit` must be chosen from {s|m|h|d}.'", ")", "plt", ".", "plot", "(", "index", ",", "values", ",", "*", "*", "plot_kwargs", ")" ]
31.861111
24.25
def count_subgraph_sizes(graph: BELGraph, annotation: str = 'Subgraph') -> Counter[int]:
    """Count the number of nodes in each subgraph induced by an annotation.

    :param annotation: The annotation to group by and compare. Defaults to 'Subgraph'.
    :return: A dictionary from {annotation value: number of nodes}
    """
    return count_dict_values(group_nodes_by_annotation(graph, annotation))
[ "def", "count_subgraph_sizes", "(", "graph", ":", "BELGraph", ",", "annotation", ":", "str", "=", "'Subgraph'", ")", "->", "Counter", "[", "int", "]", ":", "return", "count_dict_values", "(", "group_nodes_by_annotation", "(", "graph", ",", "annotation", ")", ")" ]
56.428571
27.571429
def list_tickets(self, **kwargs):
    """List all tickets, optionally filtered by a view.

    Specify filters as keyword arguments, such as:

    filter_name = one of ['all_tickets', 'new_my_open', 'spam', 'deleted', None]
        (defaults to 'all_tickets'; passing None uses the default)

    Multiple filters are AND'd together.
    """
    filter_name = 'all_tickets'
    if 'filter_name' in kwargs and kwargs['filter_name'] is not None:
        filter_name = kwargs['filter_name']
        del kwargs['filter_name']

    url = 'helpdesk/tickets/filter/%s?format=json' % filter_name
    page = 1
    tickets = []

    # Handle pagination by looping over each page and collecting tickets
    while True:
        this_page = self._api._get(url + '&page=%d' % page, kwargs)
        if len(this_page) == 0:
            break
        tickets += this_page
        page += 1

    return [self.get_ticket(t['display_id']) for t in tickets]
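The pagination idiom in isolation, with the API call replaced by a canned stub (_get here is a stand-in, not the real Freshdesk client):

pages = {1: ['t1', 't2'], 2: ['t3']}

def _get(page):
    # return an empty list once we run past the last page
    return pages.get(page, [])

tickets, page = [], 1
while True:
    this_page = _get(page)
    if len(this_page) == 0:
        break
    tickets += this_page
    page += 1
print(tickets)  # ['t1', 't2', 't3']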
[ "def", "list_tickets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "filter_name", "=", "'all_tickets'", "if", "'filter_name'", "in", "kwargs", "and", "kwargs", "[", "'filter_name'", "]", "is", "not", "None", ":", "filter_name", "=", "kwargs", "[", "'filter_name'", "]", "del", "kwargs", "[", "'filter_name'", "]", "url", "=", "'helpdesk/tickets/filter/%s?format=json'", "%", "filter_name", "page", "=", "1", "tickets", "=", "[", "]", "# Skip pagination by looping over each page and adding tickets", "while", "True", ":", "this_page", "=", "self", ".", "_api", ".", "_get", "(", "url", "+", "'&page=%d'", "%", "page", ",", "kwargs", ")", "if", "len", "(", "this_page", ")", "==", "0", ":", "break", "tickets", "+=", "this_page", "page", "+=", "1", "return", "[", "self", ".", "get_ticket", "(", "t", "[", "'display_id'", "]", ")", "for", "t", "in", "tickets", "]" ]
35.034483
20.931034
def pegasus_node_placer_2d(G, scale=1., center=None, dim=2, crosses=False):
    """Generates a function that converts Pegasus indices to x, y
    coordinates for a plot.

    Parameters
    ----------
    G : NetworkX graph
        Should be a Pegasus graph or a subgraph of a Pegasus graph.
        This should be the product of dwave_networkx.pegasus_graph

    scale : float (default 1.)
        Scale factor. When scale = 1, all positions fit within [0, 1]
        on the x-axis and [-1, 0] on the y-axis.

    center : None or array (default None)
        Coordinates of the top left corner.

    dim : int (default 2)
        Number of dimensions. When dim > 2, all extra dimensions are
        set to 0.

    crosses : boolean (optional, default False)
        If crosses is True, K_4,4 subgraphs are shown in a cross
        rather than L configuration.

    Returns
    -------
    xy_coords : function
        A function that maps a Pegasus index (u, w, k, z) in a
        Pegasus lattice to x, y coordinates such as used by a plot.
    """
    import numpy as np

    m = G.graph.get('rows')
    h_offsets = G.graph.get("horizontal_offsets")
    v_offsets = G.graph.get("vertical_offsets")
    tile_width = G.graph.get("tile")
    tile_center = tile_width / 2 - .5

    # want the entire plot to fill in [0, 1] when scale=1
    scale /= m * tile_width

    if center is None:
        center = np.zeros(dim)
    else:
        center = np.asarray(center)

    paddims = dim - 2
    if paddims < 0:
        raise ValueError("layout must have at least two dimensions")

    if len(center) != dim:
        raise ValueError("length of center coordinates must match dimension of layout")

    if crosses:
        # adjustment for crosses
        cross_shift = 2.
    else:
        cross_shift = 0.

    def _xy_coords(u, w, k, z):
        # orientation, major perpendicular offset,
        # minor perpendicular offset, parallel offset
        if k % 2:
            p = -.1
        else:
            p = .1

        if u:
            xy = np.array([z * tile_width + h_offsets[k] + tile_center,
                           -tile_width * w - k - p + cross_shift])
        else:
            xy = np.array([tile_width * w + k + p + cross_shift,
                           -z * tile_width - v_offsets[k] - tile_center])

        # convention for Pegasus-lattice pictures is to invert the y-axis
        return np.hstack((xy * scale, np.zeros(paddims))) + center

    return _xy_coords
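Assuming dwave_networkx is installed, the returned closure maps a Pegasus index directly to a plot position:

import dwave_networkx as dnx

G = dnx.pegasus_graph(2)   # a small Pegasus lattice
xy = pegasus_node_placer_2d(G)
print(xy(0, 0, 4, 0))      # x, y position for the index (u=0, w=0, k=4, z=0)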
[ "def", "pegasus_node_placer_2d", "(", "G", ",", "scale", "=", "1.", ",", "center", "=", "None", ",", "dim", "=", "2", ",", "crosses", "=", "False", ")", ":", "import", "numpy", "as", "np", "m", "=", "G", ".", "graph", ".", "get", "(", "'rows'", ")", "h_offsets", "=", "G", ".", "graph", ".", "get", "(", "\"horizontal_offsets\"", ")", "v_offsets", "=", "G", ".", "graph", ".", "get", "(", "\"vertical_offsets\"", ")", "tile_width", "=", "G", ".", "graph", ".", "get", "(", "\"tile\"", ")", "tile_center", "=", "tile_width", "/", "2", "-", ".5", "# want the enter plot to fill in [0, 1] when scale=1", "scale", "/=", "m", "*", "tile_width", "if", "center", "is", "None", ":", "center", "=", "np", ".", "zeros", "(", "dim", ")", "else", ":", "center", "=", "np", ".", "asarray", "(", "center", ")", "paddims", "=", "dim", "-", "2", "if", "paddims", "<", "0", ":", "raise", "ValueError", "(", "\"layout must have at least two dimensions\"", ")", "if", "len", "(", "center", ")", "!=", "dim", ":", "raise", "ValueError", "(", "\"length of center coordinates must match dimension of layout\"", ")", "if", "crosses", ":", "# adjustment for crosses", "cross_shift", "=", "2.", "else", ":", "cross_shift", "=", "0.", "def", "_xy_coords", "(", "u", ",", "w", ",", "k", ",", "z", ")", ":", "# orientation, major perpendicular offset, minor perpendicular offset, parallel offset", "if", "k", "%", "2", ":", "p", "=", "-", ".1", "else", ":", "p", "=", ".1", "if", "u", ":", "xy", "=", "np", ".", "array", "(", "[", "z", "*", "tile_width", "+", "h_offsets", "[", "k", "]", "+", "tile_center", ",", "-", "tile_width", "*", "w", "-", "k", "-", "p", "+", "cross_shift", "]", ")", "else", ":", "xy", "=", "np", ".", "array", "(", "[", "tile_width", "*", "w", "+", "k", "+", "p", "+", "cross_shift", ",", "-", "z", "*", "tile_width", "-", "v_offsets", "[", "k", "]", "-", "tile_center", "]", ")", "# convention for Pegasus-lattice pictures is to invert the y-axis", "return", "np", ".", "hstack", "(", "(", "xy", "*", "scale", ",", "np", ".", "zeros", "(", "paddims", ")", ")", ")", "+", "center", "return", "_xy_coords" ]
29.679487
24.884615
def lineage(self, tax_id=None, tax_name=None):
    """Public method for returning a lineage; includes tax_name and rank"""
    if not bool(tax_id) ^ bool(tax_name):
        msg = 'Exactly one of tax_id and tax_name may be provided.'
        raise ValueError(msg)

    if tax_name:
        tax_id, primary_name, is_primary = self.primary_from_name(tax_name)
    else:
        primary_name = None

    # assumes stable ordering of lineage from root --> leaf
    lintups = self._get_lineage(tax_id)
    ldict = dict(lintups)

    ldict['tax_id'] = tax_id

    try:
        # parent is second to last element, except for root
        __, ldict['parent_id'] = lintups[-2]
    except IndexError:
        ldict['parent_id'] = None

    ldict['rank'], __ = lintups[-1]  # this taxon is last element in lineage
    ldict['tax_name'] = primary_name or self.primary_from_id(tax_id)

    return ldict
[ "def", "lineage", "(", "self", ",", "tax_id", "=", "None", ",", "tax_name", "=", "None", ")", ":", "if", "not", "bool", "(", "tax_id", ")", "^", "bool", "(", "tax_name", ")", ":", "msg", "=", "'Exactly one of tax_id and tax_name may be provided.'", "raise", "ValueError", "(", "msg", ")", "if", "tax_name", ":", "tax_id", ",", "primary_name", ",", "is_primary", "=", "self", ".", "primary_from_name", "(", "tax_name", ")", "else", ":", "primary_name", "=", "None", "# assumes stable ordering of lineage from root --> leaf", "lintups", "=", "self", ".", "_get_lineage", "(", "tax_id", ")", "ldict", "=", "dict", "(", "lintups", ")", "ldict", "[", "'tax_id'", "]", "=", "tax_id", "try", ":", "# parent is second to last element, except for root", "__", ",", "ldict", "[", "'parent_id'", "]", "=", "lintups", "[", "-", "2", "]", "except", "IndexError", ":", "ldict", "[", "'parent_id'", "]", "=", "None", "ldict", "[", "'rank'", "]", ",", "__", "=", "lintups", "[", "-", "1", "]", "# this taxon is last element in lineage", "ldict", "[", "'tax_name'", "]", "=", "primary_name", "or", "self", ".", "primary_from_id", "(", "tax_id", ")", "return", "ldict" ]
32.758621
21.965517
def demo_update(self):
    """Performs a demonstration update by calling the demo optimization
    operation.

    Note that the batch data does not have to be fetched from the demo
    memory as this is now part of the TensorFlow operation of the demo
    update.
    """
    fetches = self.demo_optimization_output
    self.monitored_session.run(fetches=fetches)
[ "def", "demo_update", "(", "self", ")", ":", "fetches", "=", "self", ".", "demo_optimization_output", "self", ".", "monitored_session", ".", "run", "(", "fetches", "=", "fetches", ")" ]
42.333333
21.666667
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
    """Download a given Wikimedia Commons file."""
    image_name = clean_up_filename(image_name)
    logging.info("Downloading %s with width %s", image_name, width)
    try:
        contents, output_file_name = get_thumbnail_of_file(image_name, width)
    except RequestedWidthBiggerThanSourceException:
        logging.warning("Requested width is bigger than source - downloading full size")
        contents, output_file_name = get_full_size_file(image_name)
    output_file_path = os.path.join(output_path, output_file_name)
    try:
        with open(output_file_path, 'wb') as f:
            logging.debug("Writing as %s", output_file_path)
            f.write(contents)
            return output_file_path
    except IOError as e:
        msg = 'Could not write file %s on disk to %s: %s' % \
            (image_name, output_path, e)
        logging.error(msg)
        raise CouldNotWriteFileOnDiskException(msg)
    except Exception as e:
        logging.critical(e)
        msg = 'An unexpected error occurred when downloading %s to %s: %s' % \
            (image_name, output_path, e)
        raise DownloadException(msg)
[ "def", "download_file", "(", "image_name", ",", "output_path", ",", "width", "=", "DEFAULT_WIDTH", ")", ":", "image_name", "=", "clean_up_filename", "(", "image_name", ")", "logging", ".", "info", "(", "\"Downloading %s with width %s\"", ",", "image_name", ",", "width", ")", "try", ":", "contents", ",", "output_file_name", "=", "get_thumbnail_of_file", "(", "image_name", ",", "width", ")", "except", "RequestedWidthBiggerThanSourceException", ":", "logging", ".", "warning", "(", "\"Requested width is bigger than source - downloading full size\"", ")", "contents", ",", "output_file_name", "=", "get_full_size_file", "(", "image_name", ")", "output_file_path", "=", "os", ".", "path", ".", "join", "(", "output_path", ",", "output_file_name", ")", "try", ":", "with", "open", "(", "output_file_path", ",", "'wb'", ")", "as", "f", ":", "logging", ".", "debug", "(", "\"Writing as %s\"", ",", "output_file_path", ")", "f", ".", "write", "(", "contents", ")", "return", "output_file_path", "except", "IOError", ",", "e", ":", "msg", "=", "'Could not write file %s on disk to %s: %s'", "%", "(", "image_name", ",", "output_path", ",", "e", ".", "message", ")", "logging", ".", "error", "(", "msg", ")", "raise", "CouldNotWriteFileOnDiskException", "(", "msg", ")", "except", "Exception", ",", "e", ":", "logging", ".", "critical", "(", "e", ".", "message", ")", "msg", "=", "'An unexpected error occured when downloading %s to %s: %s'", "%", "(", "image_name", ",", "output_path", ",", "e", ".", "message", ")", "raise", "DownloadException", "(", "msg", ")" ]
47.64
18.52
def eval(self, x, y, z):
    """Evaluate the function in (x, y, z).

    The function is rotationally symmetric around z.
    """
    ro = np.sqrt(x**2 + y**2)
    zs, xs = ro.shape
    v = self.eval_xz(ro.ravel(), z.ravel())
    return v.reshape(zs, xs)
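The radial-coordinate trick expects x, y (and z) as 2-D meshgrid arrays; the shape bookkeeping in isolation:

import numpy as np

x, y = np.meshgrid(np.linspace(-1, 1, 4), np.linspace(-1, 1, 3))
ro = np.sqrt(x**2 + y**2)  # radial distance from the z axis
zs, xs = ro.shape
print(zs, xs)              # 3 4 -- flattened for eval_xz, then reshaped back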
[ "def", "eval", "(", "self", ",", "x", ",", "y", ",", "z", ")", ":", "ro", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", ")", "zs", ",", "xs", "=", "ro", ".", "shape", "v", "=", "self", ".", "eval_xz", "(", "ro", ".", "ravel", "(", ")", ",", "z", ".", "ravel", "(", ")", ")", "return", "v", ".", "reshape", "(", "zs", ",", "xs", ")" ]
34.25
8.625
def list_staged_files(self) -> typing.List[str]:
    """
    :return: staged files
    :rtype: list of str
    """
    staged_files: typing.List[str] = [x.a_path for x in self.repo.index.diff('HEAD')]
    LOGGER.debug('staged files: %s', staged_files)
    return staged_files
[ "def", "list_staged_files", "(", "self", ")", "->", "typing", ".", "List", "[", "str", "]", ":", "staged_files", ":", "typing", ".", "List", "[", "str", "]", "=", "[", "x", ".", "a_path", "for", "x", "in", "self", ".", "repo", ".", "index", ".", "diff", "(", "'HEAD'", ")", "]", "LOGGER", ".", "debug", "(", "'staged files: %s'", ",", "staged_files", ")", "return", "staged_files" ]
37
13.5
def _bsp_traverse(
    node_iter: Iterable[tcod.bsp.BSP],
    callback: Callable[[tcod.bsp.BSP, Any], None],
    userData: Any,
) -> None:
    """Pack the callback into a handle for use with the
    _pycall_bsp_callback callback: ``callback(node, userData)`` is
    invoked for every node in ``node_iter``.
    """
    for node in node_iter:
        callback(node, userData)
[ "def", "_bsp_traverse", "(", "node_iter", ":", "Iterable", "[", "tcod", ".", "bsp", ".", "BSP", "]", ",", "callback", ":", "Callable", "[", "[", "tcod", ".", "bsp", ".", "BSP", ",", "Any", "]", ",", "None", "]", ",", "userData", ":", "Any", ",", ")", "->", "None", ":", "for", "node", "in", "node_iter", ":", "callback", "(", "node", ",", "userData", ")" ]
28.3
12.4
def remove(self, path, recursive=False, use_sudo=False):
    """Remove a file or directory."""
    func = run_as_root if use_sudo else self.run
    options = '-r ' if recursive else ''
    func('/bin/rm {0}{1}'.format(options, quote(path)))
[ "def", "remove", "(", "self", ",", "path", ",", "recursive", "=", "False", ",", "use_sudo", "=", "False", ")", ":", "func", "=", "use_sudo", "and", "run_as_root", "or", "self", ".", "run", "options", "=", "'-r '", "if", "recursive", "else", "''", "func", "(", "'/bin/rm {0}{1}'", ".", "format", "(", "options", ",", "quote", "(", "path", ")", ")", ")" ]
38
8
def add(self, data):
    """Adds a new data node to the front of the list.

    The provided data will be encapsulated into a new instance of the
    LinkedListNode class, and the linked list pointers will be updated,
    as well as the list's size.

    :param data: the data to be inserted in the new list node
    :type data: object
    """
    node = LinkedListNode(data, None)
    if self._size == 0:
        self._first_node = node
        self._last_node = node
    else:
        second_node = self._first_node
        self._first_node = node
        self._first_node.update_next(second_node)
    self._size += 1
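To exercise add, a minimal hypothetical LinkedListNode and list skeleton suffice (the real classes presumably carry more machinery):

class LinkedListNode:
    def __init__(self, data, next_node):
        self.data, self.next = data, next_node

    def update_next(self, node):
        self.next = node

class LinkedList:
    def __init__(self):
        self._size = 0
        self._first_node = None
        self._last_node = None

    add = add  # reuse the method defined above

ll = LinkedList()
for v in (1, 2, 3):
    ll.add(v)
print(ll._first_node.data)  # 3 -- the newest element sits at the front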
[ "def", "add", "(", "self", ",", "data", ")", ":", "node", "=", "LinkedListNode", "(", "data", ",", "None", ")", "if", "self", ".", "_size", "==", "0", ":", "self", ".", "_first_node", "=", "node", "self", ".", "_last_node", "=", "node", "else", ":", "second_node", "=", "self", ".", "_first_node", "self", ".", "_first_node", "=", "node", "self", ".", "_first_node", ".", "update_next", "(", "second_node", ")", "self", ".", "_size", "+=", "1" ]
34
16.736842
def __connect(self):
    """Connect to the database."""
    self.__methods = _get_methods_by_uri(self.sqluri)
    uri_connect_method = self.__methods[METHOD_CONNECT]
    self.__dbapi2_conn = uri_connect_method(self.sqluri)
[ "def", "__connect", "(", "self", ")", ":", "self", ".", "__methods", "=", "_get_methods_by_uri", "(", "self", ".", "sqluri", ")", "uri_connect_method", "=", "self", ".", "__methods", "[", "METHOD_CONNECT", "]", "self", ".", "__dbapi2_conn", "=", "uri_connect_method", "(", "self", ".", "sqluri", ")" ]
31.75
16
def set_val(self, direct, section, val):
    """Set the config values."""
    if val is not None:
        self.config.set(direct, section, val)
        self.update()
[ "def", "set_val", "(", "self", ",", "direct", ",", "section", ",", "val", ")", ":", "if", "val", "is", "not", "None", ":", "self", ".", "config", ".", "set", "(", "direct", ",", "section", ",", "val", ")", "self", ".", "update", "(", ")" ]
35.6
7.4
def get_price_as_price(reward):
    """Return a Price data structure from either a float or a Price."""
    if isinstance(reward, Price):
        final_price = reward
    else:
        final_price = Price(reward)
    return final_price
[ "def", "get_price_as_price", "(", "reward", ")", ":", "if", "isinstance", "(", "reward", ",", "Price", ")", ":", "final_price", "=", "reward", "else", ":", "final_price", "=", "Price", "(", "reward", ")", "return", "final_price" ]
29.888889
10.111111
def code_deparse_around_offset(name, offset, co, out=StringIO(),
                               version=None, is_pypy=None,
                               debug_opts=DEFAULT_DEBUG_OPTS):
    """Like code_deparse(), but given a function/module name and offset,
    finds the node closest to offset. If offset is not an instruction
    boundary, we raise an IndexError.
    """
    assert iscode(co)

    if version is None:
        version = sysinfo2float()
    if is_pypy is None:
        is_pypy = IS_PYPY

    deparsed = code_deparse(co, out, version, is_pypy, debug_opts)
    if (name, offset) in deparsed.offsets.keys():
        # This is the easy case
        return deparsed

    valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)]
    offset_list = sorted([t[1] for t in valid_offsets if t[0] == name])

    # FIXME: should check for branching?
    found_offset = find_gt(offset_list, offset)
    deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset]
    return deparsed
[ "def", "code_deparse_around_offset", "(", "name", ",", "offset", ",", "co", ",", "out", "=", "StringIO", "(", ")", ",", "version", "=", "None", ",", "is_pypy", "=", "None", ",", "debug_opts", "=", "DEFAULT_DEBUG_OPTS", ")", ":", "assert", "iscode", "(", "co", ")", "if", "version", "is", "None", ":", "version", "=", "sysinfo2float", "(", ")", "if", "is_pypy", "is", "None", ":", "is_pypy", "=", "IS_PYPY", "deparsed", "=", "code_deparse", "(", "co", ",", "out", ",", "version", ",", "is_pypy", ",", "debug_opts", ")", "if", "(", "name", ",", "offset", ")", "in", "deparsed", ".", "offsets", ".", "keys", "(", ")", ":", "# This is the easy case", "return", "deparsed", "valid_offsets", "=", "[", "t", "for", "t", "in", "deparsed", ".", "offsets", "if", "isinstance", "(", "t", "[", "1", "]", ",", "int", ")", "]", "offset_list", "=", "sorted", "(", "[", "t", "[", "1", "]", "for", "t", "in", "valid_offsets", "if", "t", "[", "0", "]", "==", "name", "]", ")", "# FIXME: should check for branching?", "found_offset", "=", "find_gt", "(", "offset_list", ",", "offset", ")", "deparsed", ".", "offsets", "[", "name", ",", "offset", "]", "=", "deparsed", ".", "offsets", "[", "name", ",", "found_offset", "]", "return", "deparsed" ]
36.740741
21.037037
def send(self, message, envelope_from=None):
    """Verifies and sends message.

    :param message: Message instance.
    :param envelope_from: Email address to be used in MAIL FROM command.
    """
    assert message.send_to, "No recipients have been added"

    assert message.sender, (
        "The message does not specify a sender and a default sender "
        "has not been configured")

    if message.has_bad_headers():
        raise BadHeaderError

    if message.date is None:
        message.date = time.time()

    ret = None
    if self.host:
        ret = self.host.sendmail(
            sanitize_address(envelope_from or message.sender),
            list(sanitize_addresses(message.send_to)),
            message.as_bytes() if PY3 else message.as_string(),
            message.mail_options,
            message.rcpt_options
        )

    email_dispatched.send(message, app=current_app._get_current_object())

    self.num_emails += 1

    if self.num_emails == self.mail.max_emails:
        self.num_emails = 0
        if self.host:
            self.host.quit()
            self.host = self.configure_host()
    return ret
[ "def", "send", "(", "self", ",", "message", ",", "envelope_from", "=", "None", ")", ":", "assert", "message", ".", "send_to", ",", "\"No recipients have been added\"", "assert", "message", ".", "sender", ",", "(", "\"The message does not specify a sender and a default sender \"", "\"has not been configured\"", ")", "if", "message", ".", "has_bad_headers", "(", ")", ":", "raise", "BadHeaderError", "if", "message", ".", "date", "is", "None", ":", "message", ".", "date", "=", "time", ".", "time", "(", ")", "ret", "=", "None", "if", "self", ".", "host", ":", "ret", "=", "self", ".", "host", ".", "sendmail", "(", "sanitize_address", "(", "envelope_from", "or", "message", ".", "sender", ")", ",", "list", "(", "sanitize_addresses", "(", "message", ".", "send_to", ")", ")", ",", "message", ".", "as_bytes", "(", ")", "if", "PY3", "else", "message", ".", "as_string", "(", ")", ",", "message", ".", "mail_options", ",", "message", ".", "rcpt_options", ")", "email_dispatched", ".", "send", "(", "message", ",", "app", "=", "current_app", ".", "_get_current_object", "(", ")", ")", "self", ".", "num_emails", "+=", "1", "if", "self", ".", "num_emails", "==", "self", ".", "mail", ".", "max_emails", ":", "self", ".", "num_emails", "=", "0", "if", "self", ".", "host", ":", "self", ".", "host", ".", "quit", "(", ")", "self", ".", "host", "=", "self", ".", "configure_host", "(", ")", "return", "ret" ]
31.25641
19.487179
def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs):
    """Creates the symbolic graph of an adversarial example given the name
    of an attack. Simplifies creating the symbolic graph of an attack by
    defining dataset-specific parameters. Dataset-specific default
    parameters are used unless a different value is given in kwargs.

    :param model: an object of Model class
    :param x: Symbolic input to the attack.
    :param attack_type: A string that is the name of an attack.
    :param sess: Tensorflow session.
    :param dataset: The name of the dataset as a string to use for default
                    params.
    :param y: (optional) a symbolic variable for the labels.
    :param kwargs: (optional) additional parameters to be passed to the
                   attack.
    """
    # TODO: black box attacks
    attack_names = {'FGSM': FastGradientMethod,
                    'MadryEtAl': MadryEtAl,
                    'MadryEtAl_y': MadryEtAl,
                    'MadryEtAl_multigpu': MadryEtAlMultiGPU,
                    'MadryEtAl_y_multigpu': MadryEtAlMultiGPU,
                    }

    if attack_type not in attack_names:
        raise Exception('Attack %s not defined.' % attack_type)

    attack_params_shared = {
        'mnist': {'eps': .3, 'eps_iter': 0.01, 'clip_min': 0.,
                  'clip_max': 1., 'nb_iter': 40},
        'cifar10': {'eps': 8. / 255, 'eps_iter': 0.01, 'clip_min': 0.,
                    'clip_max': 1., 'nb_iter': 20}
    }

    with tf.variable_scope(attack_type):
        attack_class = attack_names[attack_type]
        attack = attack_class(model, sess=sess)

        # Extract feedable and structural keyword arguments from kwargs
        fd_kwargs = list(attack.feedable_kwargs.keys()) + attack.structural_kwargs
        params = attack_params_shared[dataset].copy()
        params.update({k: v for k, v in kwargs.items() if v is not None})
        params = {k: v for k, v in params.items() if k in fd_kwargs}
        if '_y' in attack_type:
            params['y'] = y
        logging.info(params)
        adv_x = attack.generate(x, **params)

    return adv_x
[ "def", "create_adv_by_name", "(", "model", ",", "x", ",", "attack_type", ",", "sess", ",", "dataset", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO: black box attacks", "attack_names", "=", "{", "'FGSM'", ":", "FastGradientMethod", ",", "'MadryEtAl'", ":", "MadryEtAl", ",", "'MadryEtAl_y'", ":", "MadryEtAl", ",", "'MadryEtAl_multigpu'", ":", "MadryEtAlMultiGPU", ",", "'MadryEtAl_y_multigpu'", ":", "MadryEtAlMultiGPU", "}", "if", "attack_type", "not", "in", "attack_names", ":", "raise", "Exception", "(", "'Attack %s not defined.'", "%", "attack_type", ")", "attack_params_shared", "=", "{", "'mnist'", ":", "{", "'eps'", ":", ".3", ",", "'eps_iter'", ":", "0.01", ",", "'clip_min'", ":", "0.", ",", "'clip_max'", ":", "1.", ",", "'nb_iter'", ":", "40", "}", ",", "'cifar10'", ":", "{", "'eps'", ":", "8.", "/", "255", ",", "'eps_iter'", ":", "0.01", ",", "'clip_min'", ":", "0.", ",", "'clip_max'", ":", "1.", ",", "'nb_iter'", ":", "20", "}", "}", "with", "tf", ".", "variable_scope", "(", "attack_type", ")", ":", "attack_class", "=", "attack_names", "[", "attack_type", "]", "attack", "=", "attack_class", "(", "model", ",", "sess", "=", "sess", ")", "# Extract feedable and structural keyword arguments from kwargs", "fd_kwargs", "=", "attack", ".", "feedable_kwargs", ".", "keys", "(", ")", "+", "attack", ".", "structural_kwargs", "params", "=", "attack_params_shared", "[", "dataset", "]", ".", "copy", "(", ")", "params", ".", "update", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", ")", "params", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", "if", "k", "in", "fd_kwargs", "}", "if", "'_y'", "in", "attack_type", ":", "params", "[", "'y'", "]", "=", "y", "logging", ".", "info", "(", "params", ")", "adv_x", "=", "attack", ".", "generate", "(", "x", ",", "*", "*", "params", ")", "return", "adv_x" ]
38.72549
20.019608
def selection_pos(self):
    """Return start and end positions of the visual selection respectively."""
    buff = self._vim.current.buffer
    beg = buff.mark('<')
    end = buff.mark('>')
    return beg, end
[ "def", "selection_pos", "(", "self", ")", ":", "buff", "=", "self", ".", "_vim", ".", "current", ".", "buffer", "beg", "=", "buff", ".", "mark", "(", "'<'", ")", "end", "=", "buff", ".", "mark", "(", "'>'", ")", "return", "beg", ",", "end" ]
37.333333
9.666667
def _port_postfix(self):
    """Return an empty string for the default port and ':port' otherwise."""
    port = self.real_connection.port
    default_port = {'https': 443, 'http': 80}[self._protocol]
    return ':{}'.format(port) if port != default_port else ''
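The default-port comparison in isolation:

default_port = {'https': 443, 'http': 80}['http']
for port in (80, 8080):
    print(repr(':{}'.format(port) if port != default_port else ''))
    # '' for the default port 80, ':8080' otherwise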
[ "def", "_port_postfix", "(", "self", ")", ":", "port", "=", "self", ".", "real_connection", ".", "port", "default_port", "=", "{", "'https'", ":", "443", ",", "'http'", ":", "80", "}", "[", "self", ".", "_protocol", "]", "return", "':{}'", ".", "format", "(", "port", ")", "if", "port", "!=", "default_port", "else", "''" ]
41
13.857143
def _check_channel_state_for_update(
        self,
        channel_identifier: ChannelID,
        closer: Address,
        update_nonce: Nonce,
        block_identifier: BlockSpecification,
) -> Optional[str]:
    """Check the channel state on chain to see if it has been updated.

    Compare the nonce we are about to update the contract with against
    the updated nonce in the on-chain state and, if it's the same, return
    a message with which the caller should raise a RaidenRecoverableError.

    If all is okay, return None.
    """
    msg = None
    closer_details = self._detail_participant(
        channel_identifier=channel_identifier,
        participant=closer,
        partner=self.node_address,
        block_identifier=block_identifier,
    )

    if closer_details.nonce == update_nonce:
        msg = (
            'updateNonClosingBalanceProof transaction has already '
            'been mined and updated the channel successfully.'
        )

    return msg
[ "def", "_check_channel_state_for_update", "(", "self", ",", "channel_identifier", ":", "ChannelID", ",", "closer", ":", "Address", ",", "update_nonce", ":", "Nonce", ",", "block_identifier", ":", "BlockSpecification", ",", ")", "->", "Optional", "[", "str", "]", ":", "msg", "=", "None", "closer_details", "=", "self", ".", "_detail_participant", "(", "channel_identifier", "=", "channel_identifier", ",", "participant", "=", "closer", ",", "partner", "=", "self", ".", "node_address", ",", "block_identifier", "=", "block_identifier", ",", ")", "if", "closer_details", ".", "nonce", "==", "update_nonce", ":", "msg", "=", "(", "'updateNonClosingBalanceProof transaction has already '", "'been mined and updated the channel succesfully.'", ")", "return", "msg" ]
36.206897
18.310345
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload):
    """Encode the RTP packet with header fields and payload."""
    timestamp = int(time())
    header = bytearray(HEADER_SIZE)

    # Fill the header bytearray with RTP header fields
    header[0] = header[0] | V << 6
    header[0] = header[0] | P << 5
    header[0] = header[0] | X << 4
    header[0] = header[0] | CC
    header[1] = header[1] | M << 7
    header[1] = header[1] | PT
    header[2] = (seqNum >> 8) & 0xFF
    header[3] = seqNum & 0xFF
    header[4] = (timestamp >> 24) & 0xFF
    header[5] = (timestamp >> 16) & 0xFF
    header[6] = (timestamp >> 8) & 0xFF
    header[7] = timestamp & 0xFF
    header[8] = (SSRC >> 24) & 0xFF
    header[9] = (SSRC >> 16) & 0xFF
    header[10] = (SSRC >> 8) & 0xFF
    header[11] = SSRC & 0xFF

    self.header = header

    # Get the payload
    self.payload = payload
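A quick round-trip check of the bit packing used above (version 2 per RFC 3550; the 16-bit sequence number is split across two header bytes):

V, P, X, CC = 2, 0, 0, 0
byte0 = V << 6 | P << 5 | X << 4 | CC
assert byte0 >> 6 == 2           # version recovered from the top two bits

seqNum = 0xABCD
hi, lo = (seqNum >> 8) & 0xFF, seqNum & 0xFF
assert (hi << 8) | lo == seqNum  # sequence number survives the split
print('packing OK')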
[ "def", "encode", "(", "self", ",", "V", ",", "P", ",", "X", ",", "CC", ",", "seqNum", ",", "M", ",", "PT", ",", "SSRC", ",", "payload", ")", ":", "timestamp", "=", "int", "(", "time", "(", ")", ")", "header", "=", "bytearray", "(", "HEADER_SIZE", ")", "# Fill the header bytearray with RTP header fields", "# ...", "header", "[", "0", "]", "=", "header", "[", "0", "]", "|", "V", "<<", "6", "header", "[", "0", "]", "=", "header", "[", "0", "]", "|", "P", "<<", "5", "header", "[", "0", "]", "=", "header", "[", "0", "]", "|", "X", "<<", "4", "header", "[", "0", "]", "=", "header", "[", "0", "]", "|", "CC", "header", "[", "1", "]", "=", "header", "[", "1", "]", "|", "M", "<<", "7", "header", "[", "1", "]", "=", "header", "[", "1", "]", "|", "PT", "header", "[", "2", "]", "=", "(", "seqNum", ">>", "8", ")", "&", "0xFF", "header", "[", "3", "]", "=", "seqNum", "&", "0xFF", "header", "[", "4", "]", "=", "(", "timestamp", ">>", "24", ")", "&", "0xFF", "header", "[", "5", "]", "=", "(", "timestamp", ">>", "16", ")", "&", "0xFF", "header", "[", "6", "]", "=", "(", "timestamp", ">>", "8", ")", "&", "0xFF", "header", "[", "7", "]", "=", "timestamp", "&", "0xFF", "header", "[", "8", "]", "=", "(", "SSRC", ">>", "24", ")", "&", "0xFF", "header", "[", "9", "]", "=", "(", "SSRC", ">>", "16", ")", "&", "0xFF", "header", "[", "10", "]", "=", "(", "SSRC", ">>", "8", ")", "&", "0xFF", "header", "[", "11", "]", "=", "SSRC", "&", "0xFF", "self", ".", "header", "=", "header", "# Get the payload", "# ...", "self", ".", "payload", "=", "payload" ]
35.928571
9.035714
def encode_mv(vals):
    """For multivalues, values are wrapped in '$' and separated using ';'.

    Literal '$' values are represented with '$$'.
    """
    s = ""
    for val in vals:
        val = val.replace('$', '$$')
        if len(s) > 0:
            s += ';'
        s += '$' + val + '$'
    return s
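Behavior on a value containing a literal '$', and on empty input:

print(encode_mv(['a', 'b$c']))  # $a$;$b$$c$
print(repr(encode_mv([])))      # '' -- empty input yields an empty string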
[ "def", "encode_mv", "(", "vals", ")", ":", "s", "=", "\"\"", "for", "val", "in", "vals", ":", "val", "=", "val", ".", "replace", "(", "'$'", ",", "'$$'", ")", "if", "len", "(", "s", ")", ">", "0", ":", "s", "+=", "';'", "s", "+=", "'$'", "+", "val", "+", "'$'", "return", "s" ]
26.545455
17.454545
def ngrams(path, elem, ignore_hash=True):
    """Yields N-grams from a JSTOR DfR dataset.

    Parameters
    ----------
    path : string
        Path to unzipped JSTOR DfR folder containing N-grams.
    elem : string
        Name of subdirectory containing N-grams (e.g. 'bigrams').
    ignore_hash : bool
        If True, will exclude all N-grams that contain the hash '#'
        character.

    Returns
    -------
    ngrams : :class:`.FeatureSet`
    """
    grams = GramGenerator(path, elem, ignore_hash=ignore_hash)
    return FeatureSet({k: Feature(f) for k, f in grams})
[ "def", "ngrams", "(", "path", ",", "elem", ",", "ignore_hash", "=", "True", ")", ":", "grams", "=", "GramGenerator", "(", "path", ",", "elem", ",", "ignore_hash", "=", "ignore_hash", ")", "return", "FeatureSet", "(", "{", "k", ":", "Feature", "(", "f", ")", "for", "k", ",", "f", "in", "grams", "}", ")" ]
26.714286
22.333333
def predict_proba(self, X, raw_score=False, num_iteration=None,
                  pred_leaf=False, pred_contrib=False, **kwargs):
    """Return the predicted probability for each class for each sample.

    Parameters
    ----------
    X : array-like or sparse matrix of shape = [n_samples, n_features]
        Input features matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    num_iteration : int or None, optional (default=None)
        Limit number of iterations in the prediction.
        If None, if the best iteration exists, it is used; otherwise,
        all trees are used.
        If <= 0, all trees are used (no limits).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        Note
        ----
        If you want to get more explanations for your model's predictions
        using SHAP values, like SHAP interaction values, you can install
        the shap package (https://github.com/slundberg/shap). Note that
        unlike the shap package, with ``pred_contrib`` we return a matrix
        with an extra column, where the last column is the expected value.

    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    predicted_probability : array-like of shape = [n_samples, n_classes]
        The predicted probability for each class for each sample.
    X_leaves : array-like of shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each
        sample.
    X_SHAP_values : array-like of shape = [n_samples, (n_features + 1) * n_classes]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    result = super(LGBMClassifier, self).predict(X, raw_score, num_iteration,
                                                 pred_leaf, pred_contrib,
                                                 **kwargs)
    if self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
        return result
    else:
        return np.vstack((1. - result, result)).transpose()
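The binary-class branch in isolation: the booster returns only p(class 1), and the method widens it to the two-column layout scikit-learn expects:

import numpy as np

result = np.array([0.2, 0.9])  # p(class 1) per sample
proba = np.vstack((1. - result, result)).transpose()
print(proba)  # [[0.8 0.2]
              #  [0.1 0.9]]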
[ "def", "predict_proba", "(", "self", ",", "X", ",", "raw_score", "=", "False", ",", "num_iteration", "=", "None", ",", "pred_leaf", "=", "False", ",", "pred_contrib", "=", "False", ",", "*", "*", "kwargs", ")", ":", "result", "=", "super", "(", "LGBMClassifier", ",", "self", ")", ".", "predict", "(", "X", ",", "raw_score", ",", "num_iteration", ",", "pred_leaf", ",", "pred_contrib", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_n_classes", ">", "2", "or", "raw_score", "or", "pred_leaf", "or", "pred_contrib", ":", "return", "result", "else", ":", "return", "np", ".", "vstack", "(", "(", "1.", "-", "result", ",", "result", ")", ")", ".", "transpose", "(", ")" ]
49.644444
26.133333
def run_daemon(self):
    """Used as daemon starter.

    Warning: DO NOT OVERRIDE THIS.
    """
    try:
        self.daemon_runner.do_action()
    except daemon.runner.DaemonRunnerStopFailureError:
        self.onStopFail()
    except SystemExit:
        self.onExit()
[ "def", "run_daemon", "(", "self", ")", ":", "try", ":", "self", ".", "daemon_runner", ".", "do_action", "(", ")", "except", "daemon", ".", "runner", ".", "DaemonRunnerStopFailureError", ":", "self", ".", "onStopFail", "(", ")", "except", "SystemExit", ":", "self", ".", "onExit", "(", ")" ]
24.230769
14.384615
def get_initial(self):
    """Supply the user object as initial data for the specified user_field(s)."""
    data = super(UserViewMixin, self).get_initial()
    for k in self.user_field:
        data[k] = self.request.user
    return data
[ "def", "get_initial", "(", "self", ")", ":", "data", "=", "super", "(", "UserViewMixin", ",", "self", ")", ".", "get_initial", "(", ")", "for", "k", "in", "self", ".", "user_field", ":", "data", "[", "k", "]", "=", "self", ".", "request", ".", "user", "return", "data" ]
26.5
17.7
def from_set(cls, database, key, data, clear=False):
    """Create and populate a Set object from a data set."""
    s = cls(database, key)
    if clear:
        s.clear()
    s.add(*data)
    return s
[ "def", "from_set", "(", "cls", ",", "database", ",", "key", ",", "data", ",", "clear", "=", "False", ")", ":", "s", "=", "cls", "(", "database", ",", "key", ")", "if", "clear", ":", "s", ".", "clear", "(", ")", "s", ".", "add", "(", "*", "data", ")", "return", "s" ]
26.111111
13.888889
def load_image(name, n, m=None, gpu=None, square=None):
    """Function to load images with a certain size."""
    if m is None:
        m = n
    if gpu is None:
        gpu = 0
    if square is None:
        square = 0
    command = ('Shearlab.load_image("{}", {}, {}, {}, {})'
               .format(name, n, m, gpu, square))
    return j.eval(command)
[ "def", "load_image", "(", "name", ",", "n", ",", "m", "=", "None", ",", "gpu", "=", "None", ",", "square", "=", "None", ")", ":", "if", "m", "is", "None", ":", "m", "=", "n", "if", "gpu", "is", "None", ":", "gpu", "=", "0", "if", "square", "is", "None", ":", "square", "=", "0", "command", "=", "(", "'Shearlab.load_image(\"{}\", {}, {}, {}, {})'", ".", "format", "(", "name", ",", "n", ",", "m", ",", "gpu", ",", "square", ")", ")", "return", "j", ".", "eval", "(", "command", ")" ]
31.090909
18.363636
def _from_dict(cls, _dict):
    """Initialize a SourceOptions object from a json dictionary."""
    args = {}
    if 'folders' in _dict:
        args['folders'] = [
            SourceOptionsFolder._from_dict(x) for x in (_dict.get('folders'))
        ]
    if 'objects' in _dict:
        args['objects'] = [
            SourceOptionsObject._from_dict(x) for x in (_dict.get('objects'))
        ]
    if 'site_collections' in _dict:
        args['site_collections'] = [
            SourceOptionsSiteColl._from_dict(x)
            for x in (_dict.get('site_collections'))
        ]
    if 'urls' in _dict:
        args['urls'] = [
            SourceOptionsWebCrawl._from_dict(x) for x in (_dict.get('urls'))
        ]
    if 'buckets' in _dict:
        args['buckets'] = [
            SourceOptionsBuckets._from_dict(x) for x in (_dict.get('buckets'))
        ]
    if 'crawl_all_buckets' in _dict:
        args['crawl_all_buckets'] = _dict.get('crawl_all_buckets')
    return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'folders'", "in", "_dict", ":", "args", "[", "'folders'", "]", "=", "[", "SourceOptionsFolder", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'folders'", ")", ")", "]", "if", "'objects'", "in", "_dict", ":", "args", "[", "'objects'", "]", "=", "[", "SourceOptionsObject", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'objects'", ")", ")", "]", "if", "'site_collections'", "in", "_dict", ":", "args", "[", "'site_collections'", "]", "=", "[", "SourceOptionsSiteColl", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'site_collections'", ")", ")", "]", "if", "'urls'", "in", "_dict", ":", "args", "[", "'urls'", "]", "=", "[", "SourceOptionsWebCrawl", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'urls'", ")", ")", "]", "if", "'buckets'", "in", "_dict", ":", "args", "[", "'buckets'", "]", "=", "[", "SourceOptionsBuckets", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'buckets'", ")", ")", "]", "if", "'crawl_all_buckets'", "in", "_dict", ":", "args", "[", "'crawl_all_buckets'", "]", "=", "_dict", ".", "get", "(", "'crawl_all_buckets'", ")", "return", "cls", "(", "*", "*", "args", ")" ]
36.966667
13.8
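The method follows a common deserialization pattern: check each optional key, map nested dicts through the nested type's own `_from_dict`, and build only the kwargs that are present. A minimal two-class sketch of the pattern (hypothetical types, not the real SDK classes):

    class Folder:
        def __init__(self, path):
            self.path = path

        @classmethod
        def _from_dict(cls, d):
            return cls(d.get('path'))

    class Options:
        def __init__(self, folders=None):
            self.folders = folders

        @classmethod
        def _from_dict(cls, _dict):
            args = {}
            if 'folders' in _dict:
                args['folders'] = [Folder._from_dict(x) for x in _dict.get('folders')]
            return cls(**args)

    opts = Options._from_dict({'folders': [{'path': '/reports'}]})
    print(opts.folders[0].path)  # /reports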
def select_by_ctime(self, min_time=0, max_time=ts_2100, recursive=True): """ Select file paths by create time. :param min_time: lower bound timestamp :param max_time: upper bound timestamp Select all files whose :attr:`pathlib_mate.pathlib2.Path.ctime` falls within the given range. """ def filters(p): return min_time <= p.ctime <= max_time return self.select_file(filters, recursive)
[ "def", "select_by_ctime", "(", "self", ",", "min_time", "=", "0", ",", "max_time", "=", "ts_2100", ",", "recursive", "=", "True", ")", ":", "def", "filters", "(", "p", ")", ":", "return", "min_time", "<=", "p", ".", "ctime", "<=", "max_time", "return", "self", ".", "select_file", "(", "filters", ",", "recursive", ")" ]
27.625
19.25
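The selection works by building a closure over the bounds and handing it to `select_file`; the same predicate can be exercised standalone with a toy object exposing only `.ctime` (an assumption about what the predicate needs):

    class FakePath:
        def __init__(self, ctime):
            self.ctime = ctime

    min_time, max_time = 50, 500

    def filters(p):
        return min_time <= p.ctime <= max_time

    paths = [FakePath(t) for t in (10, 100, 1000)]
    print([p.ctime for p in paths if filters(p)])  # [100]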
def get_labs(format): """Gets Repair Cafe data from repaircafe.org.""" data = data_from_repaircafe_org() repaircafes = {} # Load all the Repair Cafes for i in data: # Create a lab current_lab = RepairCafe() # Add existing data from first scraping current_lab.name = i["name"] slug = i["url"].replace("https://repaircafe.org/locations/", "") if slug.endswith("/"): slug = slug.replace("/", "") current_lab.slug = slug current_lab.url = i["url"] # Scrape for more data page_request = requests.get(i["url"]) if page_request.status_code == 200: page_source = BeautifulSoup(page_request.text, "lxml") else: output = "There was an error while accessing data on repaircafe.org." continue # Find Facebook and Twitter links, and add the other ones as well current_lab.links = {"facebook": "", "twitter": ""} column = page_source.find_all("div", class_="sc_column_item_2") for j in column: for p in j.find_all('p'): for a in p.find_all('a', href=True): if "facebook" in a['href']: current_lab.links["facebook"] = a['href'] elif "twitter" in a['href']: current_lab.links["twitter"] = a['href'] else: current_lab.links[a['href']] = a['href'] # Find address column = page_source.find_all("div", class_="sc_column_item_1") for x in column: if x.string: print(x.string.strip()) exit() # current_lab.address_1 = i["address_1"] # current_lab.address_2 = i["address_2"] # current_lab.address_notes = i["address_notes"] # current_lab.blurb = i["blurb"] # current_lab.city = i["city"] # current_lab.country_code = i["country_code"] # current_lab.county = i["county"] # current_lab.description = i["description"] # current_lab.email = i["email"] # current_lab.id = i["id"] # current_lab.phone = i["phone"] # current_lab.postal_code = i["postal_code"] # # # current_lab.continent = country_alpha2_to_continent_code(i[ # "country_code"].upper()) # current_country = pycountry.countries.get( # alpha_2=i["country_code"].upper()) # current_lab.country_code = current_country.alpha_3 # current_lab.country = current_country.name # if i["longitude"] is None or i["latitude"] is None: # # Be nice with the geocoder API limit # errorsb += 1 # # sleep(10) # # location = geolocator.geocode( # # {"city": i["city"], # # "country": i["country_code"].upper()}, # # addressdetails=True, # # language="en") # # if location is not None: # # current_lab.latitude = location.latitude # # current_lab.longitude = location.longitude # # if "county" in location.raw["address"]: # # current_lab.county = location.raw["address"][ # # "county"].encode('utf-8') # # if "state" in location.raw["address"]: # # current_lab.state = location.raw["address"][ # # "state"].encode('utf-8') # else: # # Be nice with the geocoder API limit # sleep(10) # errorsa += 1 # # location = geolocator.reverse((i["latitude"], i["longitude"])) # # if location is not None: # # if "county" in location.raw["address"]: # # current_lab.county = location.raw["address"][ # # "county"].encode('utf-8') # # if "state" in location.raw["address"]: # # current_lab.state = location.raw["address"][ # # "state"].encode('utf-8') # Add the lab to the list repaircafes[slug] = current_lab # Return a dictionary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in repaircafes: output[j] = repaircafes[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in repaircafes: single = repaircafes[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in repaircafes: output[j] = repaircafes[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = repaircafes # Default: return an object else: output = repaircafes # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
[ "def", "get_labs", "(", "format", ")", ":", "data", "=", "data_from_repaircafe_org", "(", ")", "repaircafes", "=", "{", "}", "# Load all the Repair Cafes", "for", "i", "in", "data", ":", "# Create a lab", "current_lab", "=", "RepairCafe", "(", ")", "# Add existing data from first scraping", "current_lab", ".", "name", "=", "i", "[", "\"name\"", "]", "slug", "=", "i", "[", "\"url\"", "]", ".", "replace", "(", "\"https://repaircafe.org/locations/\"", ",", "\"\"", ")", "if", "slug", ".", "endswith", "(", "\"/\"", ")", ":", "slug", ".", "replace", "(", "\"/\"", ",", "\"\"", ")", "current_lab", ".", "slug", "=", "slug", "current_lab", ".", "url", "=", "i", "[", "\"url\"", "]", "# Scrape for more data", "page_request", "=", "requests", ".", "get", "(", "i", "[", "\"url\"", "]", ")", "if", "page_request", ".", "status_code", "==", "200", ":", "page_source", "=", "BeautifulSoup", "(", "page_request", ".", "text", ",", "\"lxml\"", ")", "else", ":", "output", "=", "\"There was an error while accessing data on repaircafe.org.\"", "# Find Facebook and Twitter links, add also the other ones", "current_lab", ".", "links", "=", "{", "\"facebook\"", ":", "\"\"", ",", "\"twitter\"", ":", "\"\"", "}", "column", "=", "page_source", ".", "find_all", "(", "\"div\"", ",", "class_", "=", "\"sc_column_item_2\"", ")", "for", "j", "in", "column", ":", "for", "p", "in", "j", ".", "find_all", "(", "'p'", ")", ":", "for", "a", "in", "p", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", ":", "if", "\"facebook\"", "in", "a", "[", "'href'", "]", ":", "current_lab", ".", "links", "[", "\"facebook\"", "]", "=", "a", "[", "'href'", "]", "elif", "\"twitter\"", "in", "a", "[", "'href'", "]", ":", "current_lab", ".", "links", "[", "\"twitter\"", "]", "=", "a", "[", "'href'", "]", "else", ":", "current_lab", ".", "links", "[", "a", "[", "'href'", "]", "]", "=", "a", "[", "'href'", "]", "# Find address", "column", "=", "page_source", ".", "find_all", "(", "\"div\"", ",", "class_", "=", "\"sc_column_item_1\"", ")", "for", "x", "in", "column", ":", "if", "x", ".", "string", ":", "print", "x", ".", "string", ".", "strip", "(", ")", "exit", "(", ")", "# current_lab.address_1 = i[\"address_1\"]", "# current_lab.address_2 = i[\"address_2\"]", "# current_lab.address_notes = i[\"address_notes\"]", "# current_lab.blurb = i[\"blurb\"]", "# current_lab.city = i[\"city\"]", "# current_lab.country_code = i[\"country_code\"]", "# current_lab.county = i[\"county\"]", "# current_lab.description = i[\"description\"]", "# current_lab.email = i[\"email\"]", "# current_lab.id = i[\"id\"]", "# current_lab.phone = i[\"phone\"]", "# current_lab.postal_code = i[\"postal_code\"]", "#", "#", "# current_lab.continent = country_alpha2_to_continent_code(i[", "# \"country_code\"].upper())", "# current_country = pycountry.countries.get(", "# alpha_2=i[\"country_code\"].upper())", "# current_lab.country_code = current_country.alpha_3", "# current_lab.country = current_country.name", "# if i[\"longitude\"] is None or i[\"latitude\"] is None:", "# # Be nice with the geocoder API limit", "# errorsb += 1", "# # sleep(10)", "# # location = geolocator.geocode(", "# # {\"city\": i[\"city\"],", "# # \"country\": i[\"country_code\"].upper()},", "# # addressdetails=True,", "# # language=\"en\")", "# # if location is not None:", "# # current_lab.latitude = location.latitude", "# # current_lab.longitude = location.longitude", "# # if \"county\" in location.raw[\"address\"]:", "# # current_lab.county = location.raw[\"address\"][", "# # 
\"county\"].encode('utf-8')", "# # if \"state\" in location.raw[\"address\"]:", "# # current_lab.state = location.raw[\"address\"][", "# # \"state\"].encode('utf-8')", "# else:", "# # Be nice with the geocoder API limit", "# sleep(10)", "# errorsa += 1", "# # location = geolocator.reverse((i[\"latitude\"], i[\"longitude\"]))", "# # if location is not None:", "# # if \"county\" in location.raw[\"address\"]:", "# # current_lab.county = location.raw[\"address\"][", "# # \"county\"].encode('utf-8')", "# # if \"state\" in location.raw[\"address\"]:", "# # current_lab.state = location.raw[\"address\"][", "# # \"state\"].encode('utf-8')", "# Add the lab to the list", "repaircafes", "[", "slug", "]", "=", "current_lab", "# Return a dictiornary / json", "if", "format", ".", "lower", "(", ")", "==", "\"dict\"", "or", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "{", "}", "for", "j", "in", "repaircafes", ":", "output", "[", "j", "]", "=", "repaircafes", "[", "j", "]", ".", "__dict__", "# Return a geojson", "elif", "format", ".", "lower", "(", ")", "==", "\"geojson\"", "or", "format", ".", "lower", "(", ")", "==", "\"geo\"", ":", "labs_list", "=", "[", "]", "for", "l", "in", "repaircafes", ":", "single", "=", "repaircafes", "[", "l", "]", ".", "__dict__", "single_lab", "=", "Feature", "(", "type", "=", "\"Feature\"", ",", "geometry", "=", "Point", "(", "(", "single", "[", "\"latitude\"", "]", ",", "single", "[", "\"longitude\"", "]", ")", ")", ",", "properties", "=", "single", ")", "labs_list", ".", "append", "(", "single_lab", ")", "output", "=", "dumps", "(", "FeatureCollection", "(", "labs_list", ")", ")", "# Return a Pandas DataFrame", "elif", "format", ".", "lower", "(", ")", "==", "\"pandas\"", "or", "format", ".", "lower", "(", ")", "==", "\"dataframe\"", ":", "output", "=", "{", "}", "for", "j", "in", "repaircafes", ":", "output", "[", "j", "]", "=", "repaircafes", "[", "j", "]", ".", "__dict__", "# Transform the dict into a Pandas DataFrame", "output", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "output", ")", "output", "=", "output", ".", "transpose", "(", ")", "# Return an object", "elif", "format", ".", "lower", "(", ")", "==", "\"object\"", "or", "format", ".", "lower", "(", ")", "==", "\"obj\"", ":", "output", "=", "repaircafes", "# Default: return an oject", "else", ":", "output", "=", "repaircafes", "# Return a proper json", "if", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "json", ".", "dumps", "(", "output", ")", "return", "output" ]
39.932331
15.789474
def readline(self, size=None): """Reads a single line of text. The function reads one entire line from the file-like object. A trailing end-of-line indicator (newline by default) is kept in the byte string (but may be absent when a file ends with an incomplete line). An empty byte string is returned only when end-of-file is encountered immediately. Args: size (Optional[int]): maximum byte size to read. If present and non-negative, it is a maximum byte count (including the trailing end-of-line) and an incomplete line may be returned. Returns: bytes: line of text. Raises: ValueError: if the specified size is less than zero or greater than the maximum size allowed. """ if size is not None and size < 0: raise ValueError('Invalid size value smaller than zero.') if size is not None and size > self.MAXIMUM_READ_BUFFER_SIZE: raise ValueError( 'Invalid size value exceeds maximum value {0:d}.'.format( self.MAXIMUM_READ_BUFFER_SIZE)) if not self._lines: if self._lines_buffer_offset >= self._file_object_size: return b'' read_size = size if not read_size: read_size = self.MAXIMUM_READ_BUFFER_SIZE if self._lines_buffer_offset + read_size > self._file_object_size: size = self._file_object_size - self._lines_buffer_offset self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET) read_buffer = self._file_object.read(read_size) self._lines_buffer_offset += len(read_buffer) self._lines = read_buffer.split(self.end_of_line) if self._lines_buffer: self._lines[0] = b''.join([self._lines_buffer, self._lines[0]]) self._lines_buffer = b'' if read_buffer[self._end_of_line_length:] != self.end_of_line: self._lines_buffer = self._lines.pop() for index, line in enumerate(self._lines): self._lines[index] = b''.join([line, self.end_of_line]) if (self._lines_buffer and self._lines_buffer_offset >= self._file_object_size): self._lines.append(self._lines_buffer) self._lines_buffer = b'' if not self._lines: line = self._lines_buffer self._lines_buffer = b'' elif not size or size >= len(self._lines[0]): line = self._lines.pop(0) else: line = self._lines[0] self._lines[0] = line[size:] line = line[:size] self._current_offset += len(line) return line
[ "def", "readline", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "not", "None", "and", "size", "<", "0", ":", "raise", "ValueError", "(", "'Invalid size value smaller than zero.'", ")", "if", "size", "is", "not", "None", "and", "size", ">", "self", ".", "MAXIMUM_READ_BUFFER_SIZE", ":", "raise", "ValueError", "(", "'Invalid size value exceeds maximum value {0:d}.'", ".", "format", "(", "self", ".", "MAXIMUM_READ_BUFFER_SIZE", ")", ")", "if", "not", "self", ".", "_lines", ":", "if", "self", ".", "_lines_buffer_offset", ">=", "self", ".", "_file_object_size", ":", "return", "b''", "read_size", "=", "size", "if", "not", "read_size", ":", "read_size", "=", "self", ".", "MAXIMUM_READ_BUFFER_SIZE", "if", "self", ".", "_lines_buffer_offset", "+", "read_size", ">", "self", ".", "_file_object_size", ":", "size", "=", "self", ".", "_file_object_size", "-", "self", ".", "_lines_buffer_offset", "self", ".", "_file_object", ".", "seek", "(", "self", ".", "_lines_buffer_offset", ",", "os", ".", "SEEK_SET", ")", "read_buffer", "=", "self", ".", "_file_object", ".", "read", "(", "read_size", ")", "self", ".", "_lines_buffer_offset", "+=", "len", "(", "read_buffer", ")", "self", ".", "_lines", "=", "read_buffer", ".", "split", "(", "self", ".", "end_of_line", ")", "if", "self", ".", "_lines_buffer", ":", "self", ".", "_lines", "[", "0", "]", "=", "b''", ".", "join", "(", "[", "self", ".", "_lines_buffer", ",", "self", ".", "_lines", "[", "0", "]", "]", ")", "self", ".", "_lines_buffer", "=", "b''", "if", "read_buffer", "[", "self", ".", "_end_of_line_length", ":", "]", "!=", "self", ".", "end_of_line", ":", "self", ".", "_lines_buffer", "=", "self", ".", "_lines", ".", "pop", "(", ")", "for", "index", ",", "line", "in", "enumerate", "(", "self", ".", "_lines", ")", ":", "self", ".", "_lines", "[", "index", "]", "=", "b''", ".", "join", "(", "[", "line", ",", "self", ".", "end_of_line", "]", ")", "if", "(", "self", ".", "_lines_buffer", "and", "self", ".", "_lines_buffer_offset", ">=", "self", ".", "_file_object_size", ")", ":", "self", ".", "_lines", ".", "append", "(", "self", ".", "_lines_buffer", ")", "self", ".", "_lines_buffer", "=", "b''", "if", "not", "self", ".", "_lines", ":", "line", "=", "self", ".", "_lines_buffer", "self", ".", "_lines_buffer", "=", "b''", "elif", "not", "size", "or", "size", ">=", "len", "(", "self", ".", "_lines", "[", "0", "]", ")", ":", "line", "=", "self", ".", "_lines", ".", "pop", "(", "0", ")", "else", ":", "line", "=", "self", ".", "_lines", "[", "0", "]", "self", ".", "_lines", "[", "0", "]", "=", "line", "[", "size", ":", "]", "line", "=", "line", "[", ":", "size", "]", "self", ".", "_current_offset", "+=", "len", "(", "line", ")", "return", "line" ]
32.573333
23.506667
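The core of the method is a split-and-carry scheme: split each read buffer on the end-of-line marker, keep the complete lines, and carry the trailing partial line into the next read. A minimal sketch of that scheme on canned chunks:

    end_of_line = b'\n'
    carry = b''
    lines_out = []
    for chunk in (b'alpha\nbe', b'ta\ngam', b'ma\n'):
        parts = (carry + chunk).split(end_of_line)
        carry = parts.pop()  # incomplete trailing line, possibly b''
        lines_out.extend(part + end_of_line for part in parts)
    print(lines_out)  # [b'alpha\n', b'beta\n', b'gamma\n']
    print(carry)      # b'' - the last chunk ended on a newline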
def calc_expiry_time(minutes_valid): """Return the specific time at which an auth_hash will expire.""" return ( timezone.now() + datetime.timedelta(minutes=minutes_valid + 1) ).replace(second=0, microsecond=0)
[ "def", "calc_expiry_time", "(", "minutes_valid", ")", ":", "return", "(", "timezone", ".", "now", "(", ")", "+", "datetime", ".", "timedelta", "(", "minutes", "=", "minutes_valid", "+", "1", ")", ")", ".", "replace", "(", "second", "=", "0", ",", "microsecond", "=", "0", ")" ]
42.4
12.8
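A pure-datetime sketch of the rounding behaviour; the original uses django.utils.timezone.now(), which is assumed here to behave like datetime.now():

    import datetime

    def calc_expiry_time(minutes_valid, now=None):
        now = now or datetime.datetime.now()
        return (now + datetime.timedelta(minutes=minutes_valid + 1)).replace(
            second=0, microsecond=0)

    print(calc_expiry_time(10, now=datetime.datetime(2024, 1, 1, 12, 30, 45)))
    # 2024-01-01 12:41:00 - adding an extra minute before truncating the
    # seconds guarantees at least `minutes_valid` whole minutes of validity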
def _scaling_func_list(bdry_fracs, exponent): """Return a list of lists of scaling functions for the boundary.""" def scaling(factor): def scaling_func(x): return x * factor return scaling_func func_list = [] for frac_l, frac_r in bdry_fracs: func_list_entry = [] if np.isclose(frac_l, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_l ** (1 / exponent))) if np.isclose(frac_r, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_r ** (1 / exponent))) func_list.append(func_list_entry) return func_list
[ "def", "_scaling_func_list", "(", "bdry_fracs", ",", "exponent", ")", ":", "def", "scaling", "(", "factor", ")", ":", "def", "scaling_func", "(", "x", ")", ":", "return", "x", "*", "factor", "return", "scaling_func", "func_list", "=", "[", "]", "for", "frac_l", ",", "frac_r", "in", "bdry_fracs", ":", "func_list_entry", "=", "[", "]", "if", "np", ".", "isclose", "(", "frac_l", ",", "1.0", ")", ":", "func_list_entry", ".", "append", "(", "None", ")", "else", ":", "func_list_entry", ".", "append", "(", "scaling", "(", "frac_l", "**", "(", "1", "/", "exponent", ")", ")", ")", "if", "np", ".", "isclose", "(", "frac_r", ",", "1.0", ")", ":", "func_list_entry", ".", "append", "(", "None", ")", "else", ":", "func_list_entry", ".", "append", "(", "scaling", "(", "frac_r", "**", "(", "1", "/", "exponent", ")", ")", ")", "func_list", ".", "append", "(", "func_list_entry", ")", "return", "func_list" ]
31
16.227273
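The inner `scaling(factor)` factory most likely exists to freeze `factor` per loop iteration; a bare lambda defined inside the loop would close over the loop variable and see only its final value. The difference in miniature:

    funcs_late = [lambda x: x * f for f in (2, 3)]                 # late binding
    funcs_frozen = [(lambda f: lambda x: x * f)(f) for f in (2, 3)]
    print([g(1) for g in funcs_late])    # [3, 3] - both see f == 3
    print([g(1) for g in funcs_frozen])  # [2, 3]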
def receive(self, data): """ Create and return a message from data; also triggers the **receive** event. Returns: - Message: message object """ self.log_debug("Received: %s" % (data)) message = self.make_message(data) self.trigger("receive", data=data, message=message) return message
[ "def", "receive", "(", "self", ",", "data", ")", ":", "self", ".", "log_debug", "(", "\"Received: %s\"", "%", "(", "data", ")", ")", "message", "=", "self", ".", "make_message", "(", "data", ")", "self", ".", "trigger", "(", "\"receive\"", ",", "data", "=", "data", ",", "message", "=", "message", ")", "return", "message" ]
27.538462
16
def chunkiter(iterable, chunksize): """break an iterable into chunks and yield those chunks as lists until there's nothing left to yield. """ iterator = iter(iterable) for chunk in iter(lambda: list(itertools.islice(iterator, chunksize)), []): yield chunk
[ "def", "chunkiter", "(", "iterable", ",", "chunksize", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "for", "chunk", "in", "iter", "(", "lambda", ":", "list", "(", "itertools", ".", "islice", "(", "iterator", ",", "chunksize", ")", ")", ",", "[", "]", ")", ":", "yield", "chunk" ]
39.571429
10.857143
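Usage is straightforward; the two-argument iter() keeps calling the lambda until it returns the sentinel [], i.e. until islice finds nothing left:

    import itertools

    def chunkiter(iterable, chunksize):
        iterator = iter(iterable)
        for chunk in iter(lambda: list(itertools.islice(iterator, chunksize)), []):
            yield chunk

    print(list(chunkiter(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]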
def add_coeffs(self, Tmin, Tmax, coeffs): '''Called internally during the parsing of the Zabransky database, to add coefficients as they are read one per line''' self.n += 1 if not self.Ts: self.Ts = [Tmin, Tmax] self.coeff_sets = [coeffs] else: for ind, T in enumerate(self.Ts): if Tmin < T: # Under an existing coefficient set - assume Tmax will come from another set self.Ts.insert(ind, Tmin) self.coeff_sets.insert(ind, coeffs) return # Must be appended to end instead self.Ts.append(Tmax) self.coeff_sets.append(coeffs)
[ "def", "add_coeffs", "(", "self", ",", "Tmin", ",", "Tmax", ",", "coeffs", ")", ":", "self", ".", "n", "+=", "1", "if", "not", "self", ".", "Ts", ":", "self", ".", "Ts", "=", "[", "Tmin", ",", "Tmax", "]", "self", ".", "coeff_sets", "=", "[", "coeffs", "]", "else", ":", "for", "ind", ",", "T", "in", "enumerate", "(", "self", ".", "Ts", ")", ":", "if", "Tmin", "<", "T", ":", "# Under an existing coefficient set - assume Tmax will come from another set", "self", ".", "Ts", ".", "insert", "(", "ind", ",", "Tmin", ")", "self", ".", "coeff_sets", ".", "insert", "(", "ind", ",", "coeffs", ")", "return", "# Must be appended to end instead", "self", ".", "Ts", ".", "append", "(", "Tmax", ")", "self", ".", "coeff_sets", ".", "append", "(", "coeffs", ")" ]
42.176471
14.764706
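A trace of the range bookkeeping, using a bare-bones class with just the three attributes the method touches (n, Ts, coeff_sets) and string stand-ins for the coefficient arrays:

    class Piecewise:
        def __init__(self):
            self.n, self.Ts, self.coeff_sets = 0, [], []

        def add_coeffs(self, Tmin, Tmax, coeffs):
            self.n += 1
            if not self.Ts:
                self.Ts = [Tmin, Tmax]
                self.coeff_sets = [coeffs]
            else:
                for ind, T in enumerate(self.Ts):
                    if Tmin < T:
                        # Under an existing set - Tmax comes from another set
                        self.Ts.insert(ind, Tmin)
                        self.coeff_sets.insert(ind, coeffs)
                        return
                self.Ts.append(Tmax)
                self.coeff_sets.append(coeffs)

    p = Piecewise()
    p.add_coeffs(298.0, 400.0, 'high-T set')
    p.add_coeffs(200.0, 298.0, 'low-T set')
    print(p.Ts)          # [200.0, 298.0, 400.0]
    print(p.coeff_sets)  # ['low-T set', 'high-T set']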
def patch_api_service_status(self, name, body, **kwargs): """ partially update status of the specified APIService This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_api_service_status(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the APIService (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1APIService If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_api_service_status_with_http_info(name, body, **kwargs) else: (data) = self.patch_api_service_status_with_http_info(name, body, **kwargs) return data
[ "def", "patch_api_service_status", "(", "self", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_api_service_status_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "patch_api_service_status_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
75.96
47.48
def percentSame(image1, image2): ''' Returns the percent of pixels that are equal @author: catshoes ''' # If the images differ in size, return 0% same. size_x1, size_y1 = image1.size size_x2, size_y2 = image2.size if (size_x1 != size_x2 or size_y1 != size_y2): return 0 # Images are the same size # Return the percent of pixels that are equal. numPixelsSame = 0 numPixelsTotal = size_x1 * size_y1 image1Pixels = image1.load() image2Pixels = image2.load() # Loop over all pixels, comparing pixel in image1 to image2 for x in range(size_x1): for y in range(size_y1): if image1Pixels[x, y] == image2Pixels[x, y]: numPixelsSame += 1 return numPixelsSame / float(numPixelsTotal)
[ "def", "percentSame", "(", "image1", ",", "image2", ")", ":", "# If the images differ in size, return 0% same.", "size_x1", ",", "size_y1", "=", "image1", ".", "size", "size_x2", ",", "size_y2", "=", "image2", ".", "size", "if", "(", "size_x1", "!=", "size_x2", "or", "size_y1", "!=", "size_y2", ")", ":", "return", "0", "# Images are the same size", "# Return the percent of pixels that are equal.", "numPixelsSame", "=", "0", "numPixelsTotal", "=", "size_x1", "*", "size_y1", "image1Pixels", "=", "image1", ".", "load", "(", ")", "image2Pixels", "=", "image2", ".", "load", "(", ")", "# Loop over all pixels, comparing pixel in image1 to image2", "for", "x", "in", "range", "(", "size_x1", ")", ":", "for", "y", "in", "range", "(", "size_y1", ")", ":", "if", "image1Pixels", "[", "x", ",", "y", "]", "==", "image2Pixels", "[", "x", ",", "y", "]", ":", "numPixelsSame", "+=", "1", "return", "numPixelsSame", "/", "float", "(", "numPixelsTotal", ")" ]
30.964286
16.321429
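Exercising percentSame (as defined in the record above, pasted into the same module) with two tiny in-memory images; requires Pillow:

    from PIL import Image

    a = Image.new('RGB', (2, 2), (255, 0, 0))
    b = Image.new('RGB', (2, 2), (255, 0, 0))
    b.putpixel((0, 0), (0, 0, 0))  # make one of the four pixels differ
    print(percentSame(a, b))  # 0.75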
def _unsigned_mul_overflow(state, a, b): """ Sign extend the value to 512 bits and check whether the result can be represented in 256. Following is a 32 bit excerpt of this condition:

    a * b              +00000000000000000 +00000000000000001 +0000000003fffffff +0000000007fffffff +00000000080000001 +000000000bfffffff +000000000ffffffff
    +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000  +0000000000000000
    +0000000000000001  +0000000000000000  +0000000000000001  +000000003fffffff  +000000007fffffff  +0000000080000001  +00000000bfffffff  +00000000ffffffff
    +000000003fffffff  +0000000000000000  +000000003fffffff  *+0fffffff80000001 *+1fffffff40000001 *+1fffffffbfffffff *+2fffffff00000001 *+3ffffffec0000001
    +000000007fffffff  +0000000000000000  +000000007fffffff  *+1fffffff40000001 *+3fffffff00000001 *+3fffffffffffffff *+5ffffffec0000001 *+7ffffffe80000001
    +0000000080000001  +0000000000000000  +0000000080000001  *+1fffffffbfffffff *+3fffffffffffffff *+4000000100000001 *+600000003fffffff *+800000007fffffff
    +00000000bfffffff  +0000000000000000  +00000000bfffffff  *+2fffffff00000001 *+5ffffffec0000001 *+600000003fffffff *+8ffffffe80000001 *+bffffffe40000001
    +00000000ffffffff  +0000000000000000  +00000000ffffffff  *+3ffffffec0000001 *+7ffffffe80000001 *+800000007fffffff *+bffffffe40000001 *+fffffffe00000001
    """ mul = Operators.SEXTEND(a, 256, 512) * Operators.SEXTEND(b, 256, 512) cond = Operators.UGE(mul, 1 << 256) return cond
[ "def", "_unsigned_mul_overflow", "(", "state", ",", "a", ",", "b", ")", ":", "mul", "=", "Operators", ".", "SEXTEND", "(", "a", ",", "256", ",", "512", ")", "*", "Operators", ".", "SEXTEND", "(", "b", ",", "256", ",", "512", ")", "cond", "=", "Operators", ".", "UGE", "(", "mul", ",", "1", "<<", "256", ")", "return", "cond" ]
89.833333
64.277778
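On concrete integers the condition reduces to widening the operands and comparing the product against 2**256; the sketch below uses plain Python ints and deliberately ignores the symbolic Operators layer and the two's-complement subtlety that SEXTEND handles:

    def unsigned_mul_overflows(a, b, bits=256):
        # Widen, multiply, and test whether the product needs more than `bits` bits
        return (a * b) >= (1 << bits)

    print(unsigned_mul_overflows(2 ** 255, 2))                 # True
    print(unsigned_mul_overflows(2 ** 128 - 1, 2 ** 128 - 1))  # False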
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, **_kwargs): '''Get CQT bin frequencies''' if fmin is None: fmin = core.note_to_hz('C1') # we drop by half a bin so that CQT bins are centered vertically return core.cqt_frequencies(n+1, fmin=fmin / 2.0**(0.5/bins_per_octave), bins_per_octave=bins_per_octave)
[ "def", "__coord_cqt_hz", "(", "n", ",", "fmin", "=", "None", ",", "bins_per_octave", "=", "12", ",", "*", "*", "_kwargs", ")", ":", "if", "fmin", "is", "None", ":", "fmin", "=", "core", ".", "note_to_hz", "(", "'C1'", ")", "# we drop by half a bin so that CQT bins are centered vertically", "return", "core", ".", "cqt_frequencies", "(", "n", "+", "1", ",", "fmin", "=", "fmin", "/", "2.0", "**", "(", "0.5", "/", "bins_per_octave", ")", ",", "bins_per_octave", "=", "bins_per_octave", ")" ]
43.555556
20.222222
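Numerically, the half-bin drop just divides the geometric grid's anchor by 2**(0.5/bins_per_octave). Assuming core.cqt_frequencies builds the usual fmin * 2**(k/bins_per_octave) grid, the shift looks like:

    import numpy as np

    def cqt_frequencies(n_bins, fmin, bins_per_octave=12):
        return fmin * 2.0 ** (np.arange(n_bins) / bins_per_octave)

    fmin = 32.7  # approximately C1
    centered = cqt_frequencies(3, fmin / 2.0 ** (0.5 / 12))
    print(centered)  # each bin sits half a bin below the unshifted grid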
def create_update_parameter(task_params, parameter_map): """ Builds the code block for the GPTool UpdateParameter method based on the input task_params. :param task_params: A list of task parameters from the task info structure. :param parameter_map: A mapping from parameter data type names to the template objects used to emit code. :return: A string representing the code block to the GPTool UpdateParameter method. """ gp_params = [] for param in task_params: if param['direction'].upper() == 'OUTPUT': continue # Convert DataType data_type = param['type'].upper() if 'dimensions' in param: data_type += 'ARRAY' if data_type in parameter_map: gp_params.append(parameter_map[data_type].update_parameter().substitute(param)) return ''.join(gp_params)
[ "def", "create_update_parameter", "(", "task_params", ",", "parameter_map", ")", ":", "gp_params", "=", "[", "]", "for", "param", "in", "task_params", ":", "if", "param", "[", "'direction'", "]", ".", "upper", "(", ")", "==", "'OUTPUT'", ":", "continue", "# Convert DataType", "data_type", "=", "param", "[", "'type'", "]", ".", "upper", "(", ")", "if", "'dimensions'", "in", "param", ":", "data_type", "+=", "'ARRAY'", "if", "data_type", "in", "parameter_map", ":", "gp_params", ".", "append", "(", "parameter_map", "[", "data_type", "]", ".", "update_parameter", "(", ")", ".", "substitute", "(", "param", ")", ")", "return", "''", ".", "join", "(", "gp_params", ")" ]
35.142857
22.571429
def visitBaseDecl(self, ctx: ShExDocParser.BaseDeclContext): """ baseDecl: KW_BASE IRIREF """ self.context.base = None self.context.base = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
[ "def", "visitBaseDecl", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "BaseDeclContext", ")", ":", "self", ".", "context", ".", "base", "=", "None", "self", ".", "context", ".", "base", "=", "self", ".", "context", ".", "iriref_to_shexj_iriref", "(", "ctx", ".", "IRIREF", "(", ")", ")" ]
52.25
16.25
def update_payload(self, fields=None): """Wrap submitted data within an extra dict.""" payload = super(ConfigTemplate, self).update_payload(fields) if 'template_combinations' in payload: payload['template_combinations_attributes'] = payload.pop( 'template_combinations') return {u'config_template': payload}
[ "def", "update_payload", "(", "self", ",", "fields", "=", "None", ")", ":", "payload", "=", "super", "(", "ConfigTemplate", ",", "self", ")", ".", "update_payload", "(", "fields", ")", "if", "'template_combinations'", "in", "payload", ":", "payload", "[", "'template_combinations_attributes'", "]", "=", "payload", ".", "pop", "(", "'template_combinations'", ")", "return", "{", "u'config_template'", ":", "payload", "}" ]
51.571429
10
def _get_asym_hel(self,d): """ Find the asymmetry of each helicity. """ # get data 1+ 2+ 1- 2- d0 = d[0]; d1 = d[2]; d2 = d[1]; d3 = d[3] # pre-calcs denom1 = d0+d1; denom2 = d2+d3 # check for div by zero denom1[denom1==0] = np.nan denom2[denom2==0] = np.nan # asymmetries in both helicities asym_hel = [(d0-d1)/denom1, (d2-d3)/denom2] # errors # https://www.wolframalpha.com/input/?i=%E2%88%9A(F*(derivative+of+((F-B)%2F(F%2BB))+with+respect+to+F)%5E2+%2B+B*(derivative+of+((F-B)%2F(F%2BB))+with+respect+to+B)%5E2) asym_hel_err = [2*np.sqrt(d0*d1/np.power(denom1,3)), 2*np.sqrt(d2*d3/np.power(denom2,3))] # remove nan for i in range(2): asym_hel[i][np.isnan(asym_hel[i])] = 0. asym_hel_err[i][np.isnan(asym_hel_err[i])] = 0. # exit return [[asym_hel[1],asym_hel_err[1]], # something wrong with file? [asym_hel[0],asym_hel_err[0]]]
[ "def", "_get_asym_hel", "(", "self", ",", "d", ")", ":", "# get data 1+ 2+ 1- 2-", "d0", "=", "d", "[", "0", "]", "d1", "=", "d", "[", "2", "]", "d2", "=", "d", "[", "1", "]", "d3", "=", "d", "[", "3", "]", "# pre-calcs", "denom1", "=", "d0", "+", "d1", "denom2", "=", "d2", "+", "d3", "# check for div by zero", "denom1", "[", "denom1", "==", "0", "]", "=", "np", ".", "nan", "denom2", "[", "denom2", "==", "0", "]", "=", "np", ".", "nan", "# asymmetries in both helicities", "asym_hel", "=", "[", "(", "d0", "-", "d1", ")", "/", "denom1", ",", "(", "d2", "-", "d3", ")", "/", "denom2", "]", "# errors ", "# https://www.wolframalpha.com/input/?i=%E2%88%9A(F*(derivative+of+((F-B)%2F(F%2BB))+with+respect+to+F)%5E2+%2B+B*(derivative+of+((F-B)%2F(F%2BB))+with+respect+to+B)%5E2)", "asym_hel_err", "=", "[", "2", "*", "np", ".", "sqrt", "(", "d0", "*", "d1", "/", "np", ".", "power", "(", "denom1", ",", "3", ")", ")", ",", "2", "*", "np", ".", "sqrt", "(", "d2", "*", "d3", "/", "np", ".", "power", "(", "denom2", ",", "3", ")", ")", "]", "# remove nan ", "for", "i", "in", "range", "(", "2", ")", ":", "asym_hel", "[", "i", "]", "[", "np", ".", "isnan", "(", "asym_hel", "[", "i", "]", ")", "]", "=", "0.", "asym_hel_err", "[", "i", "]", "[", "np", ".", "isnan", "(", "asym_hel_err", "[", "i", "]", ")", "]", "=", "0.", "# exit", "return", "[", "[", "asym_hel", "[", "1", "]", ",", "asym_hel_err", "[", "1", "]", "]", ",", "# something wrong with file?", "[", "asym_hel", "[", "0", "]", ",", "asym_hel_err", "[", "0", "]", "]", "]" ]
34.545455
20.242424
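The asymmetry per helicity is (F-B)/(F+B) with propagated error 2*sqrt(F*B/(F+B)**3); a toy numpy run, including the divide-by-zero guard, looks like:

    import numpy as np

    F = np.array([100.0, 0.0])
    B = np.array([60.0, 0.0])
    denom = F + B
    denom[denom == 0] = np.nan       # guard against division by zero
    asym = (F - B) / denom
    asym_err = 2 * np.sqrt(F * B / np.power(denom, 3))
    asym[np.isnan(asym)] = 0.0       # map the guarded entries back to zero
    asym_err[np.isnan(asym_err)] = 0.0
    print(asym)      # [0.25 0.  ]
    print(asym_err)  # [0.0765... 0.]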
def replace(self, scaling_group, name, cooldown, min_entities, max_entities, metadata=None): """ Replace an existing ScalingGroup configuration. All of the attributes must be specified. If you wish to delete any of the optional attributes, pass them in as None. """ return self._manager.replace(scaling_group, name, cooldown, min_entities, max_entities, metadata=metadata)
[ "def", "replace", "(", "self", ",", "scaling_group", ",", "name", ",", "cooldown", ",", "min_entities", ",", "max_entities", ",", "metadata", "=", "None", ")", ":", "return", "self", ".", "_manager", ".", "replace", "(", "scaling_group", ",", "name", ",", "cooldown", ",", "min_entities", ",", "max_entities", ",", "metadata", "=", "metadata", ")" ]
48.888889
15.333333
def generate_certificate( ctx, slot, management_key, pin, public_key, subject, valid_days): """ Generate a self-signed X.509 certificate. A self-signed certificate is generated and written to one of the slots on the YubiKey. A private key needs to exist in the slot. \b SLOT PIV slot where private key is stored. PUBLIC-KEY File containing a public key. Use '-' to use stdin. """ controller = ctx.obj['controller'] _ensure_authenticated( ctx, controller, pin, management_key, require_pin_and_key=True) data = public_key.read() public_key = serialization.load_pem_public_key( data, default_backend()) now = datetime.datetime.now() valid_to = now + datetime.timedelta(days=valid_days) try: controller.generate_self_signed_certificate( slot, public_key, subject, now, valid_to, touch_callback=prompt_for_touch) except APDUError as e: logger.error('Failed to generate certificate for slot %s', slot, exc_info=e) ctx.fail('Certificate generation failed.')
[ "def", "generate_certificate", "(", "ctx", ",", "slot", ",", "management_key", ",", "pin", ",", "public_key", ",", "subject", ",", "valid_days", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "_ensure_authenticated", "(", "ctx", ",", "controller", ",", "pin", ",", "management_key", ",", "require_pin_and_key", "=", "True", ")", "data", "=", "public_key", ".", "read", "(", ")", "public_key", "=", "serialization", ".", "load_pem_public_key", "(", "data", ",", "default_backend", "(", ")", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "valid_to", "=", "now", "+", "datetime", ".", "timedelta", "(", "days", "=", "valid_days", ")", "try", ":", "controller", ".", "generate_self_signed_certificate", "(", "slot", ",", "public_key", ",", "subject", ",", "now", ",", "valid_to", ",", "touch_callback", "=", "prompt_for_touch", ")", "except", "APDUError", "as", "e", ":", "logger", ".", "error", "(", "'Failed to generate certificate for slot %s'", ",", "slot", ",", "exc_info", "=", "e", ")", "ctx", ".", "fail", "(", "'Certificate generation failed.'", ")" ]
34.28125
20.46875
def get_langids(dev): r"""Retrieve the list of supported Language IDs from the device. Most client code should not call this function directly, but instead use the langids property on the Device object, which will call this function as needed and cache the result. USB LANGIDs are 16-bit integers familiar to Windows developers, where for example instead of en-US you say 0x0409. See the file USB_LANGIDS.pdf somewhere on the usb.org site for a list, which does not claim to be complete. It requires "system software must allow the enumeration and selection of LANGIDs that are not currently on this list." It also requires "system software should never request a LANGID not defined in the LANGID code array (string index = 0) presented by a device." Client code can check this tuple before issuing string requests for a specific language ID. dev is the Device object whose supported language IDs will be retrieved. The return value is a tuple of integer LANGIDs, possibly empty if the device does not support strings at all (which USB 3.1 r1.0 section 9.6.9 allows). In that case client code should not request strings at all. A USBError may be raised from this function for some devices that have no string support, instead of returning an empty tuple. The accessor for the langids property on Device catches that case and supplies an empty tuple, so client code can ignore this detail by using the langids property instead of directly calling this function. """ from usb.control import get_descriptor buf = get_descriptor( dev, 254, DESC_TYPE_STRING, 0 ) # The array is retrieved by asking for string descriptor zero, which is # never the index of a real string. The returned descriptor has bLength # and bDescriptorType bytes followed by pairs of bytes representing # little-endian LANGIDs. That is, buf[0] contains the length of the # returned array, buf[2] is the least-significant byte of the first LANGID # (if any), buf[3] is the most-significant byte, and in general the LSBs of # all the LANGIDs are given by buf[2:buf[0]:2] and MSBs by buf[3:buf[0]:2]. # If the length of buf came back odd, something is wrong. if len(buf) < 4 or buf[0] < 4 or buf[0]&1 != 0: return () return tuple(map(lambda x,y: x+(y<<8), buf[2:buf[0]:2], buf[3:buf[0]:2]))
[ "def", "get_langids", "(", "dev", ")", ":", "from", "usb", ".", "control", "import", "get_descriptor", "buf", "=", "get_descriptor", "(", "dev", ",", "254", ",", "DESC_TYPE_STRING", ",", "0", ")", "# The array is retrieved by asking for string descriptor zero, which is", "# never the index of a real string. The returned descriptor has bLength", "# and bDescriptorType bytes followed by pairs of bytes representing", "# little-endian LANGIDs. That is, buf[0] contains the length of the", "# returned array, buf[2] is the least-significant byte of the first LANGID", "# (if any), buf[3] is the most-significant byte, and in general the LSBs of", "# all the LANGIDs are given by buf[2:buf[0]:2] and MSBs by buf[3:buf[0]:2].", "# If the length of buf came back odd, something is wrong.", "if", "len", "(", "buf", ")", "<", "4", "or", "buf", "[", "0", "]", "<", "4", "or", "buf", "[", "0", "]", "&", "1", "!=", "0", ":", "return", "(", ")", "return", "tuple", "(", "map", "(", "lambda", "x", ",", "y", ":", "x", "+", "(", "y", "<<", "8", ")", ",", "buf", "[", "2", ":", "buf", "[", "0", "]", ":", "2", "]", ",", "buf", "[", "3", ":", "buf", "[", "0", "]", ":", "2", "]", ")", ")" ]
49.673469
29.530612
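The decoding arithmetic can be exercised on a canned descriptor buffer; the layout below (bLength, bDescriptorType, then little-endian LANGID pairs) follows the docstring, with 0x0409 (en-US) and 0x0407 (de-DE) as example IDs:

    buf = bytes([6, 3, 0x09, 0x04, 0x07, 0x04])  # bLength=6, type=STRING
    if len(buf) < 4 or buf[0] < 4 or buf[0] & 1 != 0:
        langids = ()
    else:
        # LSBs at even offsets from 2, MSBs at odd offsets, up to bLength
        langids = tuple(lo + (hi << 8)
                        for lo, hi in zip(buf[2:buf[0]:2], buf[3:buf[0]:2]))
    print([hex(lid) for lid in langids])  # ['0x409', '0x407']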
def rules(self): """ Returns a sorted list of firewall rules. Returns: list """ list_of_rules = [] for main_row in self.dict_rules: if 'rules' in main_row: for rule_row in main_row['rules']: if 'grants' in rule_row: for grant_row in rule_row['grants']: if 'group_id' in grant_row: # Set a var to not go over 80 chars group_id = grant_row['group_id'] # Some VPC grants don't specify a name if 'name' in grant_row: row_name = grant_row['name'] else: row_name = None fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_group_id=group_id, rules_grants_name=row_name, rules_description=grant_row['description']) list_of_rules.append(fr) elif 'cidr_ip' in grant_row: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_cidr_ip=grant_row['cidr_ip'], rules_description=grant_row['description']) list_of_rules.append(fr) else: raise ValueError("Unsupported grant:", grant_row) else: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port']) list_of_rules.append(fr) else: fr = FirewallRule(main_row['id'], main_row['name'], main_row['description']) list_of_rules.append(fr) # Sort the data in order to get a consistent output sorted_list = sorted(list_of_rules, key=lambda fr: (str(fr.id), str(fr.name), str(fr.description), str(fr.rules_direction), str(fr.rules_ip_protocol), str(fr.rules_from_port), str(fr.rules_to_port), str(fr.rules_grants_group_id), str(fr.rules_grants_name), str(fr.rules_grants_cidr_ip))) return sorted_list
[ "def", "rules", "(", "self", ")", ":", "list_of_rules", "=", "[", "]", "for", "main_row", "in", "self", ".", "dict_rules", ":", "if", "'rules'", "in", "main_row", ":", "for", "rule_row", "in", "main_row", "[", "'rules'", "]", ":", "if", "'grants'", "in", "rule_row", ":", "for", "grant_row", "in", "rule_row", "[", "'grants'", "]", ":", "if", "'group_id'", "in", "grant_row", ":", "# Set a var to not go over 80 chars", "group_id", "=", "grant_row", "[", "'group_id'", "]", "# Some VPC grants don't specify a name", "if", "'name'", "in", "grant_row", ":", "row_name", "=", "grant_row", "[", "'name'", "]", "else", ":", "row_name", "=", "None", "fr", "=", "FirewallRule", "(", "main_row", "[", "'id'", "]", ",", "main_row", "[", "'name'", "]", ",", "main_row", "[", "'description'", "]", ",", "rules_direction", "=", "rule_row", "[", "'direction'", "]", ",", "rules_ip_protocol", "=", "rule_row", "[", "'ip_protocol'", "]", ",", "rules_from_port", "=", "rule_row", "[", "'from_port'", "]", ",", "rules_to_port", "=", "rule_row", "[", "'to_port'", "]", ",", "rules_grants_group_id", "=", "group_id", ",", "rules_grants_name", "=", "row_name", ",", "rules_description", "=", "grant_row", "[", "'description'", "]", ")", "list_of_rules", ".", "append", "(", "fr", ")", "elif", "'cidr_ip'", "in", "grant_row", ":", "fr", "=", "FirewallRule", "(", "main_row", "[", "'id'", "]", ",", "main_row", "[", "'name'", "]", ",", "main_row", "[", "'description'", "]", ",", "rules_direction", "=", "rule_row", "[", "'direction'", "]", ",", "rules_ip_protocol", "=", "rule_row", "[", "'ip_protocol'", "]", ",", "rules_from_port", "=", "rule_row", "[", "'from_port'", "]", ",", "rules_to_port", "=", "rule_row", "[", "'to_port'", "]", ",", "rules_grants_cidr_ip", "=", "grant_row", "[", "'cidr_ip'", "]", ",", "rules_description", "=", "grant_row", "[", "'description'", "]", ")", "list_of_rules", ".", "append", "(", "fr", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported grant:\"", ",", "grant_row", ")", "else", ":", "fr", "=", "FirewallRule", "(", "main_row", "[", "'id'", "]", ",", "main_row", "[", "'name'", "]", ",", "main_row", "[", "'description'", "]", ",", "rules_direction", "=", "rule_row", "[", "'direction'", "]", ",", "rules_ip_protocol", "=", "rule_row", "[", "'ip_protocol'", "]", ",", "rules_from_port", "=", "rule_row", "[", "'from_port'", "]", ",", "rules_to_port", "=", "rule_row", "[", "'to_port'", "]", ")", "list_of_rules", ".", "append", "(", "fr", ")", "else", ":", "fr", "=", "FirewallRule", "(", "main_row", "[", "'id'", "]", ",", "main_row", "[", "'name'", "]", ",", "main_row", "[", "'description'", "]", ")", "list_of_rules", ".", "append", "(", "fr", ")", "# Sort the data in order to get a consistent output", "sorted_list", "=", "sorted", "(", "list_of_rules", ",", "key", "=", "lambda", "fr", ":", "(", "str", "(", "fr", ".", "id", ")", ",", "str", "(", "fr", ".", "name", ")", ",", "str", "(", "fr", ".", "description", ")", ",", "str", "(", "fr", ".", "rules_direction", ")", ",", "str", "(", "fr", ".", "rules_ip_protocol", ")", ",", "str", "(", "fr", ".", "rules_from_port", ")", ",", "str", "(", "fr", ".", "rules_to_port", ")", ",", "str", "(", "fr", ".", "rules_grants_group_id", ")", ",", "str", "(", "fr", ".", "rules_grants_name", ")", ",", "str", "(", "fr", ".", "rules_grants_cidr_ip", ")", ")", ")", "return", "sorted_list" ]
49.878049
21.146341
def getClassAllSupers(self, aURI): """ note: requires SPARQL 1.1 2015-06-04: currently not used, inferred from above """ aURI = aURI try: qres = self.rdfgraph.query( """SELECT DISTINCT ?x WHERE { { <%s> rdfs:subClassOf+ ?x } FILTER (!isBlank(?x)) } """ % (aURI)) except: printDebug("... warning: the 'getClassAllSupers' query failed (maybe missing SPARQL 1.1 support?)") qres = [] return list(qres)
[ "def", "getClassAllSupers", "(", "self", ",", "aURI", ")", ":", "aURI", "=", "aURI", "try", ":", "qres", "=", "self", ".", "rdfgraph", ".", "query", "(", "\"\"\"SELECT DISTINCT ?x\n WHERE {\n { <%s> rdfs:subClassOf+ ?x }\n FILTER (!isBlank(?x))\n }\n \"\"\"", "%", "(", "aURI", ")", ")", "except", ":", "printDebug", "(", "\"... warning: the 'getClassAllSupers' query failed (maybe missing SPARQL 1.1 support?)\"", ")", "qres", "=", "[", "]", "return", "list", "(", "qres", ")" ]
34
14.5
def _handle_tag_defineshape4(self): """Handle the DefineShape4 tag.""" obj = _make_object("DefineShape4") obj.ShapeId = unpack_ui16(self._src) obj.ShapeBounds = self._get_struct_rect() obj.EdgeBounds = self._get_struct_rect() bc = BitConsumer(self._src) bc.u_get(5) # reserved obj.UsesFillWindingRule = bc.u_get(1) obj.UsesNonScalingStrokes = bc.u_get(1) obj.UsesScalingStrokes = bc.u_get(1) obj.Shapes = self._get_struct_shapewithstyle(4) return obj
[ "def", "_handle_tag_defineshape4", "(", "self", ")", ":", "obj", "=", "_make_object", "(", "\"DefineShape4\"", ")", "obj", ".", "ShapeId", "=", "unpack_ui16", "(", "self", ".", "_src", ")", "obj", ".", "ShapeBounds", "=", "self", ".", "_get_struct_rect", "(", ")", "obj", ".", "EdgeBounds", "=", "self", ".", "_get_struct_rect", "(", ")", "bc", "=", "BitConsumer", "(", "self", ".", "_src", ")", "bc", ".", "u_get", "(", "5", ")", "# reserved", "obj", ".", "UsesFillWindingRule", "=", "bc", ".", "u_get", "(", "1", ")", "obj", ".", "UsesNonScalingStrokes", "=", "bc", ".", "u_get", "(", "1", ")", "obj", ".", "UsesScalingStrokes", "=", "bc", ".", "u_get", "(", "1", ")", "obj", ".", "Shapes", "=", "self", ".", "_get_struct_shapewithstyle", "(", "4", ")", "return", "obj" ]
38.214286
9.642857
def _determine_stream_spread_single(sigomatrixEig, thetasTrack, sigOmega, sigAngle, allinvjacsTrack): """sigAngle input may either be a function that returns the dispersion in perpendicular angle as a function of parallel angle, or a value""" #Estimate the spread in all frequencies and angles sigObig2= sigOmega(thetasTrack)**2. tsigOdiag= copy.copy(sigomatrixEig[0]) tsigOdiag[numpy.argmax(tsigOdiag)]= sigObig2 tsigO= numpy.dot(sigomatrixEig[1], numpy.dot(numpy.diag(tsigOdiag), numpy.linalg.inv(sigomatrixEig[1]))) #angles if hasattr(sigAngle,'__call__'): sigangle2= sigAngle(thetasTrack)**2. else: sigangle2= sigAngle**2. tsigadiag= numpy.ones(3)*sigangle2 tsigadiag[numpy.argmax(tsigOdiag)]= 1. tsiga= numpy.dot(sigomatrixEig[1], numpy.dot(numpy.diag(tsigadiag), numpy.linalg.inv(sigomatrixEig[1]))) #correlations, assume half correlated for now (can be calculated) correlations= numpy.diag(0.5*numpy.ones(3))*numpy.sqrt(tsigOdiag*tsigadiag) correlations[numpy.argmax(tsigOdiag),numpy.argmax(tsigOdiag)]= 0. correlations= numpy.dot(sigomatrixEig[1], numpy.dot(correlations, numpy.linalg.inv(sigomatrixEig[1]))) #Now convert fullMatrix= numpy.empty((6,6)) fullMatrix[:3,:3]= tsigO fullMatrix[3:,3:]= tsiga fullMatrix[3:,:3]= correlations fullMatrix[:3,3:]= correlations.T return numpy.dot(allinvjacsTrack,numpy.dot(fullMatrix,allinvjacsTrack.T))
[ "def", "_determine_stream_spread_single", "(", "sigomatrixEig", ",", "thetasTrack", ",", "sigOmega", ",", "sigAngle", ",", "allinvjacsTrack", ")", ":", "#Estimate the spread in all frequencies and angles", "sigObig2", "=", "sigOmega", "(", "thetasTrack", ")", "**", "2.", "tsigOdiag", "=", "copy", ".", "copy", "(", "sigomatrixEig", "[", "0", "]", ")", "tsigOdiag", "[", "numpy", ".", "argmax", "(", "tsigOdiag", ")", "]", "=", "sigObig2", "tsigO", "=", "numpy", ".", "dot", "(", "sigomatrixEig", "[", "1", "]", ",", "numpy", ".", "dot", "(", "numpy", ".", "diag", "(", "tsigOdiag", ")", ",", "numpy", ".", "linalg", ".", "inv", "(", "sigomatrixEig", "[", "1", "]", ")", ")", ")", "#angles", "if", "hasattr", "(", "sigAngle", ",", "'__call__'", ")", ":", "sigangle2", "=", "sigAngle", "(", "thetasTrack", ")", "**", "2.", "else", ":", "sigangle2", "=", "sigAngle", "**", "2.", "tsigadiag", "=", "numpy", ".", "ones", "(", "3", ")", "*", "sigangle2", "tsigadiag", "[", "numpy", ".", "argmax", "(", "tsigOdiag", ")", "]", "=", "1.", "tsiga", "=", "numpy", ".", "dot", "(", "sigomatrixEig", "[", "1", "]", ",", "numpy", ".", "dot", "(", "numpy", ".", "diag", "(", "tsigadiag", ")", ",", "numpy", ".", "linalg", ".", "inv", "(", "sigomatrixEig", "[", "1", "]", ")", ")", ")", "#correlations, assume half correlated for now (can be calculated)", "correlations", "=", "numpy", ".", "diag", "(", "0.5", "*", "numpy", ".", "ones", "(", "3", ")", ")", "*", "numpy", ".", "sqrt", "(", "tsigOdiag", "*", "tsigadiag", ")", "correlations", "[", "numpy", ".", "argmax", "(", "tsigOdiag", ")", ",", "numpy", ".", "argmax", "(", "tsigOdiag", ")", "]", "=", "0.", "correlations", "=", "numpy", ".", "dot", "(", "sigomatrixEig", "[", "1", "]", ",", "numpy", ".", "dot", "(", "correlations", ",", "numpy", ".", "linalg", ".", "inv", "(", "sigomatrixEig", "[", "1", "]", ")", ")", ")", "#Now convert", "fullMatrix", "=", "numpy", ".", "empty", "(", "(", "6", ",", "6", ")", ")", "fullMatrix", "[", ":", "3", ",", ":", "3", "]", "=", "tsigO", "fullMatrix", "[", "3", ":", ",", "3", ":", "]", "=", "tsiga", "fullMatrix", "[", "3", ":", ",", ":", "3", "]", "=", "correlations", "fullMatrix", "[", ":", "3", ",", "3", ":", "]", "=", "correlations", ".", "T", "return", "numpy", ".", "dot", "(", "allinvjacsTrack", ",", "numpy", ".", "dot", "(", "fullMatrix", ",", "allinvjacsTrack", ".", "T", ")", ")" ]
46.972973
12.837838
def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None): """ Analyze `lib_path` for library dependencies and copy libraries `lib_path` is a directory containing libraries. The libraries might themselves have dependencies. This function analyzes the dependencies and copies library dependencies that match the filter `copy_filt_func`. It also adjusts the depending libraries to use the copy. It keeps iterating over `lib_path` until all matching dependencies (of dependencies of dependencies ...) have been copied. Parameters ---------- lib_path : str Directory containing libraries copy_filt_func : None or callable, optional If None, copy any library that the found libraries depend on. If callable, called on each depended library name; copy where ``copy_filt_func(libname)`` is True, don't copy otherwise copied_libs : dict Dict with (key, value) pairs of (``copied_lib_path``, ``dependings_dict``) where ``copied_lib_path`` is the canonical path of a library that has been copied to `lib_path`, and ``dependings_dict`` is a dictionary with (key, value) pairs of (``depending_lib_path``, ``install_name``). ``depending_lib_path`` is the canonical path of the library depending on ``copied_lib_path``, ``install_name`` is the name that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in its install names). Returns ------- copied_libs : dict Input `copied_libs` dict with any extra libraries and / or dependencies added. """ if copied_libs is None: copied_libs = {} else: copied_libs = dict(copied_libs) done = False while not done: in_len = len(copied_libs) _copy_required(lib_path, copy_filt_func, copied_libs) done = len(copied_libs) == in_len return copied_libs
[ "def", "copy_recurse", "(", "lib_path", ",", "copy_filt_func", "=", "None", ",", "copied_libs", "=", "None", ")", ":", "if", "copied_libs", "is", "None", ":", "copied_libs", "=", "{", "}", "else", ":", "copied_libs", "=", "dict", "(", "copied_libs", ")", "done", "=", "False", "while", "not", "done", ":", "in_len", "=", "len", "(", "copied_libs", ")", "_copy_required", "(", "lib_path", ",", "copy_filt_func", ",", "copied_libs", ")", "done", "=", "len", "(", "copied_libs", ")", "==", "in_len", "return", "copied_libs" ]
42.954545
24.568182
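The while-loop is a fixpoint iteration: keep running the single-pass copy until a pass adds no new libraries, which handles dependencies of dependencies to any depth. The same shape on a toy dependency graph (names illustrative):

    deps = {'app': {'libA'}, 'libA': {'libB'}, 'libB': set()}
    copied = {'libA'}
    done = False
    while not done:
        before = len(copied)
        for lib in list(copied):
            copied |= deps[lib]   # stand-in for one _copy_required pass
        done = len(copied) == before
    print(sorted(copied))  # ['libA', 'libB']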
def _generate_cfgnode(self, cfg_job, current_function_addr): """ Generate a CFGNode that starts at `cfg_job.addr`. Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes. If the current architecture is ARM, this method will try to lift the block in the mode specified by the address (determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try the other mode. If the basic block is successfully decoded in the other mode (different from the initial one), `addr` and `current_function_addr` are updated. :param CFGJob cfg_job: The CFGJob instance. :param int current_function_addr: Address of the current function. :return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object) :rtype: tuple """ addr = cfg_job.addr try: if addr in self._nodes: cfg_node = self._nodes[addr] irsb = cfg_node.irsb if cfg_node.function_address != current_function_addr: # the node has been assigned to another function before. # we should update the function address. current_function_addr = cfg_node.function_address return addr, current_function_addr, cfg_node, irsb is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64') if is_arm_arch(self.project.arch): real_addr = addr & (~1) else: real_addr = addr # if possible, check the distance between `addr` and the end of this section distance = VEX_IRSB_MAX_SIZE obj = self.project.loader.find_object_containing(addr, membership_check=False) if obj: # is there a section? has_executable_section = len([ sec for sec in obj.sections if sec.is_executable ]) > 0 # pylint:disable=len-as-condition section = self.project.loader.find_section_containing(addr) if has_executable_section and section is None: # the basic block should not exist here... return None, None, None, None if section is not None: if not section.is_executable: # the section is not executable... return None, None, None, None distance = section.vaddr + section.memsize - real_addr distance = min(distance, VEX_IRSB_MAX_SIZE) # TODO: handle segment information as well # also check the distance between `addr` and the closest function. # we don't want to have a basic block that spans across function boundaries next_func = self.functions.ceiling_func(addr + 1) if next_func is not None: distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr if distance_to_func != 0: if distance is None: distance = distance_to_func else: distance = min(distance, distance_to_func) # in the end, check the distance between `addr` and the closest occupied region in segment list next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance) if next_noncode_addr is not None: distance_to_noncode_addr = next_noncode_addr - addr distance = min(distance, distance_to_noncode_addr) # Let's try to create the pyvex IRSB directly, since it's much faster nodecode = False irsb = None irsb_string = None try: lifted_block = self._lift(addr, size=distance, opt_level=self._iropt_level, collect_data_refs=True) irsb = lifted_block.vex_nostmt irsb_string = lifted_block.bytes[:irsb.size] except SimTranslationError: nodecode = True if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \ is_arm_arch(self.project.arch) and \ self._arch_options.switch_mode_on_nodecode: # maybe the current mode is wrong? 
nodecode = False if addr % 2 == 0: addr_0 = addr + 1 else: addr_0 = addr - 1 if addr_0 in self._nodes: # it has been analyzed before cfg_node = self._nodes[addr_0] irsb = cfg_node.irsb return addr_0, cfg_node.function_address, cfg_node, irsb try: lifted_block = self._lift(addr_0, size=distance, opt_level=self._iropt_level, collect_data_refs=True) irsb = lifted_block.vex_nostmt irsb_string = lifted_block.bytes[:irsb.size] except SimTranslationError: nodecode = True if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'): # it is decodeable if current_function_addr == addr: current_function_addr = addr_0 addr = addr_0 if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode': # decoding error # we still occupy that location since it cannot be decoded anyways if irsb is None: irsb_size = 0 else: irsb_size = irsb.size # special handling for ud, ud1, and ud2 on x86 and x86-64 if is_x86_x64_arch \ and len(irsb_string) >= 2 \ and irsb_string[-2:] in { b'\x0f\xff', # ud0 b'\x0f\xb9', # ud1 b'\x0f\x0b', # ud2 }: # ud0, ud1, and ud2 are actually valid instructions. valid_ins = True nodecode_size = 2 else: valid_ins = False nodecode_size = 1 self._seg_list.occupy(addr, irsb_size, 'code') self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode') if not valid_ins: l.error("Decoding error occurred at address %#x of function %#x.", addr + irsb_size, current_function_addr ) return None, None, None, None is_thumb = False # Occupy the block in segment list if irsb.size > 0: if is_arm_arch(self.project.arch) and addr % 2 == 1: # thumb mode is_thumb=True self._seg_list.occupy(real_addr, irsb.size, "code") # Create a CFG node, and add it to the graph cfg_node = CFGNode(addr, irsb.size, self.model, function_address=current_function_addr, block_id=addr, irsb=irsb, thumb=is_thumb, byte_string=irsb_string, ) if self._cfb is not None: self._cfb.add_obj(addr, lifted_block) self._nodes[addr] = cfg_node self._nodes_by_addr[addr].append(cfg_node) return addr, current_function_addr, cfg_node, irsb except (SimMemoryError, SimEngineError): return None, None, None, None
[ "def", "_generate_cfgnode", "(", "self", ",", "cfg_job", ",", "current_function_addr", ")", ":", "addr", "=", "cfg_job", ".", "addr", "try", ":", "if", "addr", "in", "self", ".", "_nodes", ":", "cfg_node", "=", "self", ".", "_nodes", "[", "addr", "]", "irsb", "=", "cfg_node", ".", "irsb", "if", "cfg_node", ".", "function_address", "!=", "current_function_addr", ":", "# the node has been assigned to another function before.", "# we should update the function address.", "current_function_addr", "=", "cfg_node", ".", "function_address", "return", "addr", ",", "current_function_addr", ",", "cfg_node", ",", "irsb", "is_x86_x64_arch", "=", "self", ".", "project", ".", "arch", ".", "name", "in", "(", "'X86'", ",", "'AMD64'", ")", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", ":", "real_addr", "=", "addr", "&", "(", "~", "1", ")", "else", ":", "real_addr", "=", "addr", "# if possible, check the distance between `addr` and the end of this section", "distance", "=", "VEX_IRSB_MAX_SIZE", "obj", "=", "self", ".", "project", ".", "loader", ".", "find_object_containing", "(", "addr", ",", "membership_check", "=", "False", ")", "if", "obj", ":", "# is there a section?", "has_executable_section", "=", "len", "(", "[", "sec", "for", "sec", "in", "obj", ".", "sections", "if", "sec", ".", "is_executable", "]", ")", ">", "0", "# pylint:disable=len-as-condition", "section", "=", "self", ".", "project", ".", "loader", ".", "find_section_containing", "(", "addr", ")", "if", "has_executable_section", "and", "section", "is", "None", ":", "# the basic block should not exist here...", "return", "None", ",", "None", ",", "None", ",", "None", "if", "section", "is", "not", "None", ":", "if", "not", "section", ".", "is_executable", ":", "# the section is not executable...", "return", "None", ",", "None", ",", "None", ",", "None", "distance", "=", "section", ".", "vaddr", "+", "section", ".", "memsize", "-", "real_addr", "distance", "=", "min", "(", "distance", ",", "VEX_IRSB_MAX_SIZE", ")", "# TODO: handle segment information as well", "# also check the distance between `addr` and the closest function.", "# we don't want to have a basic block that spans across function boundaries", "next_func", "=", "self", ".", "functions", ".", "ceiling_func", "(", "addr", "+", "1", ")", "if", "next_func", "is", "not", "None", ":", "distance_to_func", "=", "(", "next_func", ".", "addr", "&", "(", "~", "1", ")", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "else", "next_func", ".", "addr", ")", "-", "real_addr", "if", "distance_to_func", "!=", "0", ":", "if", "distance", "is", "None", ":", "distance", "=", "distance_to_func", "else", ":", "distance", "=", "min", "(", "distance", ",", "distance_to_func", ")", "# in the end, check the distance between `addr` and the closest occupied region in segment list", "next_noncode_addr", "=", "self", ".", "_seg_list", ".", "next_pos_with_sort_not_in", "(", "addr", ",", "{", "\"code\"", "}", ",", "max_distance", "=", "distance", ")", "if", "next_noncode_addr", "is", "not", "None", ":", "distance_to_noncode_addr", "=", "next_noncode_addr", "-", "addr", "distance", "=", "min", "(", "distance", ",", "distance_to_noncode_addr", ")", "# Let's try to create the pyvex IRSB directly, since it's much faster", "nodecode", "=", "False", "irsb", "=", "None", "irsb_string", "=", "None", "try", ":", "lifted_block", "=", "self", ".", "_lift", "(", "addr", ",", "size", "=", "distance", ",", "opt_level", "=", "self", ".", "_iropt_level", ",", "collect_data_refs", "=", 
"True", ")", "irsb", "=", "lifted_block", ".", "vex_nostmt", "irsb_string", "=", "lifted_block", ".", "bytes", "[", ":", "irsb", ".", "size", "]", "except", "SimTranslationError", ":", "nodecode", "=", "True", "if", "(", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ")", "and", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "and", "self", ".", "_arch_options", ".", "switch_mode_on_nodecode", ":", "# maybe the current mode is wrong?", "nodecode", "=", "False", "if", "addr", "%", "2", "==", "0", ":", "addr_0", "=", "addr", "+", "1", "else", ":", "addr_0", "=", "addr", "-", "1", "if", "addr_0", "in", "self", ".", "_nodes", ":", "# it has been analyzed before", "cfg_node", "=", "self", ".", "_nodes", "[", "addr_0", "]", "irsb", "=", "cfg_node", ".", "irsb", "return", "addr_0", ",", "cfg_node", ".", "function_address", ",", "cfg_node", ",", "irsb", "try", ":", "lifted_block", "=", "self", ".", "_lift", "(", "addr_0", ",", "size", "=", "distance", ",", "opt_level", "=", "self", ".", "_iropt_level", ",", "collect_data_refs", "=", "True", ")", "irsb", "=", "lifted_block", ".", "vex_nostmt", "irsb_string", "=", "lifted_block", ".", "bytes", "[", ":", "irsb", ".", "size", "]", "except", "SimTranslationError", ":", "nodecode", "=", "True", "if", "not", "(", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ")", ":", "# it is decodeable", "if", "current_function_addr", "==", "addr", ":", "current_function_addr", "=", "addr_0", "addr", "=", "addr_0", "if", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ":", "# decoding error", "# we still occupy that location since it cannot be decoded anyways", "if", "irsb", "is", "None", ":", "irsb_size", "=", "0", "else", ":", "irsb_size", "=", "irsb", ".", "size", "# special handling for ud, ud1, and ud2 on x86 and x86-64", "if", "is_x86_x64_arch", "and", "len", "(", "irsb_string", ")", ">=", "2", "and", "irsb_string", "[", "-", "2", ":", "]", "in", "{", "b'\\x0f\\xff'", ",", "# ud0", "b'\\x0f\\xb9'", ",", "# ud1", "b'\\x0f\\x0b'", ",", "# ud2", "}", ":", "# ud0, ud1, and ud2 are actually valid instructions.", "valid_ins", "=", "True", "nodecode_size", "=", "2", "else", ":", "valid_ins", "=", "False", "nodecode_size", "=", "1", "self", ".", "_seg_list", ".", "occupy", "(", "addr", ",", "irsb_size", ",", "'code'", ")", "self", ".", "_seg_list", ".", "occupy", "(", "addr", "+", "irsb_size", ",", "nodecode_size", ",", "'nodecode'", ")", "if", "not", "valid_ins", ":", "l", ".", "error", "(", "\"Decoding error occurred at address %#x of function %#x.\"", ",", "addr", "+", "irsb_size", ",", "current_function_addr", ")", "return", "None", ",", "None", ",", "None", ",", "None", "is_thumb", "=", "False", "# Occupy the block in segment list", "if", "irsb", ".", "size", ">", "0", ":", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "and", "addr", "%", "2", "==", "1", ":", "# thumb mode", "is_thumb", "=", "True", "self", ".", "_seg_list", ".", "occupy", "(", "real_addr", ",", "irsb", ".", "size", ",", "\"code\"", ")", "# Create a CFG node, and add it to the graph", "cfg_node", "=", "CFGNode", "(", "addr", ",", "irsb", ".", "size", ",", "self", ".", "model", ",", "function_address", "=", "current_function_addr", ",", "block_id", "=", "addr", ",", "irsb", "=", "irsb", ",", "thumb", "=", "is_thumb", ",", "byte_string", "=", "irsb_string", ",", ")", "if", "self", ".", "_cfb", 
"is", "not", "None", ":", "self", ".", "_cfb", ".", "add_obj", "(", "addr", ",", "lifted_block", ")", "self", ".", "_nodes", "[", "addr", "]", "=", "cfg_node", "self", ".", "_nodes_by_addr", "[", "addr", "]", ".", "append", "(", "cfg_node", ")", "return", "addr", ",", "current_function_addr", ",", "cfg_node", ",", "irsb", "except", "(", "SimMemoryError", ",", "SimEngineError", ")", ":", "return", "None", ",", "None", ",", "None", ",", "None" ]
45.853801
22.684211
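The mode-switch retry in _generate_cfgnode leans on the ARM convention that the low bit of an address selects the instruction set (even for ARM, odd for THUMB). A minimal sketch of that parity flip, independent of angr's internals:

    def flip_arm_thumb_mode(addr):
        # Even addresses are treated as ARM, odd as THUMB, matching the
        # convention used by _generate_cfgnode above.
        return addr + 1 if addr % 2 == 0 else addr - 1

    assert flip_arm_thumb_mode(0x1000) == 0x1001  # ARM -> THUMB
    assert flip_arm_thumb_mode(0x1001) == 0x1000  # THUMB -> ARM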
def autocompleter(): """Return autocompleter results""" # set blocked engines disabled_engines = request.preferences.engines.get_disabled() # parse query if PY3: raw_text_query = RawTextQuery(request.form.get('q', b''), disabled_engines) else: raw_text_query = RawTextQuery(request.form.get('q', u'').encode('utf-8'), disabled_engines) raw_text_query.parse_query() # check if search query is set if not raw_text_query.getSearchQuery(): return '', 400 # run autocompleter completer = autocomplete_backends.get(request.preferences.get_value('autocomplete')) # parse searx specific autocompleter results like !bang raw_results = searx_bang(raw_text_query) # normal autocompletion results only appear if max 3 inner results returned if len(raw_results) <= 3 and completer: # get language from cookie language = request.preferences.get_value('language') if not language or language == 'all': language = 'en' else: language = language.split('-')[0] # run autocompletion raw_results.extend(completer(raw_text_query.getSearchQuery(), language)) # parse results (write :language and !engine back to result string) results = [] for result in raw_results: raw_text_query.changeSearchQuery(result) # add parsed result results.append(raw_text_query.getFullQuery()) # return autocompleter results if request.form.get('format') == 'x-suggestions': return Response(json.dumps([raw_text_query.query, results]), mimetype='application/json') return Response(json.dumps(results), mimetype='application/json')
[ "def", "autocompleter", "(", ")", ":", "# set blocked engines", "disabled_engines", "=", "request", ".", "preferences", ".", "engines", ".", "get_disabled", "(", ")", "# parse query", "if", "PY3", ":", "raw_text_query", "=", "RawTextQuery", "(", "request", ".", "form", ".", "get", "(", "'q'", ",", "b''", ")", ",", "disabled_engines", ")", "else", ":", "raw_text_query", "=", "RawTextQuery", "(", "request", ".", "form", ".", "get", "(", "'q'", ",", "u''", ")", ".", "encode", "(", "'utf-8'", ")", ",", "disabled_engines", ")", "raw_text_query", ".", "parse_query", "(", ")", "# check if search query is set", "if", "not", "raw_text_query", ".", "getSearchQuery", "(", ")", ":", "return", "''", ",", "400", "# run autocompleter", "completer", "=", "autocomplete_backends", ".", "get", "(", "request", ".", "preferences", ".", "get_value", "(", "'autocomplete'", ")", ")", "# parse searx specific autocompleter results like !bang", "raw_results", "=", "searx_bang", "(", "raw_text_query", ")", "# normal autocompletion results only appear if max 3 inner results returned", "if", "len", "(", "raw_results", ")", "<=", "3", "and", "completer", ":", "# get language from cookie", "language", "=", "request", ".", "preferences", ".", "get_value", "(", "'language'", ")", "if", "not", "language", "or", "language", "==", "'all'", ":", "language", "=", "'en'", "else", ":", "language", "=", "language", ".", "split", "(", "'-'", ")", "[", "0", "]", "# run autocompletion", "raw_results", ".", "extend", "(", "completer", "(", "raw_text_query", ".", "getSearchQuery", "(", ")", ",", "language", ")", ")", "# parse results (write :language and !engine back to result string)", "results", "=", "[", "]", "for", "result", "in", "raw_results", ":", "raw_text_query", ".", "changeSearchQuery", "(", "result", ")", "# add parsed result", "results", ".", "append", "(", "raw_text_query", ".", "getFullQuery", "(", ")", ")", "# return autocompleter results", "if", "request", ".", "form", ".", "get", "(", "'format'", ")", "==", "'x-suggestions'", ":", "return", "Response", "(", "json", ".", "dumps", "(", "[", "raw_text_query", ".", "query", ",", "results", "]", ")", ",", "mimetype", "=", "'application/json'", ")", "return", "Response", "(", "json", ".", "dumps", "(", "results", ")", ",", "mimetype", "=", "'application/json'", ")" ]
34.816327
22.530612
def ctxtResetPush(self, chunk, size, filename, encoding): """Reset a push parser context """ ret = libxml2mod.xmlCtxtResetPush(self._o, chunk, size, filename, encoding) return ret
[ "def", "ctxtResetPush", "(", "self", ",", "chunk", ",", "size", ",", "filename", ",", "encoding", ")", ":", "ret", "=", "libxml2mod", ".", "xmlCtxtResetPush", "(", "self", ".", "_o", ",", "chunk", ",", "size", ",", "filename", ",", "encoding", ")", "return", "ret" ]
50
20.5
def _getGroundTruth(self, inferenceElement):
    """ Get the actual value for this field

    Parameters:
    -----------------------------------------------------------------------
    inferenceElement:   The inference element (part of the inference) that
                        is being used for this metric
    """
    sensorInputElement = InferenceElement.getInputElement(inferenceElement)
    if sensorInputElement is None:
      return None

    return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement)
[ "def", "_getGroundTruth", "(", "self", ",", "inferenceElement", ")", ":", "sensorInputElement", "=", "InferenceElement", ".", "getInputElement", "(", "inferenceElement", ")", "if", "sensorInputElement", "is", "None", ":", "return", "None", "return", "getattr", "(", "self", ".", "__currentGroundTruth", ".", "sensorInput", ",", "sensorInputElement", ")" ]
40.538462
20.230769
def get_subject(self): """ The assertion must contain a Subject """ assert self.assertion.subject subject = self.assertion.subject subjconf = [] if not self.verify_attesting_entity(subject.subject_confirmation): raise VerificationError("No valid attesting address") for subject_confirmation in subject.subject_confirmation: _data = subject_confirmation.subject_confirmation_data if subject_confirmation.method == SCM_BEARER: if not self._bearer_confirmed(_data): continue elif subject_confirmation.method == SCM_HOLDER_OF_KEY: if not self._holder_of_key_confirmed(_data): continue elif subject_confirmation.method == SCM_SENDER_VOUCHES: pass else: raise ValueError("Unknown subject confirmation method: %s" % ( subject_confirmation.method,)) _recip = _data.recipient if not _recip or not self.verify_recipient(_recip): raise VerificationError("No valid recipient") subjconf.append(subject_confirmation) if not subjconf: raise VerificationError("No valid subject confirmation") subject.subject_confirmation = subjconf # The subject may contain a name_id if subject.name_id: self.name_id = subject.name_id elif subject.encrypted_id: # decrypt encrypted ID _name_id_str = self.sec.decrypt( subject.encrypted_id.encrypted_data.to_string()) _name_id = saml.name_id_from_string(_name_id_str) self.name_id = _name_id logger.info("Subject NameID: %s", self.name_id) return self.name_id
[ "def", "get_subject", "(", "self", ")", ":", "assert", "self", ".", "assertion", ".", "subject", "subject", "=", "self", ".", "assertion", ".", "subject", "subjconf", "=", "[", "]", "if", "not", "self", ".", "verify_attesting_entity", "(", "subject", ".", "subject_confirmation", ")", ":", "raise", "VerificationError", "(", "\"No valid attesting address\"", ")", "for", "subject_confirmation", "in", "subject", ".", "subject_confirmation", ":", "_data", "=", "subject_confirmation", ".", "subject_confirmation_data", "if", "subject_confirmation", ".", "method", "==", "SCM_BEARER", ":", "if", "not", "self", ".", "_bearer_confirmed", "(", "_data", ")", ":", "continue", "elif", "subject_confirmation", ".", "method", "==", "SCM_HOLDER_OF_KEY", ":", "if", "not", "self", ".", "_holder_of_key_confirmed", "(", "_data", ")", ":", "continue", "elif", "subject_confirmation", ".", "method", "==", "SCM_SENDER_VOUCHES", ":", "pass", "else", ":", "raise", "ValueError", "(", "\"Unknown subject confirmation method: %s\"", "%", "(", "subject_confirmation", ".", "method", ",", ")", ")", "_recip", "=", "_data", ".", "recipient", "if", "not", "_recip", "or", "not", "self", ".", "verify_recipient", "(", "_recip", ")", ":", "raise", "VerificationError", "(", "\"No valid recipient\"", ")", "subjconf", ".", "append", "(", "subject_confirmation", ")", "if", "not", "subjconf", ":", "raise", "VerificationError", "(", "\"No valid subject confirmation\"", ")", "subject", ".", "subject_confirmation", "=", "subjconf", "# The subject may contain a name_id", "if", "subject", ".", "name_id", ":", "self", ".", "name_id", "=", "subject", ".", "name_id", "elif", "subject", ".", "encrypted_id", ":", "# decrypt encrypted ID", "_name_id_str", "=", "self", ".", "sec", ".", "decrypt", "(", "subject", ".", "encrypted_id", ".", "encrypted_data", ".", "to_string", "(", ")", ")", "_name_id", "=", "saml", ".", "name_id_from_string", "(", "_name_id_str", ")", "self", ".", "name_id", "=", "_name_id", "logger", ".", "info", "(", "\"Subject NameID: %s\"", ",", "self", ".", "name_id", ")", "return", "self", ".", "name_id" ]
36.469388
20.163265
def validate_is_not_none(config_val, evar): """ If the value is ``None``, fail validation. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :raises: ValueError if the config value is None. """ if config_val is None: raise ValueError( "Value for environment variable '{evar_name}' can't " "be empty.".format(evar_name=evar.name)) return config_val
[ "def", "validate_is_not_none", "(", "config_val", ",", "evar", ")", ":", "if", "config_val", "is", "None", ":", "raise", "ValueError", "(", "\"Value for environment variable '{evar_name}' can't \"", "\"be empty.\"", ".", "format", "(", "evar_name", "=", "evar", ".", "name", ")", ")", "return", "config_val" ]
34.214286
14.357143
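A quick usage sketch; the evar object below is a hypothetical stand-in that only carries the name attribute the error message needs:

    class FakeEVar:
        name = 'DATABASE_URL'  # hypothetical variable name, for illustration only

    validate_is_not_none('postgres://localhost/db', FakeEVar())  # returns the value unchanged
    validate_is_not_none(None, FakeEVar())  # raises ValueError: can't be empty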
def compute_gas_limit_bounds(parent: BlockHeader) -> Tuple[int, int]: """ Compute the boundaries for the block gas limit based on the parent block. """ boundary_range = parent.gas_limit // GAS_LIMIT_ADJUSTMENT_FACTOR upper_bound = parent.gas_limit + boundary_range lower_bound = max(GAS_LIMIT_MINIMUM, parent.gas_limit - boundary_range) return lower_bound, upper_bound
[ "def", "compute_gas_limit_bounds", "(", "parent", ":", "BlockHeader", ")", "->", "Tuple", "[", "int", ",", "int", "]", ":", "boundary_range", "=", "parent", ".", "gas_limit", "//", "GAS_LIMIT_ADJUSTMENT_FACTOR", "upper_bound", "=", "parent", ".", "gas_limit", "+", "boundary_range", "lower_bound", "=", "max", "(", "GAS_LIMIT_MINIMUM", ",", "parent", ".", "gas_limit", "-", "boundary_range", ")", "return", "lower_bound", ",", "upper_bound" ]
48.625
18.125
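A worked example of the bound computation; the constant values below are assumptions for illustration (mainnet-style 1024 and 5000), not necessarily what this module defines:

    GAS_LIMIT_ADJUSTMENT_FACTOR = 1024  # assumed value
    GAS_LIMIT_MINIMUM = 5000            # assumed value

    parent_gas_limit = 8_000_000
    boundary_range = parent_gas_limit // GAS_LIMIT_ADJUSTMENT_FACTOR  # 7812
    upper_bound = parent_gas_limit + boundary_range                   # 8_007_812
    lower_bound = max(GAS_LIMIT_MINIMUM,
                      parent_gas_limit - boundary_range)              # 7_992_188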
def defaults(features): """ Returns the default property values for the given features. """ assert is_iterable_typed(features, Feature) # FIXME: should merge feature and property modules. from . import property result = [] for f in features: if not f.free and not f.optional and f.default: result.append(property.Property(f, f.default)) return result
[ "def", "defaults", "(", "features", ")", ":", "assert", "is_iterable_typed", "(", "features", ",", "Feature", ")", "# FIXME: should merge feature and property modules.", "from", ".", "import", "property", "result", "=", "[", "]", "for", "f", "in", "features", ":", "if", "not", "f", ".", "free", "and", "not", "f", ".", "optional", "and", "f", ".", "default", ":", "result", ".", "append", "(", "property", ".", "Property", "(", "f", ",", "f", ".", "default", ")", ")", "return", "result" ]
30.153846
17.846154
def _insert_optional_roles(cursor, model, ident): """Inserts the optional roles if values for the optional roles exist. """ optional_roles = [ # (<metadata-attr>, <db-role-id>,), ('translators', 4,), ('editors', 5,), ] for attr, role_id in optional_roles: roles = model.metadata.get(attr) if not roles: # Bail out, no roles for this type. continue usernames = [parse_user_uri(x['id']) for x in roles] cursor.execute("""\ INSERT INTO moduleoptionalroles (module_ident, roleid, personids) VALUES (%s, %s, %s)""", (ident, role_id, usernames,))
[ "def", "_insert_optional_roles", "(", "cursor", ",", "model", ",", "ident", ")", ":", "optional_roles", "=", "[", "# (<metadata-attr>, <db-role-id>,),", "(", "'translators'", ",", "4", ",", ")", ",", "(", "'editors'", ",", "5", ",", ")", ",", "]", "for", "attr", ",", "role_id", "in", "optional_roles", ":", "roles", "=", "model", ".", "metadata", ".", "get", "(", "attr", ")", "if", "not", "roles", ":", "# Bail out, no roles for this type.", "continue", "usernames", "=", "[", "parse_user_uri", "(", "x", "[", "'id'", "]", ")", "for", "x", "in", "roles", "]", "cursor", ".", "execute", "(", "\"\"\"\\\nINSERT INTO moduleoptionalroles (module_ident, roleid, personids)\nVALUES (%s, %s, %s)\"\"\"", ",", "(", "ident", ",", "role_id", ",", "usernames", ",", ")", ")" ]
34.833333
11.888889
def check_presence_of_mandatory_args(args, mandatory_args):
    '''
    Checks whether all mandatory arguments are passed.

    This function aims at methods with many arguments
    which are passed as kwargs so that the order in which
    they are passed does not matter.

    :args: The dictionary passed as args.
    :mandatory_args: A list of keys that have to be
        present in the dictionary.
    :raise: :exc:`~ValueError`
    :returns: True, if all mandatory args are passed. If not,
        an exception is raised.

    '''
    missing_args = []
    for name in mandatory_args:
        if name not in args.keys():
            missing_args.append(name)
    if len(missing_args) > 0:
        raise ValueError('Missing mandatory arguments: '+', '.join(missing_args))
    else:
        return True
[ "def", "check_presence_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", ":", "missing_args", "=", "[", "]", "for", "name", "in", "mandatory_args", ":", "if", "name", "not", "in", "args", ".", "keys", "(", ")", ":", "missing_args", ".", "append", "(", "name", ")", "if", "len", "(", "missing_args", ")", ">", "0", ":", "raise", "ValueError", "(", "'Missing mandatory arguments: '", "+", "', '", ".", "join", "(", "missing_args", ")", ")", "else", ":", "return", "True" ]
32.916667
18.75
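A short usage sketch (the argument names 'host' and 'port' are made up for illustration):

    def connect(**kwargs):
        check_presence_of_mandatory_args(kwargs, ['host', 'port'])
        # ... proceed knowing both keys exist ...

    connect(host='localhost', port=5432)  # validation passes
    connect(host='localhost')             # raises ValueError: Missing mandatory arguments: port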
def abs(self): """Apply an absolute value function to all numeric columns. Returns: A new DataFrame with the applied absolute value. """ self._validate_dtypes(numeric_only=True) return self.__constructor__(query_compiler=self._query_compiler.abs())
[ "def", "abs", "(", "self", ")", ":", "self", ".", "_validate_dtypes", "(", "numeric_only", "=", "True", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "self", ".", "_query_compiler", ".", "abs", "(", ")", ")" ]
37.625
19.375
async def async_set_switch_state( self, switch_number: SwitchNumber, state: bool) -> None: """ Turn a switch on or off. :param switch_number: the switch to be set. :param state: True to turn on, False to turn off. """ await self._protocol.async_execute( SetSwitchCommand( switch_number, SwitchState.On if state else SwitchState.Off))
[ "async", "def", "async_set_switch_state", "(", "self", ",", "switch_number", ":", "SwitchNumber", ",", "state", ":", "bool", ")", "->", "None", ":", "await", "self", ".", "_protocol", ".", "async_execute", "(", "SetSwitchCommand", "(", "switch_number", ",", "SwitchState", ".", "On", "if", "state", "else", "SwitchState", ".", "Off", ")", ")" ]
32.846154
15.153846
def remote(ctx): """Display repo github path """ with command(): m = RepoManager(ctx.obj['agile']) click.echo(m.github_repo().repo_path)
[ "def", "remote", "(", "ctx", ")", ":", "with", "command", "(", ")", ":", "m", "=", "RepoManager", "(", "ctx", ".", "obj", "[", "'agile'", "]", ")", "click", ".", "echo", "(", "m", ".", "github_repo", "(", ")", ".", "repo_path", ")" ]
26.5
8.5
def _input_stmt(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle RPC or action input statement.""" self.get_child("input")._handle_substatements(stmt, sctx)
[ "def", "_input_stmt", "(", "self", ",", "stmt", ":", "Statement", ",", "sctx", ":", "SchemaContext", ")", "->", "None", ":", "self", ".", "get_child", "(", "\"input\"", ")", ".", "_handle_substatements", "(", "stmt", ",", "sctx", ")" ]
61.333333
17.666667
def add_query_occurrence(self, report): """Adds a report to the report aggregation""" initial_millis = int(report['parsed']['stats']['millis']) mask = report['queryMask'] existing_report = self._get_existing_report(mask, report) if existing_report is not None: self._merge_report(existing_report, report) else: time = None if 'ts' in report['parsed']: time = report['parsed']['ts'] self._reports.append(OrderedDict([ ('namespace', report['namespace']), ('lastSeenDate', time), ('queryMask', mask), ('supported', report['queryAnalysis']['supported']), ('indexStatus', report['indexStatus']), ('recommendation', report['recommendation']), ('stats', OrderedDict([('count', 1), ('totalTimeMillis', initial_millis), ('avgTimeMillis', initial_millis)]))]))
[ "def", "add_query_occurrence", "(", "self", ",", "report", ")", ":", "initial_millis", "=", "int", "(", "report", "[", "'parsed'", "]", "[", "'stats'", "]", "[", "'millis'", "]", ")", "mask", "=", "report", "[", "'queryMask'", "]", "existing_report", "=", "self", ".", "_get_existing_report", "(", "mask", ",", "report", ")", "if", "existing_report", "is", "not", "None", ":", "self", ".", "_merge_report", "(", "existing_report", ",", "report", ")", "else", ":", "time", "=", "None", "if", "'ts'", "in", "report", "[", "'parsed'", "]", ":", "time", "=", "report", "[", "'parsed'", "]", "[", "'ts'", "]", "self", ".", "_reports", ".", "append", "(", "OrderedDict", "(", "[", "(", "'namespace'", ",", "report", "[", "'namespace'", "]", ")", ",", "(", "'lastSeenDate'", ",", "time", ")", ",", "(", "'queryMask'", ",", "mask", ")", ",", "(", "'supported'", ",", "report", "[", "'queryAnalysis'", "]", "[", "'supported'", "]", ")", ",", "(", "'indexStatus'", ",", "report", "[", "'indexStatus'", "]", ")", ",", "(", "'recommendation'", ",", "report", "[", "'recommendation'", "]", ")", ",", "(", "'stats'", ",", "OrderedDict", "(", "[", "(", "'count'", ",", "1", ")", ",", "(", "'totalTimeMillis'", ",", "initial_millis", ")", ",", "(", "'avgTimeMillis'", ",", "initial_millis", ")", "]", ")", ")", "]", ")", ")" ]
43
17.208333
def duplicates(*iterables, **kwargs): """ Yield duplicate items from any number of sorted iterables of items >>> items_a = [1, 2, 3] >>> items_b = [0, 3, 4, 5, 6] >>> list(duplicates(items_a, items_b)) [(3, 3)] It won't behave as you expect if the iterables aren't ordered >>> items_b.append(1) >>> list(duplicates(items_a, items_b)) [(3, 3)] >>> list(duplicates(items_a, sorted(items_b))) [(1, 1), (3, 3)] This function is most interesting when it's operating on a key of more complex objects. >>> items_a = [dict(email='[email protected]', id=1)] >>> items_b = [dict(email='[email protected]', id=2), dict(email='other')] >>> dupe, = duplicates(items_a, items_b, key=operator.itemgetter('email')) >>> dupe[0]['email'] == dupe[1]['email'] == '[email protected]' True >>> dupe[0]['id'] 1 >>> dupe[1]['id'] 2 """ key = kwargs.pop('key', lambda x: x) assert not kwargs zipped = more_itertools.collate(*iterables, key=key) grouped = itertools.groupby(zipped, key=key) groups = ( tuple(g) for k, g in grouped ) def has_dupes(group): return len(group) > 1 return filter(has_dupes, groups)
[ "def", "duplicates", "(", "*", "iterables", ",", "*", "*", "kwargs", ")", ":", "key", "=", "kwargs", ".", "pop", "(", "'key'", ",", "lambda", "x", ":", "x", ")", "assert", "not", "kwargs", "zipped", "=", "more_itertools", ".", "collate", "(", "*", "iterables", ",", "key", "=", "key", ")", "grouped", "=", "itertools", ".", "groupby", "(", "zipped", ",", "key", "=", "key", ")", "groups", "=", "(", "tuple", "(", "g", ")", "for", "k", ",", "g", "in", "grouped", ")", "def", "has_dupes", "(", "group", ")", ":", "return", "len", "(", "group", ")", ">", "1", "return", "filter", "(", "has_dupes", ",", "groups", ")" ]
25.714286
22.047619
def squeeze(attrs, inputs, proto_obj): """Remove single-dimensional entries from the shape of a tensor.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes' : 'axis'}) return 'squeeze', new_attrs, inputs
[ "def", "squeeze", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", "}", ")", "return", "'squeeze'", ",", "new_attrs", ",", "inputs" ]
56.4
11.4
def standard_system_dimensions(num_boards): """Calculate the standard network dimensions (in chips) for a full torus system with the specified number of SpiNN-5 boards. Returns ------- (w, h) Width and height of the network in chips. Standard SpiNNaker systems are constructed as squarely as possible given the number of boards available. When a square system cannot be made, the function prefers wider systems over taller systems. Raises ------ ValueError If the number of boards is not a multiple of three. """ # Special case to avoid division by 0 if num_boards == 0: return (0, 0) # Special case: meaningful systems with 1 board can exist if num_boards == 1: return (8, 8) if num_boards % 3 != 0: raise ValueError("{} is not a multiple of 3".format(num_boards)) # Find the largest pair of factors to discover the squarest system in terms # of triads of boards. for h in reversed( # pragma: no branch range(1, int(sqrt(num_boards // 3)) + 1)): if (num_boards // 3) % h == 0: break w = (num_boards // 3) // h # Convert the number of triads into numbers of chips (each triad of boards # contributes as 12x12 block of chips). return (w * 12, h * 12)
[ "def", "standard_system_dimensions", "(", "num_boards", ")", ":", "# Special case to avoid division by 0", "if", "num_boards", "==", "0", ":", "return", "(", "0", ",", "0", ")", "# Special case: meaningful systems with 1 board can exist", "if", "num_boards", "==", "1", ":", "return", "(", "8", ",", "8", ")", "if", "num_boards", "%", "3", "!=", "0", ":", "raise", "ValueError", "(", "\"{} is not a multiple of 3\"", ".", "format", "(", "num_boards", ")", ")", "# Find the largest pair of factors to discover the squarest system in terms", "# of triads of boards.", "for", "h", "in", "reversed", "(", "# pragma: no branch", "range", "(", "1", ",", "int", "(", "sqrt", "(", "num_boards", "//", "3", ")", ")", "+", "1", ")", ")", ":", "if", "(", "num_boards", "//", "3", ")", "%", "h", "==", "0", ":", "break", "w", "=", "(", "num_boards", "//", "3", ")", "//", "h", "# Convert the number of triads into numbers of chips (each triad of boards", "# contributes as 12x12 block of chips).", "return", "(", "w", "*", "12", ",", "h", "*", "12", ")" ]
31.804878
22.707317
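Tracing the factor search by hand, as a sketch of the expected return values:

    standard_system_dimensions(3)   # (12, 12) -- one triad of boards
    standard_system_dimensions(6)   # (24, 12) -- two triads, wider than tall
    standard_system_dimensions(12)  # (24, 24) -- four triads arranged 2x2
    standard_system_dimensions(4)   # raises ValueError: 4 is not a multiple of 3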
def create_affinity_group(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Create a new affinity group CLI Example: .. code-block:: bash salt-cloud -f create_affinity_group my-azure name=my_affinity_group ''' if call != 'function': raise SaltCloudSystemExit( 'The create_affinity_group function must be called with -f or --function.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'label' not in kwargs: raise SaltCloudSystemExit('A label must be specified as "label"') if 'location' not in kwargs: raise SaltCloudSystemExit('A location must be specified as "location"') try: conn.create_affinity_group( kwargs['name'], kwargs['label'], kwargs['location'], kwargs.get('description', None), ) return {'Success': 'The affinity group was successfully created'} except AzureConflictHttpError: raise SaltCloudSystemExit('There was a conflict. This usually means that the affinity group already exists.')
[ "def", "create_affinity_group", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The create_affinity_group function must be called with -f or --function.'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A name must be specified as \"name\"'", ")", "if", "'label'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A label must be specified as \"label\"'", ")", "if", "'location'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A location must be specified as \"location\"'", ")", "try", ":", "conn", ".", "create_affinity_group", "(", "kwargs", "[", "'name'", "]", ",", "kwargs", "[", "'label'", "]", ",", "kwargs", "[", "'location'", "]", ",", "kwargs", ".", "get", "(", "'description'", ",", "None", ")", ",", ")", "return", "{", "'Success'", ":", "'The affinity group was successfully created'", "}", "except", "AzureConflictHttpError", ":", "raise", "SaltCloudSystemExit", "(", "'There was a conflict. This usually means that the affinity group already exists.'", ")" ]
28.714286
26.47619
def sevenths(reference_labels, estimated_labels): """Compare chords along MIREX 'sevenths' rules. Chords with qualities outside [maj, maj7, 7, min, min7, N] are ignored. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> est_intervals, est_labels = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, ref_intervals.min(), ... ref_intervals.max(), mir_eval.chord.NO_CHORD, ... mir_eval.chord.NO_CHORD) >>> (intervals, ... ref_labels, ... est_labels) = mir_eval.util.merge_labeled_intervals( ... ref_intervals, ref_labels, est_intervals, est_labels) >>> durations = mir_eval.util.intervals_to_durations(intervals) >>> comparisons = mir_eval.chord.sevenths(ref_labels, est_labels) >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations) Parameters ---------- reference_labels : list, len=n Reference chord labels to score against. estimated_labels : list, len=n Estimated chord labels to score against. Returns ------- comparison_scores : np.ndarray, shape=(n,), dtype=float Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of gamut. """ validate(reference_labels, estimated_labels) seventh_qualities = ['maj', 'min', 'maj7', '7', 'min7', ''] valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities]) ref_roots, ref_semitones = encode_many(reference_labels, False)[:2] est_roots, est_semitones = encode_many(estimated_labels, False)[:2] eq_root = ref_roots == est_roots eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1) comparison_scores = (eq_root * eq_semitones).astype(np.float) # Test for reference chord inclusion is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1) for semitones in valid_semitones]) # Drop if NOR comparison_scores[np.sum(is_valid, axis=0) == 0] = -1 return comparison_scores
[ "def", "sevenths", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "seventh_qualities", "=", "[", "'maj'", ",", "'min'", ",", "'maj7'", ",", "'7'", ",", "'min7'", ",", "''", "]", "valid_semitones", "=", "np", ".", "array", "(", "[", "QUALITIES", "[", "name", "]", "for", "name", "in", "seventh_qualities", "]", ")", "ref_roots", ",", "ref_semitones", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "[", ":", "2", "]", "est_roots", ",", "est_semitones", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "[", ":", "2", "]", "eq_root", "=", "ref_roots", "==", "est_roots", "eq_semitones", "=", "np", ".", "all", "(", "np", ".", "equal", "(", "ref_semitones", ",", "est_semitones", ")", ",", "axis", "=", "1", ")", "comparison_scores", "=", "(", "eq_root", "*", "eq_semitones", ")", ".", "astype", "(", "np", ".", "float", ")", "# Test for reference chord inclusion", "is_valid", "=", "np", ".", "array", "(", "[", "np", ".", "all", "(", "np", ".", "equal", "(", "ref_semitones", ",", "semitones", ")", ",", "axis", "=", "1", ")", "for", "semitones", "in", "valid_semitones", "]", ")", "# Drop if NOR", "comparison_scores", "[", "np", ".", "sum", "(", "is_valid", ",", "axis", "=", "0", ")", "==", "0", "]", "=", "-", "1", "return", "comparison_scores" ]
40.09434
22.811321
def get_config(ini_path=None, rootdir=None): """ Load configuration from INI. :return Namespace: """ config = Namespace() config.default_section = 'pylama' if not ini_path: path = get_default_config_file(rootdir) if path: config.read(path) else: config.read(ini_path) return config
[ "def", "get_config", "(", "ini_path", "=", "None", ",", "rootdir", "=", "None", ")", ":", "config", "=", "Namespace", "(", ")", "config", ".", "default_section", "=", "'pylama'", "if", "not", "ini_path", ":", "path", "=", "get_default_config_file", "(", "rootdir", ")", "if", "path", ":", "config", ".", "read", "(", "path", ")", "else", ":", "config", ".", "read", "(", "ini_path", ")", "return", "config" ]
19.823529
19.294118
def makeicons(source):
    """ Create all the necessary icons from source image """
    im = Image.open(source)
    for name, (_, w, h, func) in icon_sizes.iteritems():
        print('Making icon %s...' % name)
        tn = func(im, (w, h))
        bg = Image.new('RGBA', (w, h), (255, 255, 255))
        x = (w / 2) - (tn.size[0] / 2)
        y = (h / 2) - (tn.size[1] / 2)
        bg.paste(tn, (x, y))
        bg.save(path.join(env.dir, name))
[ "def", "makeicons", "(", "source", ")", ":", "im", "=", "Image", ".", "open", "(", "source", ")", "for", "name", ",", "(", "_", ",", "w", ",", "h", ",", "func", ")", "in", "icon_sizes", ".", "iteritems", "(", ")", ":", "print", "(", "'Making icon %s...'", "%", "name", ")", "tn", "=", "func", "(", "im", ",", "(", "w", ",", "h", ")", ")", "bg", "=", "Image", ".", "new", "(", "'RGBA'", ",", "(", "w", ",", "h", ")", ",", "(", "255", ",", "255", ",", "255", ")", ")", "x", "=", "(", "w", "/", "2", ")", "-", "(", "tn", ".", "size", "[", "0", "]", "/", "2", ")", "y", "=", "(", "h", "/", "2", ")", "-", "(", "tn", ".", "size", "[", "1", "]", "/", "2", ")", "bg", ".", "paste", "(", "tn", ",", "(", "x", ",", "y", ")", ")", "bg", ".", "save", "(", "path", ".", "join", "(", "env", ".", "dir", ",", "name", ")", ")" ]
31.571429
10.285714
def comm_grid(patch, cols, splits, divs, metric='Sorensen'): """ Calculates commonality as a function of distance for a gridded patch Parameters ---------- {0} divs : str Description of how to divide x_col and y_col. Unlike SAR and EAR, only one division can be given at a time. See notes. metric : str One of Sorensen or Jaccard, giving the metric to use for commonality calculation Returns ------- {1} Result has three columns, pair, x, and y, that give the locations of the pair of patches for which commonality is calculated, the distance between those cells, and the Sorensen or Jaccard result. Notes ----- {2} For gridded commonality, cols must also contain x_col and y_col, giving the x and y dimensions along which to grid the patch. {3} """ (spp_col, count_col, x_col, y_col), patch = \ _get_cols(['spp_col', 'count_col', 'x_col', 'y_col'], cols, patch) # Loop through each split result_list = [] for substring, subpatch in _yield_subpatches(patch, splits): # Get spatial table and break out columns spatial_table = _yield_spatial_table(subpatch, divs, spp_col, count_col, x_col, y_col) spp_set = spatial_table['spp_set'] cell_loc = spatial_table['cell_loc'] n_spp = spatial_table['n_spp'] # Get all possible pairwise combinations of cells pair_list = [] dist_list = [] comm_list = [] for i in range(len(spatial_table)): for j in range(i+1, len(spatial_table)): iloc = np.round(cell_loc[i], 6) jloc = np.round(cell_loc[j], 6) pair_list.append('('+str(iloc[0])+' '+str(iloc[1])+') - '+ '('+str(jloc[0])+' '+str(jloc[1])+')') dist_list.append(_distance(cell_loc[i], cell_loc[j])) ij_intersect = spp_set[i] & spp_set[j] if metric.lower() == 'sorensen': comm = 2*len(ij_intersect) / (n_spp[i] + n_spp[j]) elif metric.lower() == 'jaccard': comm = len(ij_intersect) / len(spp_set[i] | spp_set[j]) else: raise ValueError, ("Only Sorensen and Jaccard metrics are " "available for gridded commonality") comm_list.append(comm) # Append subset result subresult = pd.DataFrame({'pair': pair_list, 'x': dist_list, 'y': comm_list}) result_list.append((substring, subresult)) # Return all results return result_list
[ "def", "comm_grid", "(", "patch", ",", "cols", ",", "splits", ",", "divs", ",", "metric", "=", "'Sorensen'", ")", ":", "(", "spp_col", ",", "count_col", ",", "x_col", ",", "y_col", ")", ",", "patch", "=", "_get_cols", "(", "[", "'spp_col'", ",", "'count_col'", ",", "'x_col'", ",", "'y_col'", "]", ",", "cols", ",", "patch", ")", "# Loop through each split", "result_list", "=", "[", "]", "for", "substring", ",", "subpatch", "in", "_yield_subpatches", "(", "patch", ",", "splits", ")", ":", "# Get spatial table and break out columns", "spatial_table", "=", "_yield_spatial_table", "(", "subpatch", ",", "divs", ",", "spp_col", ",", "count_col", ",", "x_col", ",", "y_col", ")", "spp_set", "=", "spatial_table", "[", "'spp_set'", "]", "cell_loc", "=", "spatial_table", "[", "'cell_loc'", "]", "n_spp", "=", "spatial_table", "[", "'n_spp'", "]", "# Get all possible pairwise combinations of cells", "pair_list", "=", "[", "]", "dist_list", "=", "[", "]", "comm_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "spatial_table", ")", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "spatial_table", ")", ")", ":", "iloc", "=", "np", ".", "round", "(", "cell_loc", "[", "i", "]", ",", "6", ")", "jloc", "=", "np", ".", "round", "(", "cell_loc", "[", "j", "]", ",", "6", ")", "pair_list", ".", "append", "(", "'('", "+", "str", "(", "iloc", "[", "0", "]", ")", "+", "' '", "+", "str", "(", "iloc", "[", "1", "]", ")", "+", "') - '", "+", "'('", "+", "str", "(", "jloc", "[", "0", "]", ")", "+", "' '", "+", "str", "(", "jloc", "[", "1", "]", ")", "+", "')'", ")", "dist_list", ".", "append", "(", "_distance", "(", "cell_loc", "[", "i", "]", ",", "cell_loc", "[", "j", "]", ")", ")", "ij_intersect", "=", "spp_set", "[", "i", "]", "&", "spp_set", "[", "j", "]", "if", "metric", ".", "lower", "(", ")", "==", "'sorensen'", ":", "comm", "=", "2", "*", "len", "(", "ij_intersect", ")", "/", "(", "n_spp", "[", "i", "]", "+", "n_spp", "[", "j", "]", ")", "elif", "metric", ".", "lower", "(", ")", "==", "'jaccard'", ":", "comm", "=", "len", "(", "ij_intersect", ")", "/", "len", "(", "spp_set", "[", "i", "]", "|", "spp_set", "[", "j", "]", ")", "else", ":", "raise", "ValueError", ",", "(", "\"Only Sorensen and Jaccard metrics are \"", "\"available for gridded commonality\"", ")", "comm_list", ".", "append", "(", "comm", ")", "# Append subset result", "subresult", "=", "pd", ".", "DataFrame", "(", "{", "'pair'", ":", "pair_list", ",", "'x'", ":", "dist_list", ",", "'y'", ":", "comm_list", "}", ")", "result_list", ".", "append", "(", "(", "substring", ",", "subresult", ")", ")", "# Return all results", "return", "result_list" ]
34.986842
24.644737
def v2010(self): """ :returns: Version v2010 of api :rtype: twilio.rest.api.v2010.V2010 """ if self._v2010 is None: self._v2010 = V2010(self) return self._v2010
[ "def", "v2010", "(", "self", ")", ":", "if", "self", ".", "_v2010", "is", "None", ":", "self", ".", "_v2010", "=", "V2010", "(", "self", ")", "return", "self", ".", "_v2010" ]
26.625
6.875
def wait( self, timeout = -1 ): """Wait for all pending results to be finished. If timeout is set, return after this many seconds regardless. :param timeout: timeout period in seconds (defaults to forever) :returns: True if all the results completed""" # we can't use ipyparallel.Client.wait() for this, because that # method only works for cases where the Client object is the one that # submitted the jobs to the cluster hub -- and therefore has the # necessary data structures to perform synchronisation. This isn't the # case for us, as one of the main goals of epyc is to support disconnected # operation, which implies a different Client object retrieving results # than the one that submitted the jobs in the first place. This is # unfortunate, but understandable given the typical use cases for # Client objects. # # Instead. we have to code around a little busily. The ClusterLab.WaitingTime # global sets the latency for waiting, and we repeatedly wait for this amount # of time before updating the results. The latency value essentially controls # how busy this process is: given that most simulations are expected to # be long, a latency in the tens of seconds feels about right as a default if self.numberOfPendingResults() > 0: # we've got pending results, wait for them timeWaited = 0 while (timeout < 0) or (timeWaited < timeout): if self.numberOfPendingResults() == 0: # no pending jobs left, we're complete return True else: # not done yet, calculate the waiting period if timeout == -1: # wait for the default waiting period dt = self.WaitingTime else: # wait for the default waiting period or until the end of the timeout. # whichever comes first if (timeout - timeWaited) < self.WaitingTime: dt = timeout - timeWaited else: dt = self.WaitingTime # sleep for a while time.sleep(dt) timeWaited = timeWaited + dt # if we get here, the timeout expired, so do a final check # and then exit return (self.numberOfPendingResults() == 0) else: # no results, so we got them all return True
[ "def", "wait", "(", "self", ",", "timeout", "=", "-", "1", ")", ":", "# we can't use ipyparallel.Client.wait() for this, because that", "# method only works for cases where the Client object is the one that", "# submitted the jobs to the cluster hub -- and therefore has the", "# necessary data structures to perform synchronisation. This isn't the", "# case for us, as one of the main goals of epyc is to support disconnected", "# operation, which implies a different Client object retrieving results", "# than the one that submitted the jobs in the first place. This is", "# unfortunate, but understandable given the typical use cases for", "# Client objects.", "#", "# Instead. we have to code around a little busily. The ClusterLab.WaitingTime", "# global sets the latency for waiting, and we repeatedly wait for this amount", "# of time before updating the results. The latency value essentially controls", "# how busy this process is: given that most simulations are expected to", "# be long, a latency in the tens of seconds feels about right as a default", "if", "self", ".", "numberOfPendingResults", "(", ")", ">", "0", ":", "# we've got pending results, wait for them", "timeWaited", "=", "0", "while", "(", "timeout", "<", "0", ")", "or", "(", "timeWaited", "<", "timeout", ")", ":", "if", "self", ".", "numberOfPendingResults", "(", ")", "==", "0", ":", "# no pending jobs left, we're complete", "return", "True", "else", ":", "# not done yet, calculate the waiting period", "if", "timeout", "==", "-", "1", ":", "# wait for the default waiting period", "dt", "=", "self", ".", "WaitingTime", "else", ":", "# wait for the default waiting period or until the end of the timeout.", "# whichever comes first", "if", "(", "timeout", "-", "timeWaited", ")", "<", "self", ".", "WaitingTime", ":", "dt", "=", "timeout", "-", "timeWaited", "else", ":", "dt", "=", "self", ".", "WaitingTime", "# sleep for a while", "time", ".", "sleep", "(", "dt", ")", "timeWaited", "=", "timeWaited", "+", "dt", "# if we get here, the timeout expired, so do a final check", "# and then exit", "return", "(", "self", ".", "numberOfPendingResults", "(", ")", "==", "0", ")", "else", ":", "# no results, so we got them all", "return", "True" ]
49.754717
22.509434
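The busy-wait strategy described in the comments is a generic poll-with-latency loop; a stripped-down sketch of the same pattern, detached from ClusterLab:

    import time

    def wait_until(predicate, timeout=-1, latency=30):
        # Poll predicate() every `latency` seconds until it returns True
        # or the timeout expires; timeout < 0 means wait forever.
        waited = 0
        while timeout < 0 or waited < timeout:
            if predicate():
                return True
            dt = latency if timeout < 0 else min(latency, timeout - waited)
            time.sleep(dt)
            waited += dt
        return predicate()  # final check after the timeout, as in wait() above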
def get_inputs(self): """Get all inputs.""" self.request(EP_GET_INPUTS) return {} if self.last_response is None else self.last_response.get('payload').get('devices')
[ "def", "get_inputs", "(", "self", ")", ":", "self", ".", "request", "(", "EP_GET_INPUTS", ")", "return", "{", "}", "if", "self", ".", "last_response", "is", "None", "else", "self", ".", "last_response", ".", "get", "(", "'payload'", ")", ".", "get", "(", "'devices'", ")" ]
46.5
21.25
def media_download(self, mxcurl, allow_remote=True): """Download raw media from provided mxc URL. Args: mxcurl (str): mxc media URL. allow_remote (bool): indicates to the server that it should not attempt to fetch the media if it is deemed remote. Defaults to true if not provided. """ query_params = {} if not allow_remote: query_params["allow_remote"] = False if mxcurl.startswith('mxc://'): return self._send( "GET", mxcurl[6:], api_path="/_matrix/media/r0/download/", query_params=query_params, return_json=False ) else: raise ValueError( "MXC URL '%s' did not begin with 'mxc://'" % mxcurl )
[ "def", "media_download", "(", "self", ",", "mxcurl", ",", "allow_remote", "=", "True", ")", ":", "query_params", "=", "{", "}", "if", "not", "allow_remote", ":", "query_params", "[", "\"allow_remote\"", "]", "=", "False", "if", "mxcurl", ".", "startswith", "(", "'mxc://'", ")", ":", "return", "self", ".", "_send", "(", "\"GET\"", ",", "mxcurl", "[", "6", ":", "]", ",", "api_path", "=", "\"/_matrix/media/r0/download/\"", ",", "query_params", "=", "query_params", ",", "return_json", "=", "False", ")", "else", ":", "raise", "ValueError", "(", "\"MXC URL '%s' did not begin with 'mxc://'\"", "%", "mxcurl", ")" ]
35.956522
14.956522
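The mxcurl[6:] slice above strips the scheme, leaving the server-name/media-id pair that the download endpoint expects; a sketch with a made-up media URL:

    mxcurl = "mxc://example.org/SEsfnsuifSDFSSEF"  # hypothetical mxc URL
    assert mxcurl.startswith('mxc://')
    media_path = mxcurl[6:]  # "example.org/SEsfnsuifSDFSSEF"
    # request path becomes: /_matrix/media/r0/download/example.org/SEsfnsuifSDFSSEF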
def sample_path(alpha, A, pobs, T=None): """ Sample the hidden pathway S from the conditional distribution P ( S | Parameters, Observations ) Parameters ---------- alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int number of time steps Returns ------- S : numpy.array shape (T) maximum likelihood hidden path """ if __impl__ == __IMPL_PYTHON__: return ip.sample_path(alpha, A, pobs, T=T, dtype=config.dtype) elif __impl__ == __IMPL_C__: return ic.sample_path(alpha, A, pobs, T=T, dtype=config.dtype) else: raise RuntimeError('Nonexisting implementation selected: '+str(__impl__))
[ "def", "sample_path", "(", "alpha", ",", "A", ",", "pobs", ",", "T", "=", "None", ")", ":", "if", "__impl__", "==", "__IMPL_PYTHON__", ":", "return", "ip", ".", "sample_path", "(", "alpha", ",", "A", ",", "pobs", ",", "T", "=", "T", ",", "dtype", "=", "config", ".", "dtype", ")", "elif", "__impl__", "==", "__IMPL_C__", ":", "return", "ic", ".", "sample_path", "(", "alpha", ",", "A", ",", "pobs", ",", "T", "=", "T", ",", "dtype", "=", "config", ".", "dtype", ")", "else", ":", "raise", "RuntimeError", "(", "'Nonexisting implementation selected: '", "+", "str", "(", "__impl__", ")", ")" ]
36.5
20.769231
def fillDataProducts(self, dps): """Fills listview with existing data products""" item = None for dp in dps: if not dp.ignored: item = self._makeDPItem(self, dp, item) # ensure combobox widgets are made self._itemComboBox(item, self.ColAction) self._itemComboBox(item, self.ColRender)
[ "def", "fillDataProducts", "(", "self", ",", "dps", ")", ":", "item", "=", "None", "for", "dp", "in", "dps", ":", "if", "not", "dp", ".", "ignored", ":", "item", "=", "self", ".", "_makeDPItem", "(", "self", ",", "dp", ",", "item", ")", "# ensure combobox widgets are made", "self", ".", "_itemComboBox", "(", "item", ",", "self", ".", "ColAction", ")", "self", ".", "_itemComboBox", "(", "item", ",", "self", ".", "ColRender", ")" ]
41.777778
12.666667
def Page_getResourceContent(self, frameId, url): """ Function path: Page.getResourceContent Domain: Page Method name: getResourceContent WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'frameId' (type: FrameId) -> Frame id to get resource for. 'url' (type: string) -> URL of the resource to get content for. Returns: 'content' (type: string) -> Resource content. 'base64Encoded' (type: boolean) -> True, if content was served as base64. Description: Returns content of the given resource. """ assert isinstance(url, (str,) ), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type( url) subdom_funcs = self.synchronous_command('Page.getResourceContent', frameId=frameId, url=url) return subdom_funcs
[ "def", "Page_getResourceContent", "(", "self", ",", "frameId", ",", "url", ")", ":", "assert", "isinstance", "(", "url", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'url' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "url", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Page.getResourceContent'", ",", "frameId", "=", "frameId", ",", "url", "=", "url", ")", "return", "subdom_funcs" ]
33.5
20.083333
def merge_likelihood_headers(filenames, outfile):
    """
    Merge header information from likelihood files.

    Parameters:
    -----------
    filenames : input filenames
    outfile   : the merged file to write

    Returns:
    --------
    data : the data being written
    """
    filenames = np.atleast_1d(filenames)

    ext='PIX_DATA'
    nside = fitsio.read_header(filenames[0],ext=ext)['LKDNSIDE']

    keys=['STELLAR','NINSIDE','NANNULUS']
    data_dict = odict(PIXEL=[])
    for k in keys:
        data_dict[k] = []

    for i,filename in enumerate(filenames):
        logger.debug('(%i/%i) %s'%(i+1, len(filenames), filename))
        header = fitsio.read_header(filename,ext=ext)
        data_dict['PIXEL'].append(header['LKDPIX'])
        for key in keys:
            data_dict[key].append(header[key])
        del header

    data_dict['PIXEL'] = np.array(data_dict['PIXEL'],dtype=int)
    for key in keys:
        data_dict[key] = np.array(data_dict[key],dtype='f4')

    #import pdb; pdb.set_trace()
    write_partial_map(outfile, data_dict, nside)

    return data_dict
[ "def", "merge_likelihood_headers", "(", "filenames", ",", "outfile", ")", ":", "filenames", "=", "np", ".", "atleast_1d", "(", "filenames", ")", "ext", "=", "'PIX_DATA'", "nside", "=", "fitsio", ".", "read_header", "(", "filenames", "[", "0", "]", ",", "ext", "=", "ext", ")", "[", "'LKDNSIDE'", "]", "keys", "=", "[", "'STELLAR'", ",", "'NINSIDE'", ",", "'NANNULUS'", "]", "data_dict", "=", "odict", "(", "PIXEL", "=", "[", "]", ")", "for", "k", "in", "keys", ":", "data_dict", "[", "k", "]", "=", "[", "]", "for", "i", ",", "filename", "in", "enumerate", "(", "filenames", ")", ":", "logger", ".", "debug", "(", "'(%i/%i) %s'", "%", "(", "i", "+", "1", ",", "len", "(", "filenames", ")", ",", "filename", ")", ")", "header", "=", "fitsio", ".", "read_header", "(", "filename", ",", "ext", "=", "ext", ")", "data_dict", "[", "'PIXEL'", "]", ".", "append", "(", "header", "[", "'LKDPIX'", "]", ")", "for", "key", "in", "keys", ":", "data_dict", "[", "key", "]", ".", "append", "(", "header", "[", "key", "]", ")", "del", "header", "data_dict", "[", "'PIXEL'", "]", "=", "np", ".", "array", "(", "data_dict", "[", "'PIXEL'", "]", ",", "dtype", "=", "int", ")", "for", "key", "in", "keys", ":", "data_dict", "[", "key", "]", "=", "np", ".", "array", "(", "data_dict", "[", "key", "]", ",", "dtype", "=", "'f4'", ")", "#import pdb; pdb.set_trace()", "write_partial_map", "(", "outfile", ",", "data_dict", ",", "nside", ")", "return", "data_dict" ]
26.85
19.25
def getPort(self): """ Helper method for testing; returns the TCP port used for this registration, even if it was specified as 0 and thus allocated by the OS. """ disp = self.pbmanager.dispatchers[self.portstr] return disp.port.getHost().port
[ "def", "getPort", "(", "self", ")", ":", "disp", "=", "self", ".", "pbmanager", ".", "dispatchers", "[", "self", ".", "portstr", "]", "return", "disp", ".", "port", ".", "getHost", "(", ")", ".", "port" ]
36.375
16.625
def get_instance_by_bin_uuid(model, bin_uuid):
    """Get an instance by binary uuid.

    :param model: a string, model name in rio.models.
    :param bin_uuid: a 16-byte binary string.
    :return: None or a SQLAlchemy instance.
    """
    try:
        model = get_model(model)
    except ImportError:
        return None

    return model.query.filter_by(**{'bin_uuid': bin_uuid}).first()
[ "def", "get_instance_by_bin_uuid", "(", "model", ",", "bin_uuid", ")", ":", "try", ":", "model", "=", "get_model", "(", "model", ")", "except", "ImportError", ":", "return", "None", "return", "model", ".", "query", ".", "filter_by", "(", "*", "*", "{", "'bin_uuid'", ":", "bin_uuid", "}", ")", ".", "first", "(", ")" ]
29.307692
16.307692
def get_uris(config): """ returns a tuple of total file size in bytes, and the list of files """ file_names = [] if config.INPUT_DATA is None: sys.stderr.write("you need to provide INPUT_DATA in config\n") sys.exit(1) if isinstance(config.INPUT_DATA, basestring): config.INPUT_DATA = [config.INPUT_DATA] file_size = 0 for uri in config.INPUT_DATA: for regex, uri_method, _, _ in URI_REGEXES: m = regex.match(uri) if m is not None: file_size += uri_method(m, file_names, config) break print("going to process {} files...".format(len(file_names))) return file_size, file_names
[ "def", "get_uris", "(", "config", ")", ":", "file_names", "=", "[", "]", "if", "config", ".", "INPUT_DATA", "is", "None", ":", "sys", ".", "stderr", ".", "write", "(", "\"you need to provide INPUT_DATA in config\\n\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "isinstance", "(", "config", ".", "INPUT_DATA", ",", "basestring", ")", ":", "config", ".", "INPUT_DATA", "=", "[", "config", ".", "INPUT_DATA", "]", "file_size", "=", "0", "for", "uri", "in", "config", ".", "INPUT_DATA", ":", "for", "regex", ",", "uri_method", ",", "_", ",", "_", "in", "URI_REGEXES", ":", "m", "=", "regex", ".", "match", "(", "uri", ")", "if", "m", "is", "not", "None", ":", "file_size", "+=", "uri_method", "(", "m", ",", "file_names", ",", "config", ")", "break", "print", "(", "\"going to process {} files...\"", ".", "format", "(", "len", "(", "file_names", ")", ")", ")", "return", "file_size", ",", "file_names" ]
39.882353
14.588235
def get(cls, database, conditions=""): """ Get all data from system.parts table :param database: A database object to fetch data from. :param conditions: WHERE clause conditions. Database condition is added automatically :return: A list of SystemPart objects """ assert isinstance(database, Database), "database must be database.Database class instance" assert isinstance(conditions, string_types), "conditions must be a string" if conditions: conditions += " AND" field_names = ','.join(cls.fields()) return database.select("SELECT %s FROM %s WHERE %s database='%s'" % (field_names, cls.table_name(), conditions, database.db_name), model_class=cls)
[ "def", "get", "(", "cls", ",", "database", ",", "conditions", "=", "\"\"", ")", ":", "assert", "isinstance", "(", "database", ",", "Database", ")", ",", "\"database must be database.Database class instance\"", "assert", "isinstance", "(", "conditions", ",", "string_types", ")", ",", "\"conditions must be a string\"", "if", "conditions", ":", "conditions", "+=", "\" AND\"", "field_names", "=", "','", ".", "join", "(", "cls", ".", "fields", "(", ")", ")", "return", "database", ".", "select", "(", "\"SELECT %s FROM %s WHERE %s database='%s'\"", "%", "(", "field_names", ",", "cls", ".", "table_name", "(", ")", ",", "conditions", ",", "database", ".", "db_name", ")", ",", "model_class", "=", "cls", ")" ]
55.785714
23.357143
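A hedged usage sketch for SystemPart.get; the Database constructor call and the printed attributes are assumptions based on infi.clickhouse_orm conventions and the standard system.parts columns:

    db = Database('default')
    for part in SystemPart.get(db, conditions="active = 1"):
        print(part.table, part.name)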
def unassign_assessment_taken_from_bank(self, assessment_taken_id, bank_id): """Removes an ``AssessmentTaken`` from a ``Bank``. arg: assessment_taken_id (osid.id.Id): the ``Id`` of the ``AssessmentTaken`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: NotFound - ``assessment_taken_id`` or ``bank_id`` not found or ``assessment_taken_id`` not assigned to ``bank_id`` raise: NullArgument - ``assessment_taken_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy) lookup_session.get_bank(bank_id) # to raise NotFound self._unassign_object_from_catalog(assessment_taken_id, bank_id)
[ "def", "unassign_assessment_taken_from_bank", "(", "self", ",", "assessment_taken_id", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bank_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "get_bank", "(", "bank_id", ")", "# to raise NotFound", "self", ".", "_unassign_object_from_catalog", "(", "assessment_taken_id", ",", "bank_id", ")" ]
52.136364
23.090909
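A one-line usage sketch; session stands in for whatever assignment session object exposes the method above, and both Ids are illustrative:

    session.unassign_assessment_taken_from_bank(assessment_taken_id, bank_id)
    # raises NotFound if the taken assessment was never assigned to this bank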
def coordinate_filter(self, query, mongo_query):
    """Adds genomic coordinate-related filters to the query object

        Args:
            query(dict): a dictionary of query filters specified by the user
            mongo_query(dict): the query that is going to be submitted to the database

        Returns:
            mongo_query(dict): the query object, updated with coordinate filters
    """
    LOG.debug('Adding genomic coordinates to the query')
    chromosome = query['chrom']
    mongo_query['chromosome'] = chromosome

    if (query.get('start') and query.get('end')):
        mongo_query['position'] = {'$lte': int(query['end'])}
        mongo_query['end'] = {'$gte': int(query['start'])}

    return mongo_query
[ "def", "coordinate_filter", "(", "self", ",", "query", ",", "mongo_query", ")", ":", "LOG", ".", "debug", "(", "'Adding genomic coordinates to the query'", ")", "chromosome", "=", "query", "[", "'chrom'", "]", "mongo_query", "[", "'chromosome'", "]", "=", "chromosome", "if", "(", "query", ".", "get", "(", "'start'", ")", "and", "query", ".", "get", "(", "'end'", ")", ")", ":", "mongo_query", "[", "'position'", "]", "=", "{", "'$lte'", ":", "int", "(", "query", "[", "'end'", "]", ")", "}", "mongo_query", "[", "'end'", "]", "=", "{", "'$gte'", ":", "int", "(", "query", "[", "'start'", "]", ")", "}", "return", "mongo_query" ]
37.2
24.05
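A small worked example of coordinate_filter; adapter stands in for the object this method is bound to, and the coordinates are illustrative:

    query = {'chrom': '1', 'start': 880000, 'end': 890000}
    mongo_query = adapter.coordinate_filter(query, {})
    # mongo_query == {'chromosome': '1',
    #                 'position': {'$lte': 890000},
    #                 'end': {'$gte': 880000}}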
def visit_Rep1N(self, node: parsing.Rep0N) -> [ast.stmt]:
    """Generates Python code for a clause repeated 1 or more times.

        <code for the clause>
        while True:
            <code for the clause>
    """
    clause = self.visit(node.pt)
    if isinstance(clause, ast.expr):
        return (self._clause(clause) + self.visit_Rep0N(node))
    self.in_loop += 1
    clause = self._clause(self.visit(node.pt))
    self.in_loop -= 1
    return self._clause(self.visit(node.pt)) + [
        ast.While(ast.Name('True', ast.Load()), clause, [])]
[ "def", "visit_Rep1N", "(", "self", ",", "node", ":", "parsing", ".", "Rep0N", ")", "->", "[", "ast", ".", "stmt", "]", ":", "clause", "=", "self", ".", "visit", "(", "node", ".", "pt", ")", "if", "isinstance", "(", "clause", ",", "ast", ".", "expr", ")", ":", "return", "(", "self", ".", "_clause", "(", "clause", ")", "+", "self", ".", "visit_Rep0N", "(", "node", ")", ")", "self", ".", "in_loop", "+=", "1", "clause", "=", "self", ".", "_clause", "(", "self", ".", "visit", "(", "node", ".", "pt", ")", ")", "self", ".", "in_loop", "-=", "1", "return", "self", ".", "_clause", "(", "self", ".", "visit", "(", "node", ".", "pt", ")", ")", "+", "[", "ast", ".", "While", "(", "ast", ".", "Name", "(", "'True'", ",", "ast", ".", "Load", "(", ")", ")", ",", "clause", ",", "[", "]", ")", "]" ]
38.533333
13.466667
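For orientation, roughly the Python shape visit_Rep1N aims to emit for a hypothetical clause self.read_char(); the break-on-failure form inside the loop is an assumption suggested by the in_loop bookkeeping, not taken from the source:

    # mandatory first occurrence (failure propagates out of the rule)
    if not self.read_char():
        return False
    # zero or more further occurrences
    while True:
        if not self.read_char():
            break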