def populate(self, struct):
    """Generates the list tree.

    struct: if a list/set/tuple is given, a flat list is generated
    <*l><li>v1</li><li>v2</li>...</*l>
    If the list type is 'Dl' a flat list without definitions is generated
    <*l><dt>v1</dt><dt>v2</dt>...</*l>
    If the given struct is a dict, keys containing lists/tuples/sets/dicts
    will be transformed into nested lists, and so on recursively, using
    dict keys as list items and dict values as sublists.
    If type is 'Dl', each value will be transformed into a definition (or a
    list of definitions), except other dicts. In that case, it will be
    transformed into <dfn> tags.

    >>> struct = {'ele1': None, 'ele2': ['sub21', 'sub22'], 'ele3': {'sub31': None, 'sub32': None, '_typ': 'Ol'}}
    >>> TempyList(struct=struct)
    <ul>
        <li>ele1</li>
        <li>ele2
            <ul>
                <li>sub21</li>
                <li>sub22</li>
            </ul>
        </li>
        <li>ele3
            <ol>
                <li>sub31</li>
                <li>sub32</li>
            </ol>
        </li>
    </ul>
    """
    if struct is None:
        # Maybe raise? Empty the list?
        return self
    if isinstance(struct, (list, set, tuple)):
        struct = dict(zip_longest(struct, [None]))
    if not isinstance(struct, dict):
        raise WidgetDataError(
            self,
            "List Input not managed, expected (dict, list), got %s" % type(struct),
        )
    else:
        if self._typ == Dl:
            self.__process_dl_struct(struct)
        else:
            self.__process_li_struct(struct)
    return self

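# Usage sketch for populate() above, assuming tempy's TempyList widget is
# importable as below (the import path and .render() call are assumptions;
# adjust them for your installation). TempyList feeds the struct to
# populate(), as in the doctest:
from tempy.widgets import TempyList

struct = {'ele1': None, 'ele2': ['sub21', 'sub22']}
print(TempyList(struct=struct).render())  # nested <ul>/<li> markup
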
def authenticate(self, provider):
    """
    Starts the OAuth authorization flow; will redirect to the 3rd party site.
    """
    callback_url = url_for(".callback", provider=provider, _external=True)
    provider = self.get_provider(provider)
    session['next'] = request.args.get('next') or ''
    return provider.authorize(callback_url)

def is_contained_in(pe_pe, root):
    '''
    Determine if a PE_PE is contained within an EP_PKG or a C_C.
    '''
    if not pe_pe:
        return False
    if type(pe_pe).__name__ != 'PE_PE':
        pe_pe = one(pe_pe).PE_PE[8001]()
    ep_pkg = one(pe_pe).EP_PKG[8000]()
    c_c = one(pe_pe).C_C[8003]()
    if root in [ep_pkg, c_c]:
        return True
    elif is_contained_in(ep_pkg, root):
        return True
    elif is_contained_in(c_c, root):
        return True
    else:
        return False

def build_columns(self, X, verbose=False):
    """construct the model matrix columns for the term

    Parameters
    ----------
    X : array-like
        Input dataset with n rows

    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    return sp.sparse.csc_matrix(X[:, self.feature][:, np.newaxis])

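# Standalone sketch of what build_columns() returns: one feature column of
# X as an (n, 1) scipy CSC sparse matrix. The imports are written out here;
# in the source module `sp` and `np` are module-level imports:
import numpy as np
import scipy as sp
import scipy.sparse

X = np.arange(12.0).reshape(4, 3)
col = sp.sparse.csc_matrix(X[:, 1][:, np.newaxis])  # feature index 1
print(col.shape)  # (4, 1)
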
from typing import Tuple

def newtons_method_scalar(f: fl.Fluxion, x: float, tol: float = 1e-8) -> Tuple[float, int]:
    """Solve the equation f(x) = 0 for a function from R->R using Newton's method.

    Returns the root estimate and the number of iterations used.
    """
    max_iters: int = 100
    for i in range(max_iters):
        # Evaluate f(x) and f'(x)
        y, dy_dx = f(x)
        # Is y within the tolerance?
        if abs(y) < tol:
            break
        # Compute the Newton step
        dx = -y / dy_dx
        # Update x
        x += float(dx)
    # Return x and the number of iterations required
    return x, i

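# Usage sketch: any callable returning a (value, derivative) pair can stand
# in for the fl.Fluxion annotation, since the solver only calls f(x).
# Solving x**2 - 2 = 0 from x = 1:
def f(x):
    return x * x - 2.0, 2.0 * x  # f(x) and f'(x)

root, iters = newtons_method_scalar(f, x=1.0)
print(root, iters)  # ~1.41421356..., converges in a handful of iterations
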
def canForward(self, request: Request):
    """
    Determine whether to forward client REQUESTs to replicas, based on the
    following logic:

    - If exactly f+1 PROPAGATE requests are received, then forward.
    - If fewer than f+1 requests, then there is probably no consensus on
      the REQUEST; don't forward.
    - If more than f+1, then it was already forwarded to replicas; don't
      forward.

    Even if the node hasn't received the client REQUEST itself, if it has
    received enough PROPAGATE messages for the same, the REQUEST can be
    forwarded.

    :param request: the client REQUEST
    """
    if self.requests.forwarded(request):
        return 'already forwarded'

    # If not enough Propagates, don't bother comparing
    if not self.quorums.propagate.is_reached(self.requests.votes(request)):
        return 'not finalised'

    req = self.requests.req_with_acceptable_quorum(request, self.quorums.propagate)
    if req:
        self.requests.set_finalised(req)
        return None
    else:
        return 'not finalised'

def add(self, quantity):
    """
    Adds an angle to the value
    """
    newvalue = self._value + quantity
    self.set(newvalue.deg)

def deserialize_iso(attr):
    """Deserialize ISO-8601 formatted string into Datetime object.

    :param str attr: response string to be deserialized.
    :rtype: Datetime
    :raises: DeserializationError if string format invalid.
    """
    if isinstance(attr, ET.Element):
        attr = attr.text
    try:
        attr = attr.upper()
        match = Deserializer.valid_date.match(attr)
        if not match:
            raise ValueError("Invalid datetime string: " + attr)

        check_decimal = attr.split('.')
        if len(check_decimal) > 1:
            decimal_str = ""
            for digit in check_decimal[1]:
                if digit.isdigit():
                    decimal_str += digit
                else:
                    break
            if len(decimal_str) > 6:
                attr = attr.replace(decimal_str, decimal_str[0:6])

        date_obj = isodate.parse_datetime(attr)
        test_utc = date_obj.utctimetuple()
        if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
            raise OverflowError("Hit max or min date")
    except (ValueError, OverflowError, AttributeError) as err:
        msg = "Cannot deserialize datetime object."
        raise_with_traceback(DeserializationError, msg, err)
    else:
        return date_obj

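# Usage sketch (assumes the msrest-style module globals this function
# relies on -- ET, Deserializer.valid_date, isodate, raise_with_traceback
# -- are present, as in the original package). Fractional seconds longer
# than 6 digits are truncated before parsing:
dt = deserialize_iso("2018-08-05T10:38:37.7141234567+00:00")
print(dt)  # 2018-08-05 10:38:37.714123+00:00
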
def create_jwt_token(secret, client_id):
    """
    Create JWT token for GOV.UK Notify

    Tokens have standard header:
    {
        "typ": "JWT",
        "alg": "HS256"
    }

    Claims consist of:
    iss: identifier for the client
    iat: issued at in epoch seconds (UTC)

    :param secret: Application signing secret
    :param client_id: Identifier for the client
    :return: JWT token for this request
    """
    assert secret, "Missing secret key"
    assert client_id, "Missing client id"

    headers = {
        "typ": __type__,
        "alg": __algorithm__
    }
    claims = {
        'iss': client_id,
        'iat': epoch_seconds()
    }
    return jwt.encode(payload=claims, key=secret, headers=headers).decode()

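# Verification sketch with PyJWT 1.x (where encode() returns bytes,
# matching the .decode() call above). Assumes the module-level __type__
# and __algorithm__ are "JWT" and "HS256", as the docstring states:
token = create_jwt_token("my-signing-secret", "my-client-id")
claims = jwt.decode(token, key="my-signing-secret", algorithms=["HS256"])
print(claims["iss"])  # "my-client-id"
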
def order(self, pair, side, price, quantity, private_key, use_native_token=True, order_type="limit"):
    """
    This function is a wrapper around the create and execute order
    functions to make this process simpler for the end user by combining
    these requests in one step.

    Execution of this function is as follows::

        order(pair="SWTH_NEO", side="buy", price=0.0002, quantity=100,
              private_key=kp, use_native_token=True, order_type="limit")

    The expected return result for this function is the same as the
    execute_order function::

        {
            'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
            'blockchain': 'neo',
            'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
            'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
            'side': 'buy',
            'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
            'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'offer_amount': '2000000',
            'want_amount': '10000000000',
            'transfer_amount': '0',
            'priority_gas_amount': '0',
            'use_native_token': True,
            'native_fee_transfer_amount': 0,
            'deposit_txn': None,
            'created_at': '2018-08-05T10:38:37.714Z',
            'status': 'processed',
            'fills': [],
            'makes': [
                {
                    'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
                    'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
                    'available_amount': '2000000',
                    'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
                    'offer_amount': '2000000',
                    'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                    'want_amount': '10000000000',
                    'filled_amount': '0.0',
                    'txn': None,
                    'cancel_txn': None,
                    'price': '0.0002',
                    'status': 'confirming',
                    'created_at': '2018-08-05T10:38:37.731Z',
                    'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
                    'trades': []
                }
            ]
        }

    :param pair: The trading pair this order is being submitted for.
    :type pair: str
    :param side: The side of the trade being submitted, i.e. buy or sell.
    :type side: str
    :param price: The price target for this trade.
    :type price: float
    :param quantity: The amount of the asset being exchanged in the trade.
    :type quantity: float
    :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign the deposit message.
    :type private_key: KeyPair or str
    :param use_native_token: Flag to indicate whether or not to pay fees with the Switcheo native token.
    :type use_native_token: bool
    :param order_type: The type of order being submitted; currently this can only be a limit order.
    :type order_type: str
    :return: Dictionary of the transaction on the order book.
    """
    create_order = self.create_order(private_key=private_key, pair=pair, side=side,
                                     price=price, quantity=quantity,
                                     use_native_token=use_native_token,
                                     order_type=order_type)
    return self.execute_order(order_params=create_order, private_key=private_key)

def abfGroups(abfFolder):
    """
    Given a folder path or list of files, return groups (dict) by cell.

    Rules which define parents (cells):
        * assume each cell has one or several ABFs
        * that cell can be labeled by its "ID" or "parent" ABF (first abf)
        * the ID is just the filename of the first abf without .abf
        * if any file starts with an "ID", that ID becomes a parent.
            * examples could be 16o14044.TIF or 16o14044-cell1-stuff.jpg
            * usually this is done by saving a pic of the cell with the same filename

    Returns a dict of "parent IDs" representing the "children":
        groups["16o14041"] = ["16o14041", "16o14042", "16o14043"]

    From there, getting children files is trivial. Just find all files in
    the same folder whose filenames begin with one of the children.
    """
    # prepare the list of files, filenames, and IDs
    files = False
    if type(abfFolder) is str and os.path.isdir(abfFolder):
        files = abfSort(os.listdir(abfFolder))
    elif type(abfFolder) is list:
        files = abfSort(abfFolder)
    assert type(files) is list
    files = list_to_lowercase(files)

    # group every filename in a different list, and determine parents
    abfs, IDs, others, parents, days = [], [], [], [], []
    for fname in files:
        if fname.endswith(".abf"):
            abfs.append(fname)
            IDs.append(fname[:-4])
            days.append(fname[:5])
        else:
            others.append(fname)
    for ID in IDs:
        for fname in others:
            if fname.startswith(ID):
                parents.append(ID)
    parents = abfSort(set(parents))  # allow only one copy each
    days = abfSort(set(days))  # allow only one copy each

    # match up children with parents, respecting daily orphans.
    groups = {}
    for day in days:
        parent = None
        for fname in [x for x in abfs if x.startswith(day)]:
            ID = fname[:-4]
            if ID in parents:
                parent = ID
            if parent not in groups.keys():
                groups[parent] = []
            groups[parent].extend([ID])
    return groups

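# Grouping sketch (assumes the module's abfSort and list_to_lowercase
# helpers, as in the original package). The .TIF with a matching name
# marks 16o14041 as the parent for that day:
files = ["16o14041.abf", "16o14041.TIF", "16o14042.abf", "16o14043.abf"]
print(abfGroups(files))
# {'16o14041': ['16o14041', '16o14042', '16o14043']}
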
def save_trailer(self, tocpos):
    """Save the trailer to disk.

    CArchives can be opened from the end - the trailer points back
    to the start.
    """
    totallen = tocpos + self.toclen + self.TRLLEN
    pyvers = sys.version_info[0] * 10 + sys.version_info[1]
    trl = struct.pack(self.TRLSTRUCT, self.MAGIC,
                      totallen, tocpos, self.toclen, pyvers)
    self.lib.write(trl)

def doeigs_s(tau, Vdirs):
    """
    Get elements of s from eigenvalues - note that this is very unstable.

    Input:
        tau, V:
            tau is a list of eigenvalues in decreasing order: [t1, t2, t3]
            V is a list of the eigenvector directions
                [[V1_dec, V1_inc], [V2_dec, V2_inc], [V3_dec, V3_inc]]
    Output:
        The six tensor elements as a list: s = [x11, x22, x33, x12, x23, x13]
    """
    t = np.zeros((3, 3,), 'f')  # initialize the tau diagonal matrix
    V = []
    for j in range(3):
        t[j][j] = tau[j]  # diagonalize tau
    for k in range(3):
        V.append(dir2cart([Vdirs[k][0], Vdirs[k][1], 1.0]))
    V = np.transpose(V)
    tmp = np.dot(V, t)
    chi = np.dot(tmp, np.transpose(V))
    return a2s(chi)

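# Usage sketch (assumes the module context where np, dir2cart and a2s are
# defined, as in pmagpy's pmag module; the values are illustrative):
tau = [0.4, 0.35, 0.25]                         # eigenvalues, decreasing
Vdirs = [[0.0, 90.0], [0.0, 0.0], [90.0, 0.0]]  # dec/inc of eigenvectors
print(doeigs_s(tau, Vdirs))  # six tensor elements [x11,x22,x33,x12,x23,x13]
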
def find_first_available_template(self, template_name_list):
    """
    Given a list of template names, find the first one that actually
    exists and is available.
    """
    if isinstance(template_name_list, six.string_types):
        return template_name_list
    else:
        # Take advantage of fluent_pages' internal implementation
        return _select_template_name(template_name_list)

def is_set(self, key):
    """Check to see if the key has been set in any of the data locations.
    """
    path = key.split(self._key_delimiter)
    lower_case_key = key.lower()
    val = self._find(lower_case_key)
    if val is None:
        source = self._find(path[0].lower())
        if source is not None and isinstance(source, dict):
            val = self._search_dict(source, path[1::])
    return val is not None

def image_create(comptparms, clrspc):
    """Creates a new image structure.

    Wraps the openjp2 library function opj_image_create.

    Parameters
    ----------
    comptparms : comptparms_t
        The component parameters.
    clrspc : int
        Specifies the color space.

    Returns
    -------
    image : ImageType
        Reference to ImageType instance.
    """
    OPENJP2.opj_image_create.argtypes = [ctypes.c_uint32,
                                         ctypes.POINTER(ImageComptParmType),
                                         COLOR_SPACE_TYPE]
    OPENJP2.opj_image_create.restype = ctypes.POINTER(ImageType)
    image = OPENJP2.opj_image_create(len(comptparms), comptparms, clrspc)
    return image

def find(self, obj):
    """Returns the index of the given object in the queue; it might be a
    string, which will be searched for inside each task.

    :arg obj: object we are looking for
    :return: -1 if the object is not found, or else the location of the task
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')

    data = self.rdb.lrange(self._name, 0, -1)
    for i, datum in enumerate(data):
        if datum.find(str(obj)) != -1:
            return i
    return -1

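# Usage sketch (this matches a retask-style Queue; assumes a reachable
# Redis server; 'incoming' and 'job-42' are illustrative names):
q = Queue('incoming')
q.connect()
print(q.find('job-42'))  # index of the first task containing 'job-42', or -1
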
def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """ Computes multiplicative self attention for time series of vectors (with batch dimension)
        the formula: score(h_i, h_j) = <W_1 h_i, W_2 h_j>, W_1 and W_2 are learnable matrices
        with dimensionality [n_hidden, n_input_features], where <a, b> stands for a and b dot product

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    n_input_features = units.get_shape().as_list()[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    queries = tf.layers.dense(expand_tile(units, 1), n_hidden, kernel_initializer=INITIALIZER())
    keys = tf.layers.dense(expand_tile(units, 2), n_hidden, kernel_initializer=INITIALIZER())
    scores = tf.reduce_sum(queries * keys, axis=3, keep_dims=True)
    attention = tf.nn.softmax(scores, dim=2)
    attended_units = tf.reduce_sum(attention * expand_tile(units, 1), axis=2)
    output = tf.layers.dense(attended_units, n_output_features, activation, kernel_initializer=INITIALIZER())
    return output

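# Usage sketch (TensorFlow 1.x graph mode, matching the tf.layers /
# keep_dims API used above; expand_tile and INITIALIZER come from the
# same module). The output keeps the [batch, time, features] layout:
units = tf.placeholder(tf.float32, shape=[None, 20, 64])
attended = multiplicative_self_attention(units, n_hidden=32)
print(attended.get_shape().as_list())  # [None, 20, 64]
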
def module_remove(name):
    '''
    Removes SELinux module

    name
        The name of the module to remove

    .. versionadded:: 2016.11.6
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    modules = __salt__['selinux.list_semod']()
    if name not in modules:
        ret['comment'] = 'Module {0} is not available'.format(name)
        ret['result'] = False
        return ret
    if __salt__['selinux.remove_semod'](name):
        ret['comment'] = 'Module {0} has been removed'.format(name)
        return ret
    ret['result'] = False
    ret['comment'] = 'Failed to remove module {0}'.format(name)
    return ret

def push(self, cart, env=None, callback=None):
    """
    `cart` - Release cart to push items from
    `callback` - Optional callback to call if juicer.utils.upload_rpm succeeds

    Pushes the items in a release cart to the pre-release environment.
    """
    juicer.utils.Log.log_debug("Initializing push of cart '%s'" % cart.cart_name)
    if not env:
        env = self._defaults['start_in']
    cart.current_env = env
    self.sign_cart_for_env_maybe(cart, env)
    self.upload(env, cart, callback)
    return True

def make_long_description():
    """
    Generate the reST long_description for setup() from source files.

    Returns the generated long_description as a unicode string.
    """
    readme_path = README_PATH

    # Remove our HTML comments because PyPI does not allow it.
    # See the setup.py docstring for more info on this.
    readme_md = strip_html_comments(read(readme_path))
    history_md = strip_html_comments(read(HISTORY_PATH))
    license_md = """\
License
=======

""" + read(LICENSE_PATH)

    sections = [readme_md, history_md, license_md]
    md_description = '\n\n'.join(sections)

    # Write the combined Markdown file to a temp path.
    md_ext = os.path.splitext(readme_path)[1]
    md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext)
    write(md_description, md_description_path)

    rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH)
    long_description = convert_md_to_rst(md_path=md_description_path,
                                         rst_temp_path=rst_temp_path)

    return "\n".join([RST_LONG_DESCRIPTION_INTRO, long_description])

def rellink(source, dest):
    """Create a symbolic link to path *source* from path *dest*. If either
    *source* or *dest* is an absolute path, the link from *dest* will point
    to the absolute path of *source*. Otherwise, the link to *source* from
    *dest* will be a relative link.
    """
    from os.path import isabs, dirname, relpath, abspath

    if isabs(source):
        os.symlink(source, dest)
    elif isabs(dest):
        os.symlink(abspath(source), dest)
    else:
        os.symlink(relpath(source, dirname(dest)), dest)

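# Usage sketch (the target directories are assumed to exist; `import os`
# is required at module level, since rellink calls os.symlink):
rellink('data/input.txt', 'work/input.txt')
# -> 'work/input.txt' becomes a relative link to '../data/input.txt'
rellink('/etc/hosts', 'work/hosts')
# -> 'work/hosts' links to the absolute path '/etc/hosts'
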
def sadd(self, key, *members):
    """Add the specified members to the set stored at key. Specified
    members that are already a member of this set are ignored. If key does
    not exist, a new set is created before adding the specified members.
    An error is returned when the value stored at key is not a set.

    Returns :data:`True` if all requested members are added. If more than
    one member is passed in and not all members are added, the number of
    added members is returned.

    .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of
       members to be added.

    :param key: The key of the set
    :type key: :class:`str`, :class:`bytes`
    :param members: One or more positional arguments to add to the set
    :type members: :class:`str`, :class:`bytes`
    :returns: Number of items added to the set
    :rtype: bool, int
    """
    return self._execute([b'SADD', key] + list(members), len(members))

def startResponse(self, status, headers, excInfo=None):
    """
    Extends startResponse to call responseInColor in a thread.
    """
    self.status = status
    self.headers = headers
    self.reactor.callInThread(
        responseInColor, self.request, status, headers
    )
    return self.write

def build_source_reading(expnum, ccd=None, ftype='p'):
    """
    Build an astrom.Observation object for a SourceReading.

    :param expnum: (str) Name or CFHT Exposure number of the observation.
    :param ccd: (str) CCD this observation is associated with. (can be None)
    :param ftype: (str) exposure type (specific to CFHT imaging)
    :return: An astrom.Observation object for the observation.
    :rtype: astrom.Observation
    """
    logger.debug("Building source reading for expnum:{} ccd:{} ftype:{}".format(expnum, ccd, ftype))
    return astrom.Observation(expnum=str(expnum), ftype=ftype, ccdnum=ccd)

def mouse_click(self, widget, event=None):
    """Triggered when a mouse click is pressed in the history tree.

    The method shows all scoped data for an execution step as a tooltip,
    folds and unfolds the tree on double-click, and selects the respective
    state for a double-clicked element.
    """
    if event.type == Gdk.EventType._2BUTTON_PRESS and event.get_button()[1] == 1:
        (model, row) = self.history_tree.get_selection().get_selected()
        if row is not None:
            history_item_path = self.history_tree_store.get_path(row)
            history_item_iter = self.history_tree_store.get_iter(history_item_path)
            # logger.info(history_item.state_reference)

            # TODO generalize double-click folding and unfolding -> also used in states tree of state machine
            if history_item_path is not None and self.history_tree_store.iter_n_children(history_item_iter):
                if self.history_tree.row_expanded(history_item_path):
                    self.history_tree.collapse_row(history_item_path)
                else:
                    self.history_tree.expand_to_path(history_item_path)

            sm = self.get_history_item_for_tree_iter(history_item_iter).state_reference.get_state_machine()
            if sm:
                if sm.state_machine_id != self.model.selected_state_machine_id:
                    self.model.selected_state_machine_id = sm.state_machine_id
            else:
                logger.info("No state machine could be found for selected item's state reference and "
                            "therefore no selection is performed.")
                return

            active_sm_m = self.model.get_selected_state_machine_model()
            assert active_sm_m.state_machine is sm

            state_path = self.get_history_item_for_tree_iter(history_item_iter).state_reference.get_path()
            ref_state_m = active_sm_m.get_state_model_by_path(state_path)
            if ref_state_m and active_sm_m:
                active_sm_m.selection.set(ref_state_m)

        return True

    if event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 2:
        x = int(event.x)
        y = int(event.y)
        pthinfo = self.history_tree.get_path_at_pos(x, y)
        if pthinfo is not None:
            path, col, cellx, celly = pthinfo
            self.history_tree.grab_focus()
            self.history_tree.set_cursor(path, col, 0)
            self.open_selected_history_separately(None)

    if event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
        x = int(event.x)
        y = int(event.y)
        time = event.time
        pthinfo = self.history_tree.get_path_at_pos(x, y)
        if pthinfo is not None:
            path, col, cellx, celly = pthinfo
            self.history_tree.grab_focus()
            self.history_tree.set_cursor(path, col, 0)

            popup_menu = Gtk.Menu()

            model, row = self.history_tree.get_selection().get_selected()
            history_item = model[row][self.HISTORY_ITEM_STORAGE_ID]
            if not isinstance(history_item, ScopedDataItem) or history_item.scoped_data is None:
                return
            scoped_data = history_item.scoped_data
            input_output_data = history_item.child_state_input_output_data
            state_reference = history_item.state_reference

            self.append_string_to_menu(popup_menu, "------------------------")
            self.append_string_to_menu(popup_menu, "Scoped Data: ")
            self.append_string_to_menu(popup_menu, "------------------------")
            for key, data in scoped_data.items():
                menu_item_string = " %s (%s - %s):\t%s" % (
                    data.name.replace("_", "__"), key, data.value_type, data.value)
                self.append_string_to_menu(popup_menu, menu_item_string)

            if input_output_data:
                if isinstance(history_item, CallItem):
                    self.append_string_to_menu(popup_menu, "------------------------")
                    self.append_string_to_menu(popup_menu, "Input Data:")
                    self.append_string_to_menu(popup_menu, "------------------------")
                else:
                    self.append_string_to_menu(popup_menu, "------------------------")
                    self.append_string_to_menu(popup_menu, "Output Data:")
                    self.append_string_to_menu(popup_menu, "------------------------")
                for key, data in input_output_data.items():
                    menu_item_string = " %s :\t%s" % (key.replace("_", "__"), data)
                    self.append_string_to_menu(popup_menu, menu_item_string)

            if state_reference:
                if history_item.outcome:
                    self.append_string_to_menu(popup_menu, "------------------------")
                    final_outcome_menu_item_string = "Final outcome: " + str(history_item.outcome)
                    self.append_string_to_menu(popup_menu, final_outcome_menu_item_string)
                    self.append_string_to_menu(popup_menu, "------------------------")

            popup_menu.show()
            popup_menu.popup(None, None, None, None, event.get_button()[1], time)
        return True

def is_dark_terminal_background(cls):
    """
    :return: Whether we have a dark terminal background color, or None if unknown.
        We currently just check the env var COLORFGBG,
        which some terminals define like "<foreground-color>:<background-color>",
        and if <background-color> in {0,1,2,3,4,5,6,8}, then we have some dark background.
        There are many other complex heuristics we could do here, which work in some cases but not in others.
        See e.g. `here <https://stackoverflow.com/questions/2507337/terminals-background-color>`__.
        But instead of adding more heuristics, we think that explicitly setting COLORFGBG
        would be the best thing, in case it's not like you want it.
    :rtype: bool|None
    """
    if os.environ.get("COLORFGBG", None):
        parts = os.environ["COLORFGBG"].split(";")
        try:
            last_number = int(parts[-1])
            if 0 <= last_number <= 6 or last_number == 8:
                return True
            else:
                return False
        except ValueError:  # not an integer?
            pass
    return None

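# Usage sketch: the heuristic reads only COLORFGBG. The snippet is a
# classmethod; `Term` stands in for the (unknown) enclosing class:
import os
os.environ["COLORFGBG"] = "15;0"   # light foreground on dark background
print(Term.is_dark_terminal_background())  # True (0 is a dark color)
os.environ["COLORFGBG"] = "0;15"
print(Term.is_dark_terminal_background())  # False
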
def join(self, channel):
    """Add this user to the channel's user list and add the channel to
    this user's list of joined channels.
    """
    if channel not in self.channels:
        channel.users.add(self.nick)
        self.channels.append(channel)

def plot_data(self, proj, ax):
    """
    Creates and plots the contour plot of the original data. This is done
    by evaluating the density of projected datapoints on a grid.
    """
    x, y = proj
    x_data = self.ig.independent_data[x]
    y_data = self.ig.dependent_data[y]
    projected_data = np.column_stack((x_data, y_data)).T
    kde = gaussian_kde(projected_data)

    xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
    x_grid = xx.flatten()
    y_grid = yy.flatten()

    contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T)
    # This is an ugly kludge, but it seems necessary to make low density
    # areas show up.
    if self.ig.log_contour:
        contour_grid = np.log(contour_grid)
        vmin = -7
    else:
        vmin = None
    ax.contourf(xx, yy, contour_grid.reshape(xx.shape), 50, vmin=vmin, cmap='Blues')

def transform_audio(self, y):
    '''Compute the tempogram

    Parameters
    ----------
    y : np.ndarray
        Audio buffer

    Returns
    -------
    data : dict
        data['tempogram'] : np.ndarray, shape=(n_frames, win_length)
            The tempogram
    '''
    n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

    tgram = tempogram(y=y, sr=self.sr,
                      hop_length=self.hop_length,
                      win_length=self.win_length).astype(np.float32)

    tgram = fix_length(tgram, n_frames)
    return {'tempogram': tgram.T[self.idx]}

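# Usage sketch (librosa-backed feature extractor; `tempo_ext` is a
# hypothetical instance constructed with sr, hop_length and win_length,
# and 'audio.wav' is an illustrative file path):
import librosa
y, sr = librosa.load('audio.wav', sr=tempo_ext.sr)
out = tempo_ext.transform_audio(y)
print(out['tempogram'].shape)  # (n_frames, win_length)
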
def get_definition(self, name: YangIdentifier, kw: YangIdentifier) -> Optional["Statement"]:
    """Search ancestor statements for a definition.

    Args:
        name: Name of a grouping or datatype (with no prefix).
        kw: ``grouping`` or ``typedef``.

    Returns:
        The definition statement, or ``None`` if it is not found.
    """
    stmt = self.superstmt
    while stmt:
        res = stmt.find1(kw, name)
        if res:
            return res
        stmt = stmt.superstmt
    return None

def at(self, row, col):
    """Return the value at the given cell position.

    Args:
        row (int): zero-based row number
        col (int): zero-based column number

    Returns:
        cell value

    Raises:
        TypeError: if ``row`` or ``col`` is not an ``int``
        IndexError: if the position is out of range
    """
    if not (isinstance(row, int) and isinstance(col, int)):
        raise TypeError(row, col)
    return self._values[row][col]

def base_url(self):
    """Base URL for resolving resource URLs"""
    if self.doc.package_url:
        return self.doc.package_url
    return self.doc._ref

def pop_result(self, key):
    """Returns the result for ``key`` and unregisters it."""
    self.pending_callbacks.remove(key)
    return self.results.pop(key)

def save(self, *args, **kwargs):
    """
    call synchronizer "after_external_layer_saved" method
    for any additional operation that must be executed after save
    """
    after_save = kwargs.pop('after_save', True)
    super(LayerExternal, self).save(*args, **kwargs)
    # call after_external_layer_saved method of synchronizer
    if after_save:
        try:
            synchronizer = self.synchronizer
        except ImproperlyConfigured:
            pass
        else:
            if synchronizer:
                synchronizer.after_external_layer_saved(self.config)
    # reload schema
    self._reload_schema()

def stSpectralRollOff(X, c, fs):
    """Computes spectral roll-off"""
    totalEnergy = numpy.sum(X ** 2)
    fftLength = len(X)
    Thres = c * totalEnergy
    # Find the spectral rolloff as the frequency position
    # where the respective spectral energy is equal to c*totalEnergy
    CumSum = numpy.cumsum(X ** 2) + eps
    [a, ] = numpy.nonzero(CumSum > Thres)
    if len(a) > 0:
        mC = numpy.float64(a[0]) / (float(fftLength))
    else:
        mC = 0.0
    return (mC)

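# Standalone sketch: roll-off position of a magnitude spectrum. `eps` is a
# module-level constant in the original package; a small value stands in:
import numpy
eps = 1e-8
X = numpy.abs(numpy.fft.rfft(numpy.hanning(1024)))
print(stSpectralRollOff(X, 0.90, fs=16000))  # fraction of fftLength in [0, 1]
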
def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str = None):
    """Sets the action to take when conflicts arise when attempting
    to insert/create a new row.

    Arguments:
        fields:
            The fields the conflicts can occur in.

        action:
            The action to take when the conflict occurs.

        index_predicate:
            The index predicate to satisfy an arbiter partial index.
    """
    return self.get_queryset().on_conflict(fields, action, index_predicate)

def paste_clipboard(self, event):
    """
    Send the clipboard content as user input to the CPU.
    """
    log.critical("paste clipboard")
    clipboard = self.root.clipboard_get()
    for line in clipboard.splitlines():
        log.critical("paste line: %s", repr(line))
        self.add_user_input(line + "\r")

def Reset(self): """Preserves FSM but resets starting state and current record.""" # Current state is Start state. self._cur_state = self.states['Start'] self._cur_state_name = 'Start' # Clear table of results and current record. self._result = [] self._ClearAllRecord()
[ "def", "Reset", "(", "self", ")", ":", "# Current state is Start state.", "self", ".", "_cur_state", "=", "self", ".", "states", "[", "'Start'", "]", "self", ".", "_cur_state_name", "=", "'Start'", "# Clear table of results and current record.", "self", ".", "_result", "=", "[", "]", "self", ".", "_ClearAllRecord", "(", ")" ]
29.1
15.8
def windows_install(path_to_python=""): """ Sets the .py extension to be associated with the ftype Python which is then set to the python.exe you provide in the path_to_python variable or after the -p flag if run as a script. Once the python environment is set up the function proceeds to set PATH and PYTHONPATH using setx. Parameters ---------- path_to_python : the path to the python.exe you want Windows to execute when running .py files """ if not path_to_python: print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details") print("Would you like to continue? [y/N] ") ans = input() if ans == 'y': pass else: return # be sure to add python.exe if the user forgets to include the file name if os.path.isdir(path_to_python): path_to_python = os.path.join(path_to_python, "python.exe") if not os.path.isfile(path_to_python): print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python) return # make windows associate .py with python subprocess.check_call('assoc .py=Python', shell=True) subprocess.check_call('ftype Python=%s ' % path_to_python + '"%1" %*', shell=True) PmagPyDir = os.path.abspath(".") ProgramsDir = os.path.join(PmagPyDir, 'programs') dirs_to_add = [ProgramsDir] for d in next(os.walk(ProgramsDir))[1]: dirs_to_add.append(os.path.join(ProgramsDir, d)) path = str(subprocess.check_output('echo %PATH%', shell=True)).strip('\n') if "PATH" in path: path = '' pypath = str(subprocess.check_output( 'echo %PYTHONPATH%', shell=True)).strip('\n') if "PYTHONPATH" in pypath: pypath = PmagPyDir + ';' + ProgramsDir else: pypath += ';' + PmagPyDir + ';' + ProgramsDir for d_add in dirs_to_add: path += ';' + d_add unique_path_list = [] for p in path.split(';'): p = p.replace('"', '') if p not in unique_path_list: unique_path_list.append(p) unique_pypath_list = [] for p in pypath.split(';'): p = p.replace('"', '') if p not in unique_pypath_list: unique_pypath_list.append(p) path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list) pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list) print('setx PATH "%s"' % path) subprocess.call('setx PATH "%s"' % path, shell=True) print('setx PYTHONPATH "%s"' % pypath) subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True) print("Install complete. Please restart the command prompt to complete install")
[ "def", "windows_install", "(", "path_to_python", "=", "\"\"", ")", ":", "if", "not", "path_to_python", ":", "print", "(", "\"Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\\nRun dev_setup.py with the -h flag for more details\"", ")", "print", "(", "\"Would you like to continue? [y/N] \"", ")", "ans", "=", "input", "(", ")", "if", "ans", "==", "'y'", ":", "pass", "else", ":", "return", "# be sure to add python.exe if the user forgets to include the file name", "if", "os", ".", "path", ".", "isdir", "(", "path_to_python", ")", ":", "path_to_python", "=", "os", ".", "path", ".", "join", "(", "path_to_python", ",", "\"python.exe\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "path_to_python", ")", ":", "print", "(", "\"The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\\nPlease run again with the command line option '-p' followed by the correct full path to python.\\nRun dev_setup.py with the -h flag for more details\"", "%", "path_to_python", ")", "return", "# make windows associate .py with python", "subprocess", ".", "check_call", "(", "'assoc .py=Python'", ",", "shell", "=", "True", ")", "subprocess", ".", "check_call", "(", "'ftype Python=%s '", "%", "path_to_python", "+", "'\"%1\" %*'", ",", "shell", "=", "True", ")", "PmagPyDir", "=", "os", ".", "path", ".", "abspath", "(", "\".\"", ")", "ProgramsDir", "=", "os", ".", "path", ".", "join", "(", "PmagPyDir", ",", "'programs'", ")", "dirs_to_add", "=", "[", "ProgramsDir", "]", "for", "d", "in", "next", "(", "os", ".", "walk", "(", "ProgramsDir", ")", ")", "[", "1", "]", ":", "dirs_to_add", ".", "append", "(", "os", ".", "path", ".", "join", "(", "ProgramsDir", ",", "d", ")", ")", "path", "=", "str", "(", "subprocess", ".", "check_output", "(", "'echo %PATH%'", ",", "shell", "=", "True", ")", ")", ".", "strip", "(", "'\\n'", ")", "if", "\"PATH\"", "in", "path", ":", "path", "=", "''", "pypath", "=", "str", "(", "subprocess", ".", "check_output", "(", "'echo %PYTHONPATH%'", ",", "shell", "=", "True", ")", ")", ".", "strip", "(", "'\\n'", ")", "if", "\"PYTHONPATH\"", "in", "pypath", ":", "pypath", "=", "PmagPyDir", "+", "';'", "+", "ProgramsDir", "else", ":", "pypath", "+=", "';'", "+", "PmagPyDir", "+", "';'", "+", "ProgramsDir", "for", "d_add", "in", "dirs_to_add", ":", "path", "+=", "';'", "+", "d_add", "unique_path_list", "=", "[", "]", "for", "p", "in", "path", ".", "split", "(", "';'", ")", ":", "p", "=", "p", ".", "replace", "(", "'\"'", ",", "''", ")", "if", "p", "not", "in", "unique_path_list", ":", "unique_path_list", ".", "append", "(", "p", ")", "unique_pypath_list", "=", "[", "]", "for", "p", "in", "pypath", ".", "split", "(", "';'", ")", ":", "p", "=", "p", ".", "replace", "(", "'\"'", ",", "''", ")", "if", "p", "not", "in", "unique_pypath_list", ":", "unique_pypath_list", ".", "append", "(", "p", ")", "path", "=", "functools", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "';'", "+", "y", ",", "unique_path_list", ")", "pypath", "=", "functools", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "';'", "+", "y", ",", "unique_pypath_list", ")", "print", "(", "'setx PATH \"%s\"'", "%", "path", ")", "subprocess", ".", "call", "(", "'setx PATH \"%s\"'", "%", "path", ",", "shell", "=", "True", ")", "print", "(", "'setx PYTHONPATH \"%s\"'", "%", "pypath", ")", "subprocess", ".", "call", "(", "'setx PYTHONPATH \"%s\"'", "%", "(", "pypath", ")", ",", "shell", "=", "True", ")", "print", "(", "\"Install complete. Please restart the command prompt to complete install\"", ")" ]
49.253731
28.865672
def modify(self, management_address=None, username=None, password=None, connection_type=None): """ Modifies a remote system for remote replication. :param management_address: same as the one in the `create` method. :param username: username for accessing the remote system. :param password: password for accessing the remote system. :param connection_type: same as the one in the `create` method. """ req_body = self._cli.make_body( managementAddress=management_address, username=username, password=password, connectionType=connection_type) resp = self.action('modify', **req_body) resp.raise_if_err() return resp
[ "def", "modify", "(", "self", ",", "management_address", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "connection_type", "=", "None", ")", ":", "req_body", "=", "self", ".", "_cli", ".", "make_body", "(", "managementAddress", "=", "management_address", ",", "username", "=", "username", ",", "password", "=", "password", ",", "connectionType", "=", "connection_type", ")", "resp", "=", "self", ".", "action", "(", "'modify'", ",", "*", "*", "req_body", ")", "resp", ".", "raise_if_err", "(", ")", "return", "resp" ]
42.235294
19.529412
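A minimal sketch, assuming `remote_system` is an existing remote-system resource object exposing the modify() method above:

resp = remote_system.modify(management_address='10.0.0.5',
                            username='admin', password='new-secret')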
def open(self, url, mode='rb', reload=False, filename=None): """Open a file, downloading it first if it does not yet exist. Unlike when you call a loader directly like ``my_loader()``, this ``my_loader.open()`` method does not attempt to parse or interpret the file; it simply returns an open file object. The ``url`` can be either an external URL, or else the path to a file on the current filesystem. A relative path will be assumed to be relative to the base directory of this loader object. If a URL was provided and the ``reload`` parameter is true, then any existing file will be removed before the download starts. The ``filename`` parameter lets you specify an alternative local filename instead of having the filename extracted from the final component of the URL. """ if '://' not in url: path_that_might_be_relative = url path = os.path.join(self.directory, path_that_might_be_relative) return open(path, mode) if filename is None: filename = urlparse(url).path.split('/')[-1] path = self.path_to(filename) if reload and os.path.exists(path): os.remove(path) if not os.path.exists(path): download(url, path, self.verbose) return open(path, mode)
[ "def", "open", "(", "self", ",", "url", ",", "mode", "=", "'rb'", ",", "reload", "=", "False", ",", "filename", "=", "None", ")", ":", "if", "'://'", "not", "in", "url", ":", "path_that_might_be_relative", "=", "url", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "path_that_might_be_relative", ")", "return", "open", "(", "path", ",", "mode", ")", "if", "filename", "is", "None", ":", "filename", "=", "urlparse", "(", "url", ")", ".", "path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "path", "=", "self", ".", "path_to", "(", "filename", ")", "if", "reload", "and", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "download", "(", "url", ",", "path", ",", "self", ".", "verbose", ")", "return", "open", "(", "path", ",", "mode", ")" ]
43.741935
21.064516
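A usage sketch for open(); the loader instance, URL, and file names are placeholders:

with loader.open('https://example.com/data/ephemeris.bsp') as f:
    header = f.read(64)  # downloaded on first use, opened from the local cache afterwards

with loader.open('notes.txt', mode='r') as f:  # relative path, resolved against loader.directory
    text = f.read()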
def get_resource(self, resource_wrapper=None): """ Returns a ``Resource`` instance. :param resource_wrapper: A Resource subclass. :return: A ``Resource`` instance. :raises PoolEmptyError: If attempt to get resource fails or times out. """ rtracker = None if resource_wrapper is None: resource_wrapper = self._resource_wrapper if self.empty(): self._harvest_lost_resources() try: rtracker = self._get(0) except PoolEmptyError: pass if rtracker is None: # Could not find resource, try to make one. try: rtracker = self._make_resource() except PoolFullError: pass if rtracker is None: # Could not find or make resource, so must wait for a resource # to be returned to the pool. try: rtracker = self._get(timeout=self._timeout) except PoolEmptyError: pass if rtracker is None: raise PoolEmptyError # Ensure resource is active. if not self.ping(rtracker.resource): # Lock here to prevent another thread creating a resource in the # index that will have this resource removed. This ensures there # will be space for _make_resource() to place a newly created # resource. with self._lock: self._remove(rtracker) rtracker = self._make_resource() # Ensure all resources leave pool with same attributes. # normalize_connection() is used since it calls # normalize_resource(), so if a user implements either one, the # resource will still be normalized. This will be changed in 1.0 to # call normalize_resource() when normalize_connection() is # removed. self.normalize_connection(rtracker.resource) return rtracker.wrap_resource(self, resource_wrapper)
[ "def", "get_resource", "(", "self", ",", "resource_wrapper", "=", "None", ")", ":", "rtracker", "=", "None", "if", "resource_wrapper", "is", "None", ":", "resource_wrapper", "=", "self", ".", "_resource_wrapper", "if", "self", ".", "empty", "(", ")", ":", "self", ".", "_harvest_lost_resources", "(", ")", "try", ":", "rtracker", "=", "self", ".", "_get", "(", "0", ")", "except", "PoolEmptyError", ":", "pass", "if", "rtracker", "is", "None", ":", "# Could not find resource, try to make one.", "try", ":", "rtracker", "=", "self", ".", "_make_resource", "(", ")", "except", "PoolFullError", ":", "pass", "if", "rtracker", "is", "None", ":", "# Could not find or make resource, so must wait for a resource", "# to be returned to the pool.", "try", ":", "rtracker", "=", "self", ".", "_get", "(", "timeout", "=", "self", ".", "_timeout", ")", "except", "PoolEmptyError", ":", "pass", "if", "rtracker", "is", "None", ":", "raise", "PoolEmptyError", "# Ensure resource is active.", "if", "not", "self", ".", "ping", "(", "rtracker", ".", "resource", ")", ":", "# Lock here to prevent another thread creating a resource in the", "# index that will have this resource removed. This ensures there", "# will be space for _make_resource() to place a newly created", "# resource.", "with", "self", ".", "_lock", ":", "self", ".", "_remove", "(", "rtracker", ")", "rtracker", "=", "self", ".", "_make_resource", "(", ")", "# Ensure all resources leave pool with same attributes.", "# normalize_connection() is used since it calls", "# normalize_resource(), so if a user implements either one, the", "# resource will still be normalized. This will be changed in 1.0 to", "# call normalize_resource() when normalize_connection() is", "# removed.", "self", ".", "normalize_connection", "(", "rtracker", ".", "resource", ")", "return", "rtracker", ".", "wrap_resource", "(", "self", ",", "resource_wrapper", ")" ]
33.283333
19.916667
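A hypothetical checkout/return cycle for the pool method above; the pool construction and the return-to-pool call are assumptions that may differ from the real API:

resource = pool.get_resource()  # may block up to the pool timeout; raises PoolEmptyError on failure
try:
    resource.execute('PING')  # placeholder call on the wrapped resource
finally:
    resource.release()  # assumed wrapper method that hands the resource back to the pool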
def findall(pattern, string, flags=0): """Return a list of all non-overlapping matches in the string. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. Empty matches are included in the result.""" return _compile(pattern, flags).findall(string) # if sys.hexversion >= 0x02020000: # __all__.append("finditer") def finditer(pattern, string, flags=0): """Return an iterator over all non-overlapping matches in the string. For each match, the iterator returns a match object. Empty matches are included in the result.""" return _compile(pattern, flags).finditer(string)
[ "def", "findall", "(", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "return", "_compile", "(", "pattern", ",", "flags", ")", ".", "findall", "(", "string", ")", "# if sys.hexversion >= 0x02020000:", "# __all__.append(\"finditer\")", "def", "finditer", "(", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "\"\"\"Return an iterator over all non-overlapping matches in the\n string. For each match, the iterator returns a match object.\n\n Empty matches are included in the result.\"\"\"", "return", "_compile", "(", "pattern", ",", "flags", ")", ".", "finditer", "(", "string", ")" ]
40
15.833333
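These wrappers mirror the standard-library re module, so their semantics can be sketched directly:

findall(r'\d+', 'a1 b22 c333')  # ['1', '22', '333']
findall(r'(\w)(\d)', 'a1 b2')  # one tuple per match: [('a', '1'), ('b', '2')]
for m in finditer(r'\d+', 'a1 b22'):
    print(m.start(), m.group())  # prints: 1 1, then 4 22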
def eigenvalues_auto(mat, n_eivals='auto'): """ Automatically computes the spectrum of a given Laplacian matrix. Parameters ---------- mat : numpy.ndarray or scipy.sparse Laplacian matrix n_eivals : string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise an error will be raised. 'auto' lets the program decide based on the size of the matrix. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. Returns ------- np.ndarray Vector of approximated eigenvalues Examples -------- >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto') array([0, 3, 3]) """ do_full = True n_lower = 150 n_upper = 150 nv = mat.shape[0] if n_eivals == 'auto': if mat.shape[0] > 1024: do_full = False if n_eivals == 'full': do_full = True if isinstance(n_eivals, int): n_lower = n_upper = n_eivals do_full = False if isinstance(n_eivals, tuple): n_lower, n_upper = n_eivals do_full = False if do_full and sps.issparse(mat): mat = mat.todense() if sps.issparse(mat): if n_lower == n_upper: tr_eivals = spsl.eigsh(mat, 2*n_lower, which='BE', return_eigenvectors=False) return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv) else: lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::-1] up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False) return updown_linear_approx(lo_eivals, up_eivals, nv) else: if do_full: return spl.eigvalsh(mat) else: lo_eivals = spl.eigvalsh(mat, eigvals=(0, n_lower-1)) up_eivals = spl.eigvalsh(mat, eigvals=(nv-n_upper-1, nv-1)) return updown_linear_approx(lo_eivals, up_eivals, nv)
[ "def", "eigenvalues_auto", "(", "mat", ",", "n_eivals", "=", "'auto'", ")", ":", "do_full", "=", "True", "n_lower", "=", "150", "n_upper", "=", "150", "nv", "=", "mat", ".", "shape", "[", "0", "]", "if", "n_eivals", "==", "'auto'", ":", "if", "mat", ".", "shape", "[", "0", "]", ">", "1024", ":", "do_full", "=", "False", "if", "n_eivals", "==", "'full'", ":", "do_full", "=", "True", "if", "isinstance", "(", "n_eivals", ",", "int", ")", ":", "n_lower", "=", "n_upper", "=", "n_eivals", "do_full", "=", "False", "if", "isinstance", "(", "n_eivals", ",", "tuple", ")", ":", "n_lower", ",", "n_upper", "=", "n_eivals", "do_full", "=", "False", "if", "do_full", "and", "sps", ".", "issparse", "(", "mat", ")", ":", "mat", "=", "mat", ".", "todense", "(", ")", "if", "sps", ".", "issparse", "(", "mat", ")", ":", "if", "n_lower", "==", "n_upper", ":", "tr_eivals", "=", "spsl", ".", "eigsh", "(", "mat", ",", "2", "*", "n_lower", ",", "which", "=", "'BE'", ",", "return_eigenvectors", "=", "False", ")", "return", "updown_linear_approx", "(", "tr_eivals", "[", ":", "n_upper", "]", ",", "tr_eivals", "[", "n_upper", ":", "]", ",", "nv", ")", "else", ":", "lo_eivals", "=", "spsl", ".", "eigsh", "(", "mat", ",", "n_lower", ",", "which", "=", "'SM'", ",", "return_eigenvectors", "=", "False", ")", "[", ":", ":", "-", "1", "]", "up_eivals", "=", "spsl", ".", "eigsh", "(", "mat", ",", "n_upper", ",", "which", "=", "'LM'", ",", "return_eigenvectors", "=", "False", ")", "return", "updown_linear_approx", "(", "lo_eivals", ",", "up_eivals", ",", "nv", ")", "else", ":", "if", "do_full", ":", "return", "spl", ".", "eigvalsh", "(", "mat", ")", "else", ":", "lo_eivals", "=", "spl", ".", "eigvalsh", "(", "mat", ",", "eigvals", "=", "(", "0", ",", "n_lower", "-", "1", ")", ")", "up_eivals", "=", "spl", ".", "eigvalsh", "(", "mat", ",", "eigvals", "=", "(", "nv", "-", "n_upper", "-", "1", ",", "nv", "-", "1", ")", ")", "return", "updown_linear_approx", "(", "lo_eivals", ",", "up_eivals", ",", "nv", ")" ]
37.666667
25.105263
def rpc_receiver_count(self, service, routing_id): '''Get the number of peers that would handle a particular RPC :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :returns: the integer number of peers that would receive the described RPC ''' peers = len(list(self._dispatcher.find_peer_routes( const.MSG_TYPE_RPC_REQUEST, service, routing_id))) if self._dispatcher.locally_handles(const.MSG_TYPE_RPC_REQUEST, service, routing_id): return peers + 1 return peers
[ "def", "rpc_receiver_count", "(", "self", ",", "service", ",", "routing_id", ")", ":", "peers", "=", "len", "(", "list", "(", "self", ".", "_dispatcher", ".", "find_peer_routes", "(", "const", ".", "MSG_TYPE_RPC_REQUEST", ",", "service", ",", "routing_id", ")", ")", ")", "if", "self", ".", "_dispatcher", ".", "locally_handles", "(", "const", ".", "MSG_TYPE_RPC_REQUEST", ",", "service", ",", "routing_id", ")", ":", "return", "peers", "+", "1", "return", "peers" ]
38.944444
20.277778
def generate_search_subparser(subparsers): """Adds a sub-command parser to `subparsers` to generate search results for a set of n-grams.""" parser = subparsers.add_parser( 'search', description=constants.SEARCH_DESCRIPTION, epilog=constants.SEARCH_EPILOG, formatter_class=ParagraphFormatter, help=constants.SEARCH_HELP) parser.set_defaults(func=search_texts) utils.add_common_arguments(parser) utils.add_db_arguments(parser) utils.add_corpus_arguments(parser) utils.add_query_arguments(parser) parser.add_argument('ngrams', help=constants.SEARCH_NGRAMS_HELP, nargs='*', metavar='NGRAMS')
[ "def", "generate_search_subparser", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "'search'", ",", "description", "=", "constants", ".", "SEARCH_DESCRIPTION", ",", "epilog", "=", "constants", ".", "SEARCH_EPILOG", ",", "formatter_class", "=", "ParagraphFormatter", ",", "help", "=", "constants", ".", "SEARCH_HELP", ")", "parser", ".", "set_defaults", "(", "func", "=", "search_texts", ")", "utils", ".", "add_common_arguments", "(", "parser", ")", "utils", ".", "add_db_arguments", "(", "parser", ")", "utils", ".", "add_corpus_arguments", "(", "parser", ")", "utils", ".", "add_query_arguments", "(", "parser", ")", "parser", ".", "add_argument", "(", "'ngrams'", ",", "help", "=", "constants", ".", "SEARCH_NGRAMS_HELP", ",", "nargs", "=", "'*'", ",", "metavar", "=", "'NGRAMS'", ")" ]
47
8.642857
def instruction_list(self): """Return a list of instructions for this CompositeGate. If the CompositeGate itself contains composites, call this method recursively. """ instruction_list = [] for instruction in self.data: if isinstance(instruction, CompositeGate): instruction_list.extend(instruction.instruction_list()) else: instruction_list.append(instruction) return instruction_list
[ "def", "instruction_list", "(", "self", ")", ":", "instruction_list", "=", "[", "]", "for", "instruction", "in", "self", ".", "data", ":", "if", "isinstance", "(", "instruction", ",", "CompositeGate", ")", ":", "instruction_list", ".", "extend", "(", "instruction", ".", "instruction_list", "(", ")", ")", "else", ":", "instruction_list", ".", "append", "(", "instruction", ")", "return", "instruction_list" ]
37.384615
14.230769
def fetch_credential(self, credential=None, profile=None): """Fetch credential from credentials file. Args: credential (str): Credential to fetch. profile (str): Credentials profile. Defaults to ``'default'``. Returns: str, None: Fetched credential or ``None``. """ q = self.db.get(self.query.profile == profile) if q is not None: return q.get(credential)
[ "def", "fetch_credential", "(", "self", ",", "credential", "=", "None", ",", "profile", "=", "None", ")", ":", "q", "=", "self", ".", "db", ".", "get", "(", "self", ".", "query", ".", "profile", "==", "profile", ")", "if", "q", "is", "not", "None", ":", "return", "q", ".", "get", "(", "credential", ")" ]
31.5
20
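A minimal sketch, assuming `store` is an instance of the class defining fetch_credential(), backed by a TinyDB-style credentials file:

api_key = store.fetch_credential(credential='api_key', profile='default')
if api_key is None:  # a missing profile or credential both yield None
    raise KeyError("no 'api_key' stored under the 'default' profile")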
def pass_creds_to_nylas(): """ This view loads the credentials from Google and passes them to Nylas, to set up native authentication. """ # If you haven't already connected with Google, this won't work. if not google.authorized: return "Error: not yet connected with Google!", 400 if "refresh_token" not in google.token: # We're missing the refresh token from Google, and the only way to get # a new one is to force reauthentication. That's annoying. return ( ( "Error: missing Google refresh token. " "Uncomment the `reprompt_consent` line in the code to fix this." ), 500, ) # Look up the user's name and email address from Google. google_resp = google.get("/oauth2/v2/userinfo?fields=name,email") assert google_resp.ok, "Received failure response from Google userinfo API" google_userinfo = google_resp.json() # Start the connection process by looking up all the information that # Nylas needs in order to connect, and sending it to the authorize API. nylas_authorize_data = { "client_id": app.config["NYLAS_OAUTH_CLIENT_ID"], "name": google_userinfo["name"], "email_address": google_userinfo["email"], "provider": "gmail", "settings": { "google_client_id": app.config["GOOGLE_OAUTH_CLIENT_ID"], "google_client_secret": app.config["GOOGLE_OAUTH_CLIENT_SECRET"], "google_refresh_token": google.token["refresh_token"], }, } nylas_authorize_resp = requests.post( "https://api.nylas.com/connect/authorize", json=nylas_authorize_data ) assert nylas_authorize_resp.ok, "Received failure response from Nylas authorize API" nylas_code = nylas_authorize_resp.json()["code"] # Now that we've got the `code` from the authorize response, # pass it to the token response to complete the connection. nylas_token_data = { "client_id": app.config["NYLAS_OAUTH_CLIENT_ID"], "client_secret": app.config["NYLAS_OAUTH_CLIENT_SECRET"], "code": nylas_code, } nylas_token_resp = requests.post( "https://api.nylas.com/connect/token", json=nylas_token_data ) assert nylas_token_resp.ok, "Received failure response from Nylas token API" nylas_access_token = nylas_token_resp.json()["access_token"] # Great, we've connected Google to Nylas! In the process, Nylas gave us # an OAuth access token, which we'll need in order to make API requests # to Nylas in the future. We'll save that access token in the Flask session, # so we can pick it up later and use it when we need it. session["nylas_access_token"] = nylas_access_token # We're all done here. Redirect the user back to the home page, # which will pick up the access token we just saved. return redirect(url_for("index"))
[ "def", "pass_creds_to_nylas", "(", ")", ":", "# If you haven't already connected with Google, this won't work.", "if", "not", "google", ".", "authorized", ":", "return", "\"Error: not yet connected with Google!\"", ",", "400", "if", "\"refresh_token\"", "not", "in", "google", ".", "token", ":", "# We're missing the refresh token from Google, and the only way to get", "# a new one is to force reauthentication. That's annoying.", "return", "(", "(", "\"Error: missing Google refresh token. \"", "\"Uncomment the `reprompt_consent` line in the code to fix this.\"", ")", ",", "500", ",", ")", "# Look up the user's name and email address from Google.", "google_resp", "=", "google", ".", "get", "(", "\"/oauth2/v2/userinfo?fields=name,email\"", ")", "assert", "google_resp", ".", "ok", ",", "\"Received failure response from Google userinfo API\"", "google_userinfo", "=", "google_resp", ".", "json", "(", ")", "# Start the connection process by looking up all the information that", "# Nylas needs in order to connect, and sending it to the authorize API.", "nylas_authorize_data", "=", "{", "\"client_id\"", ":", "app", ".", "config", "[", "\"NYLAS_OAUTH_CLIENT_ID\"", "]", ",", "\"name\"", ":", "google_userinfo", "[", "\"name\"", "]", ",", "\"email_address\"", ":", "google_userinfo", "[", "\"email\"", "]", ",", "\"provider\"", ":", "\"gmail\"", ",", "\"settings\"", ":", "{", "\"google_client_id\"", ":", "app", ".", "config", "[", "\"GOOGLE_OAUTH_CLIENT_ID\"", "]", ",", "\"google_client_secret\"", ":", "app", ".", "config", "[", "\"GOOGLE_OAUTH_CLIENT_SECRET\"", "]", ",", "\"google_refresh_token\"", ":", "google", ".", "token", "[", "\"refresh_token\"", "]", ",", "}", ",", "}", "nylas_authorize_resp", "=", "requests", ".", "post", "(", "\"https://api.nylas.com/connect/authorize\"", ",", "json", "=", "nylas_authorize_data", ")", "assert", "nylas_authorize_resp", ".", "ok", ",", "\"Received failure response from Nylas authorize API\"", "nylas_code", "=", "nylas_authorize_resp", ".", "json", "(", ")", "[", "\"code\"", "]", "# Now that we've got the `code` from the authorize response,", "# pass it to the token response to complete the connection.", "nylas_token_data", "=", "{", "\"client_id\"", ":", "app", ".", "config", "[", "\"NYLAS_OAUTH_CLIENT_ID\"", "]", ",", "\"client_secret\"", ":", "app", ".", "config", "[", "\"NYLAS_OAUTH_CLIENT_SECRET\"", "]", ",", "\"code\"", ":", "nylas_code", ",", "}", "nylas_token_resp", "=", "requests", ".", "post", "(", "\"https://api.nylas.com/connect/token\"", ",", "json", "=", "nylas_token_data", ")", "assert", "nylas_token_resp", ".", "ok", ",", "\"Received failure response from Nylas token API\"", "nylas_access_token", "=", "nylas_token_resp", ".", "json", "(", ")", "[", "\"access_token\"", "]", "# Great, we've connected Google to Nylas! In the process, Nylas gave us", "# an OAuth access token, which we'll need in order to make API requests", "# to Nylas in the future. We'll save that access token in the Flask session,", "# so we can pick it up later and use it when we need it.", "session", "[", "\"nylas_access_token\"", "]", "=", "nylas_access_token", "# We're all done here. Redirect the user back to the home page,", "# which will pick up the access token we just saved.", "return", "redirect", "(", "url_for", "(", "\"index\"", ")", ")" ]
43.439394
24.106061
def push_call_history_item(self, state, call_type, state_for_scoped_data, input_data=None): """Adds a new call-history-item to the history item list A call history item stores information about the point in time where a method (entry, execute, exit) of a certain state was called. :param state: the state that was called :param call_type: the call type of the execution step, i.e. if it refers to a container state or an execution state :param state_for_scoped_data: the state of which the scoped data needs to be saved for further use (e.g. backward stepping) """ last_history_item = self.get_last_history_item() from rafcon.core.states.library_state import LibraryState # delayed imported on purpose if isinstance(state_for_scoped_data, LibraryState): state_for_scoped_data = state_for_scoped_data.state_copy return_item = CallItem(state, last_history_item, call_type, state_for_scoped_data, input_data, state.run_id) return self._push_item(last_history_item, return_item)
[ "def", "push_call_history_item", "(", "self", ",", "state", ",", "call_type", ",", "state_for_scoped_data", ",", "input_data", "=", "None", ")", ":", "last_history_item", "=", "self", ".", "get_last_history_item", "(", ")", "from", "rafcon", ".", "core", ".", "states", ".", "library_state", "import", "LibraryState", "# delayed imported on purpose", "if", "isinstance", "(", "state_for_scoped_data", ",", "LibraryState", ")", ":", "state_for_scoped_data", "=", "state_for_scoped_data", ".", "state_copy", "return_item", "=", "CallItem", "(", "state", ",", "last_history_item", ",", "call_type", ",", "state_for_scoped_data", ",", "input_data", ",", "state", ".", "run_id", ")", "return", "self", ".", "_push_item", "(", "last_history_item", ",", "return_item", ")" ]
60
29.105263
def get_recipients_data(self, reports): """Recipients data to be used in the template """ if not reports: return [] recipients = [] recipient_names = [] for num, report in enumerate(reports): # get the linked AR of this ARReport ar = report.getAnalysisRequest() # recipient names of this report report_recipient_names = [] for recipient in self.get_recipients(ar): name = recipient.get("Fullname") email = recipient.get("EmailAddress") record = { "name": name, "email": email, "valid": True, } if record not in recipients: recipients.append(record) # remember the name of the recipient for this report report_recipient_names.append(name) recipient_names.append(report_recipient_names) # recipient names, which all of the reports have in common common_names = set(recipient_names[0]).intersection(*recipient_names) # mark recipients not in common for recipient in recipients: if recipient.get("name") not in common_names: recipient["valid"] = False return recipients
[ "def", "get_recipients_data", "(", "self", ",", "reports", ")", ":", "if", "not", "reports", ":", "return", "[", "]", "recipients", "=", "[", "]", "recipient_names", "=", "[", "]", "for", "num", ",", "report", "in", "enumerate", "(", "reports", ")", ":", "# get the linked AR of this ARReport", "ar", "=", "report", ".", "getAnalysisRequest", "(", ")", "# recipient names of this report", "report_recipient_names", "=", "[", "]", "for", "recipient", "in", "self", ".", "get_recipients", "(", "ar", ")", ":", "name", "=", "recipient", ".", "get", "(", "\"Fullname\"", ")", "email", "=", "recipient", ".", "get", "(", "\"EmailAddress\"", ")", "record", "=", "{", "\"name\"", ":", "name", ",", "\"email\"", ":", "email", ",", "\"valid\"", ":", "True", ",", "}", "if", "record", "not", "in", "recipients", ":", "recipients", ".", "append", "(", "record", ")", "# remember the name of the recipient for this report", "report_recipient_names", ".", "append", "(", "name", ")", "recipient_names", ".", "append", "(", "report_recipient_names", ")", "# recipient names, which all of the reports have in common", "common_names", "=", "set", "(", "recipient_names", "[", "0", "]", ")", ".", "intersection", "(", "*", "recipient_names", ")", "# mark recipients not in common", "for", "recipient", "in", "recipients", ":", "if", "recipient", ".", "get", "(", "\"name\"", ")", "not", "in", "common_names", ":", "recipient", "[", "\"valid\"", "]", "=", "False", "return", "recipients" ]
37.885714
13.314286
def dotprint( expr, styles=None, maxdepth=None, repeat=True, labelfunc=expr_labelfunc(str, str), idfunc=None, get_children=_op_children, **kwargs): """Return the `DOT`_ (graph) description of an Expression tree as a string Args: expr (object): The expression to render into a graph. Typically an instance of :class:`~qnet.algebra.abstract_algebra.Expression`, but with appropriate `get_children`, `labelfunc`, and `id_func`, this could be any tree-like object styles (list or None): A list of tuples ``(expr_filter, style_dict)`` where ``expr_filter`` is a callable and ``style_dict`` is a list of `DOT`_ node properties that should be used when rendering a node for which ``expr_filter(expr)`` returns True. maxdepth (int or None): The maximum depth of the resulting tree (any node at `maxdepth` will be drawn as a leaf) repeat (bool): By default, if identical sub-expressions occur in multiple locations (as identified by `idfunc`), they will be repeated in the graph. If ``repeat=False`` is given, each unique (sub-)expression is only drawn once. The resulting graph may no longer be a proper tree, as recurring expressions will have multiple parents. labelfunc (callable): A function that receives `expr` and a boolean ``is_leaf`` and returns the label of the corresponding node in the graph. Defaults to ``expr_labelfunc(str, str)``. idfunc (callable or None): A function that returns the ID of the node representing a given expression. Expressions for which `idfunc` returns identical results are considered identical if `repeat` is False. The default value None uses a function that is appropriate to a single standalone DOT file. If this is insufficient, something like ``hash`` or ``str`` would make a good `idfunc`. get_children (callable): A function that returns a list of sub-expressions (the children of `expr`). Defaults to the operands of an :class:`~qnet.algebra.abstract_algebra.Operation` (thus, anything that is not an Operation is a leaf) kwargs: All further keyword arguments set custom `DOT`_ graph attributes Returns: str: a multiline str representing a graph in the `DOT`_ language Notes: The node `styles` are additive. For example, consider the following custom styles:: styles = [ (lambda expr: isinstance(expr, SCALAR_TYPES), {'color': 'blue', 'shape': 'box', 'fontsize': 12}), (lambda expr: isinstance(expr, Expression), {'color': 'red', 'shape': 'box', 'fontsize': 12}), (lambda expr: isinstance(expr, Operation), {'color': 'black', 'shape': 'ellipse'})] For Operations (which are a subclass of Expression) the color and shape are overwritten, while the fontsize 12 is inherited. Keyword arguments are directly translated into graph styles. For example, in order to produce a horizontal instead of vertical graph, use ``dotprint(..., rankdir='LR')``. See also: :func:`sympy.printing.dot.dotprint` provides an equivalent function for SymPy expressions. """ # the routine is called 'dotprint' to match sympy (even though most of the # similar routines for the other printers are called e.g. 'latex', not # 'latexprint' if idfunc is None: if repeat: idfunc = lambda expr: 'node' else: idfunc = hash graphstyle = {'rankdir': 'TD', 'ordering': 'out'} graphstyle.update(kwargs) nodes = [] edges = [] level = 0 pos = 0 pos_counter = defaultdict(int) # level => current pos stack = [(level, pos, expr)] if styles is None: styles = [] while len(stack) > 0: level, pos, expr = stack.pop(0) node_id = _node_id(expr, (level, pos), idfunc, repeat) children = get_children(expr) is_leaf = len(children) == 0 if maxdepth is not None and level >= maxdepth: is_leaf = True style = _styleof(expr, styles) style['label'] = labelfunc(expr, is_leaf) nodes.append('"%s" [%s];' % (node_id, _attrprint(style))) if not is_leaf: try: for expr_sub in children: i_sub = pos_counter[level+1] id_sub = _node_id( expr_sub, (level+1, i_sub), idfunc, repeat) edges.append('"%s" -> "%s"' % (node_id, id_sub)) stack.append((level+1, i_sub, expr_sub)) pos_counter[level+1] += 1 except AttributeError: pass return template % { 'graphstyle': _attrprint(graphstyle, delimiter='\n'), 'nodes': '\n'.join(nodes), 'edges': '\n'.join(edges)}
[ "def", "dotprint", "(", "expr", ",", "styles", "=", "None", ",", "maxdepth", "=", "None", ",", "repeat", "=", "True", ",", "labelfunc", "=", "expr_labelfunc", "(", "str", ",", "str", ")", ",", "idfunc", "=", "None", ",", "get_children", "=", "_op_children", ",", "*", "*", "kwargs", ")", ":", "# the routine is called 'dotprint' to match sympy (even though most of the", "# similar routines for the other printers are called e.g. 'latex', not", "# 'latexprint'", "if", "idfunc", "is", "None", ":", "if", "repeat", ":", "idfunc", "=", "lambda", "expr", ":", "'node'", "else", ":", "idfunc", "=", "hash", "graphstyle", "=", "{", "'rankdir'", ":", "'TD'", ",", "'ordering'", ":", "'out'", "}", "graphstyle", ".", "update", "(", "kwargs", ")", "nodes", "=", "[", "]", "edges", "=", "[", "]", "level", "=", "0", "pos", "=", "0", "pos_counter", "=", "defaultdict", "(", "int", ")", "# level => current pos", "stack", "=", "[", "(", "level", ",", "pos", ",", "expr", ")", "]", "if", "styles", "is", "None", ":", "styles", "=", "[", "]", "while", "len", "(", "stack", ")", ">", "0", ":", "level", ",", "pos", ",", "expr", "=", "stack", ".", "pop", "(", "0", ")", "node_id", "=", "_node_id", "(", "expr", ",", "(", "level", ",", "pos", ")", ",", "idfunc", ",", "repeat", ")", "children", "=", "get_children", "(", "expr", ")", "is_leaf", "=", "len", "(", "children", ")", "==", "0", "if", "maxdepth", "is", "not", "None", "and", "level", ">=", "maxdepth", ":", "is_leaf", "=", "True", "style", "=", "_styleof", "(", "expr", ",", "styles", ")", "style", "[", "'label'", "]", "=", "labelfunc", "(", "expr", ",", "is_leaf", ")", "nodes", ".", "append", "(", "'\"%s\" [%s];'", "%", "(", "node_id", ",", "_attrprint", "(", "style", ")", ")", ")", "if", "not", "is_leaf", ":", "try", ":", "for", "expr_sub", "in", "children", ":", "i_sub", "=", "pos_counter", "[", "level", "+", "1", "]", "id_sub", "=", "_node_id", "(", "expr_sub", ",", "(", "level", "+", "1", ",", "i_sub", ")", ",", "idfunc", ",", "repeat", ")", "edges", ".", "append", "(", "'\"%s\" -> \"%s\"'", "%", "(", "node_id", ",", "id_sub", ")", ")", "stack", ".", "append", "(", "(", "level", "+", "1", ",", "i_sub", ",", "expr_sub", ")", ")", "pos_counter", "[", "level", "+", "1", "]", "+=", "1", "except", "AttributeError", ":", "pass", "return", "template", "%", "{", "'graphstyle'", ":", "_attrprint", "(", "graphstyle", ",", "delimiter", "=", "'\\n'", ")", ",", "'nodes'", ":", "'\\n'", ".", "join", "(", "nodes", ")", ",", "'edges'", ":", "'\\n'", ".", "join", "(", "edges", ")", "}" ]
46.398148
23.166667
def ipi_base_number(name=None): """ IPI Base Number field. An IPI Base Number code written in a field follows the pattern C-NNNNNNNNN-M, where: - C: header, a character. - N: numeric value. - M: control digit. So, for example, an IPI Base Number code field can contain I-000000229-7. :param name: name for the field :return: a parser for the IPI Base Number field """ if name is None: name = 'IPI Base Number Field' field = pp.Regex('I-[0-9]{9}-[0-9]') # Name field.setName(name) field_num = basic.numeric(13) field_num.setName(name) field = field | field_num # White spaces are not removed field.leaveWhitespace() return field.setResultsName('ipi_base_n')
[ "def", "ipi_base_number", "(", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "'IPI Base Number Field'", "field", "=", "pp", ".", "Regex", "(", "'I-[0-9]{9}-[0-9]'", ")", "# Name", "field", ".", "setName", "(", "name", ")", "field_num", "=", "basic", ".", "numeric", "(", "13", ")", "field_num", ".", "setName", "(", "name", ")", "field", "=", "field", "|", "field_num", "# White spaces are not removed", "field", ".", "leaveWhitespace", "(", ")", "return", "field", ".", "setResultsName", "(", "'ipi_base_n'", ")" ]
22.151515
20.636364
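A quick pyparsing sketch of the field parser above; the sample value comes from the record's own docstring:

field = ipi_base_number()
result = field.parseString('I-000000229-7')  # matches the C-NNNNNNNNN-M pattern
print(result.ipi_base_n)  # 'I-000000229-7'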
def configure_count(self, ns, definition): """ Register a count endpoint. The definition's func should be a count function, which must: - accept kwargs for the query string - return a count of the total number of items available The definition's request_schema will be used to process query string arguments. :param ns: the namespace :param definition: the endpoint definition """ @self.add_route(ns.collection_path, Operation.Count, ns) @qs(definition.request_schema) @wraps(definition.func) def count(**path_data): request_data = load_query_string_data(definition.request_schema) response_data = dict() count = definition.func(**merge_data(path_data, request_data)) headers = encode_count_header(count) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( None, None, headers=headers, response_format=response_format, ) count.__doc__ = "Count the size of the collection of all {}".format(pluralize(ns.subject_name))
[ "def", "configure_count", "(", "self", ",", "ns", ",", "definition", ")", ":", "@", "self", ".", "add_route", "(", "ns", ".", "collection_path", ",", "Operation", ".", "Count", ",", "ns", ")", "@", "qs", "(", "definition", ".", "request_schema", ")", "@", "wraps", "(", "definition", ".", "func", ")", "def", "count", "(", "*", "*", "path_data", ")", ":", "request_data", "=", "load_query_string_data", "(", "definition", ".", "request_schema", ")", "response_data", "=", "dict", "(", ")", "count", "=", "definition", ".", "func", "(", "*", "*", "merge_data", "(", "path_data", ",", "request_data", ")", ")", "headers", "=", "encode_count_header", "(", "count", ")", "definition", ".", "header_func", "(", "headers", ",", "response_data", ")", "response_format", "=", "self", ".", "negotiate_response_content", "(", "definition", ".", "response_formats", ")", "return", "dump_response_data", "(", "None", ",", "None", ",", "headers", "=", "headers", ",", "response_format", "=", "response_format", ",", ")", "count", ".", "__doc__", "=", "\"Count the size of the collection of all {}\"", ".", "format", "(", "pluralize", "(", "ns", ".", "subject_name", ")", ")" ]
39.46875
20.96875
def read_hdf(path_or_buf, key=None, mode='r', **kwargs): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. Supports any object implementing the ``__fspath__`` protocol. This includes :class:`pathlib.Path` and py._path.local.LocalPath objects. .. versionadded:: 0.19.0 support for pathlib, py.path. .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, optional Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- item : object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) >>> df.to_hdf('./store.h5', 'data') >>> reread = pd.read_hdf('./store.h5') """ if mode not in ['r', 'r+', 'a']: raise ValueError('mode {0} is not allowed while performing a read. ' 'Allowed modes are r, r+ and a.'.format(mode)) # grab the scope if 'where' in kwargs: kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise IOError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: path_or_buf = _stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError('Support for generic buffers has not ' 'been implemented.') try: exists = os.path.exists(path_or_buf) # if filepath is too long except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError( 'File {path} does not exist'.format(path=path_or_buf)) store = HDFStore(path_or_buf, mode=mode, **kwargs) # can't auto open/close if we are using an iterator # so delegate to the iterator auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError('No dataset in HDF5 file.') candidate_only_group = groups[0] # For the HDF file to have only one dataset, all other groups # should then be metadata groups for that candidate group. (This # assumes that the groups() method enumerates parent groups # before their children.) for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError('key must be provided when HDF5 file ' 'contains multiple datasets.') key = candidate_only_group._v_pathname return store.select(key, auto_close=auto_close, **kwargs) except (ValueError, TypeError, KeyError): # if there is an error, close the store try: store.close() except AttributeError: pass raise
[ "def", "read_hdf", "(", "path_or_buf", ",", "key", "=", "None", ",", "mode", "=", "'r'", ",", "*", "*", "kwargs", ")", ":", "if", "mode", "not", "in", "[", "'r'", ",", "'r+'", ",", "'a'", "]", ":", "raise", "ValueError", "(", "'mode {0} is not allowed while performing a read. '", "'Allowed modes are r, r+ and a.'", ".", "format", "(", "mode", ")", ")", "# grab the scope", "if", "'where'", "in", "kwargs", ":", "kwargs", "[", "'where'", "]", "=", "_ensure_term", "(", "kwargs", "[", "'where'", "]", ",", "scope_level", "=", "1", ")", "if", "isinstance", "(", "path_or_buf", ",", "HDFStore", ")", ":", "if", "not", "path_or_buf", ".", "is_open", ":", "raise", "IOError", "(", "'The HDFStore must be open for reading.'", ")", "store", "=", "path_or_buf", "auto_close", "=", "False", "else", ":", "path_or_buf", "=", "_stringify_path", "(", "path_or_buf", ")", "if", "not", "isinstance", "(", "path_or_buf", ",", "str", ")", ":", "raise", "NotImplementedError", "(", "'Support for generic buffers has not '", "'been implemented.'", ")", "try", ":", "exists", "=", "os", ".", "path", ".", "exists", "(", "path_or_buf", ")", "# if filepath is too long", "except", "(", "TypeError", ",", "ValueError", ")", ":", "exists", "=", "False", "if", "not", "exists", ":", "raise", "FileNotFoundError", "(", "'File {path} does not exist'", ".", "format", "(", "path", "=", "path_or_buf", ")", ")", "store", "=", "HDFStore", "(", "path_or_buf", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "# can't auto open/close if we are using an iterator", "# so delegate to the iterator", "auto_close", "=", "True", "try", ":", "if", "key", "is", "None", ":", "groups", "=", "store", ".", "groups", "(", ")", "if", "len", "(", "groups", ")", "==", "0", ":", "raise", "ValueError", "(", "'No dataset in HDF5 file.'", ")", "candidate_only_group", "=", "groups", "[", "0", "]", "# For the HDF file to have only one dataset, all other groups", "# should then be metadata groups for that candidate group. (This", "# assumes that the groups() method enumerates parent groups", "# before their children.)", "for", "group_to_check", "in", "groups", "[", "1", ":", "]", ":", "if", "not", "_is_metadata_of", "(", "group_to_check", ",", "candidate_only_group", ")", ":", "raise", "ValueError", "(", "'key must be provided when HDF5 file '", "'contains multiple datasets.'", ")", "key", "=", "candidate_only_group", ".", "_v_pathname", "return", "store", ".", "select", "(", "key", ",", "auto_close", "=", "auto_close", ",", "*", "*", "kwargs", ")", "except", "(", "ValueError", ",", "TypeError", ",", "KeyError", ")", ":", "# if there is an error, close the store", "try", ":", "store", ".", "close", "(", ")", "except", "AttributeError", ":", "pass", "raise" ]
35.697479
20.789916
def invoke_with_usage (self, args, **kwargs): """Invoke the command with standardized usage-help processing. Same calling convention as `Command.invoke()`, except here *args* is an un-parsed list of strings. """ ap = self.get_arg_parser (**kwargs) args = ap.parse_args (args) return self.invoke (args, **kwargs)
[ "def", "invoke_with_usage", "(", "self", ",", "args", ",", "*", "*", "kwargs", ")", ":", "ap", "=", "self", ".", "get_arg_parser", "(", "*", "*", "kwargs", ")", "args", "=", "ap", ".", "parse_args", "(", "args", ")", "return", "self", ".", "invoke", "(", "args", ",", "*", "*", "kwargs", ")" ]
40
10.666667
def get_frequency_list(lang, wordlist='best', match_cutoff=30): """ Read the raw data from a wordlist file, returning it as a list of lists. (See `read_cBpack` for what this represents.) Because we use the `langcodes` module, we can handle slight variations in language codes. For example, looking for 'pt-BR', 'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list. Looking up the alternate code 'por' will also get the same list. """ available = available_languages(wordlist) best, score = langcodes.best_match(lang, list(available), min_score=match_cutoff) if score == 0: raise LookupError("No wordlist %r available for language %r" % (wordlist, lang)) if best != lang: logger.warning( "You asked for word frequencies in language %r. Using the " "nearest match, which is %r (%s)." % (lang, best, langcodes.get(best).language_name('en')) ) return read_cBpack(available[best])
[ "def", "get_frequency_list", "(", "lang", ",", "wordlist", "=", "'best'", ",", "match_cutoff", "=", "30", ")", ":", "available", "=", "available_languages", "(", "wordlist", ")", "best", ",", "score", "=", "langcodes", ".", "best_match", "(", "lang", ",", "list", "(", "available", ")", ",", "min_score", "=", "match_cutoff", ")", "if", "score", "==", "0", ":", "raise", "LookupError", "(", "\"No wordlist %r available for language %r\"", "%", "(", "wordlist", ",", "lang", ")", ")", "if", "best", "!=", "lang", ":", "logger", ".", "warning", "(", "\"You asked for word frequencies in language %r. Using the \"", "\"nearest match, which is %r (%s).\"", "%", "(", "lang", ",", "best", ",", "langcodes", ".", "get", "(", "best", ")", ".", "language_name", "(", "'en'", ")", ")", ")", "return", "read_cBpack", "(", "available", "[", "best", "]", ")" ]
41.72
21.24
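A usage sketch, assuming the wordfreq-style data files for the default wordlist are installed:

freq_list = get_frequency_list('pt-BR')  # logs a warning and resolves to the nearest match, 'pt'
top_band = freq_list[0]  # the highest-frequency band of words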
def input_validate_yubikey_secret(data, name='data'): """ Input validation for YHSM_YubiKeySecret or string. """ if isinstance(data, pyhsm.aead_cmd.YHSM_YubiKeySecret): data = data.pack() return input_validate_str(data, name)
[ "def", "input_validate_yubikey_secret", "(", "data", ",", "name", "=", "'data'", ")", ":", "if", "isinstance", "(", "data", ",", "pyhsm", ".", "aead_cmd", ".", "YHSM_YubiKeySecret", ")", ":", "data", "=", "data", ".", "pack", "(", ")", "return", "input_validate_str", "(", "data", ",", "name", ")" ]
48.2
9.4
def set_perplexities(self, new_perplexities): """Change the perplexities of the affinity matrix. Note that we only allow lowering the perplexities or restoring them to their original maximum value. This restriction exists because setting a higher perplexity value requires recomputing all the nearest neighbors, which can take a long time. To avoid potential confusion as to why execution time is slow, this is not allowed. If you would like to increase the perplexity above the initial value, simply create a new instance. Parameters ---------- new_perplexities: List[float] The new list of perplexities. """ if np.array_equal(self.perplexities, new_perplexities): return new_perplexities = self.check_perplexities(new_perplexities) max_perplexity = np.max(new_perplexities) k_neighbors = min(self.n_samples - 1, int(3 * max_perplexity)) if k_neighbors > self.__neighbors.shape[1]: raise RuntimeError( "The largest perplexity `%.2f` is larger than the initial one " "used. This would need to recompute the nearest neighbors, " "which is not efficient. Please create a new `%s` instance " "with the increased perplexity." % (max_perplexity, self.__class__.__name__) ) self.perplexities = new_perplexities self.P = self._calculate_P( self.__neighbors[:, :k_neighbors], self.__distances[:, :k_neighbors], self.perplexities, symmetrize=True, n_jobs=self.n_jobs, )
[ "def", "set_perplexities", "(", "self", ",", "new_perplexities", ")", ":", "if", "np", ".", "array_equal", "(", "self", ".", "perplexities", ",", "new_perplexities", ")", ":", "return", "new_perplexities", "=", "self", ".", "check_perplexities", "(", "new_perplexities", ")", "max_perplexity", "=", "np", ".", "max", "(", "new_perplexities", ")", "k_neighbors", "=", "min", "(", "self", ".", "n_samples", "-", "1", ",", "int", "(", "3", "*", "max_perplexity", ")", ")", "if", "k_neighbors", ">", "self", ".", "__neighbors", ".", "shape", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"The largest perplexity `%.2f` is larger than the initial one \"", "\"used. This would need to recompute the nearest neighbors, \"", "\"which is not efficient. Please create a new `%s` instance \"", "\"with the increased perplexity.\"", "%", "(", "max_perplexity", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "perplexities", "=", "new_perplexities", "self", ".", "P", "=", "self", ".", "_calculate_P", "(", "self", ".", "__neighbors", "[", ":", ",", ":", "k_neighbors", "]", ",", "self", ".", "__distances", "[", ":", ",", ":", "k_neighbors", "]", ",", "self", ".", "perplexities", ",", "symmetrize", "=", "True", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", ")" ]
40.829268
22.317073
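A hypothetical sketch; `affinities` is assumed to be an instance of the multi-perplexity affinity class above, originally built with perplexities=[30, 100]:

affinities.set_perplexities([10, 30])  # lowering (or restoring) perplexities is allowed
# affinities.set_perplexities([500])  # would raise RuntimeError: above the initial maximum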
def process_targets_element(cls, scanner_target): """ Receive an XML object with the target, ports and credentials to run a scan against. @param: XML element with target subelements. Each target has <hosts> and <ports> subelements. Hosts can be a single host, a host range, a comma-separated host list or a network address. <ports> and <credentials> are optional. Therefore each ospd-scanner should check for valid ones if needed. Example form: <targets> <target> <hosts>localhosts</hosts> <ports>80,443</ports> </target> <target> <hosts>192.168.0.0/24</hosts> <ports>22</ports> <credentials> <credential type="up" service="ssh" port="22"> <username>scanuser</username> <password>mypass</password> </credential> <credential type="up" service="smb"> <username>smbuser</username> <password>mypass</password> </credential> </credentials> </target> </targets> @return: A list of (hosts, port) tuples. Example form: [['localhost', '80,43'], ['192.168.0.0/24', '22', {'smb': {'type': type, 'port': port, 'username': username, 'password': pass, }}]] """ target_list = [] for target in scanner_target: ports = '' credentials = {} for child in target: if child.tag == 'hosts': hosts = child.text if child.tag == 'ports': ports = child.text if child.tag == 'credentials': credentials = cls.process_credentials_elements(child) if hosts: target_list.append([hosts, ports, credentials]) else: raise OSPDError('No target to scan', 'start_scan') return target_list
[ "def", "process_targets_element", "(", "cls", ",", "scanner_target", ")", ":", "target_list", "=", "[", "]", "for", "target", "in", "scanner_target", ":", "ports", "=", "''", "credentials", "=", "{", "}", "for", "child", "in", "target", ":", "if", "child", ".", "tag", "==", "'hosts'", ":", "hosts", "=", "child", ".", "text", "if", "child", ".", "tag", "==", "'ports'", ":", "ports", "=", "child", ".", "text", "if", "child", ".", "tag", "==", "'credentials'", ":", "credentials", "=", "cls", ".", "process_credentials_elements", "(", "child", ")", "if", "hosts", ":", "target_list", ".", "append", "(", "[", "hosts", ",", "ports", ",", "credentials", "]", ")", "else", ":", "raise", "OSPDError", "(", "'No target to scan'", ",", "'start_scan'", ")", "return", "target_list" ]
40.305085
15.864407
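A hedged sketch of driving this parser with an element built via the standard library; the owning daemon class is an assumption:

from xml.etree import ElementTree

xml = ('<targets>'
       '<target><hosts>192.168.0.0/24</hosts><ports>22,80</ports></target>'
       '</targets>')
scanner_target = ElementTree.fromstring(xml)
targets = OSPDaemon.process_targets_element(scanner_target)  # hypothetical owner class
# targets == [['192.168.0.0/24', '22,80', {}]]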
def add_pending(self, family: str, email: str=None) -> models.Analysis: """Add pending entry for an analysis.""" started_at = dt.datetime.now() new_log = self.Analysis(family=family, status='pending', started_at=started_at) new_log.user = self.user(email) if email else None self.add_commit(new_log) return new_log
[ "def", "add_pending", "(", "self", ",", "family", ":", "str", ",", "email", ":", "str", "=", "None", ")", "->", "models", ".", "Analysis", ":", "started_at", "=", "dt", ".", "datetime", ".", "now", "(", ")", "new_log", "=", "self", ".", "Analysis", "(", "family", "=", "family", ",", "status", "=", "'pending'", ",", "started_at", "=", "started_at", ")", "new_log", ".", "user", "=", "self", ".", "user", "(", "email", ")", "if", "email", "else", "None", "self", ".", "add_commit", "(", "new_log", ")", "return", "new_log" ]
50.857143
17.714286
def build_from_job_list(scheme_files, templates, base_output_dir):
    """Use $scheme_files as a job list and build base16 templates using
    $templates (a list of TemplateGroup objects)."""
    queue = Queue()
    for scheme in scheme_files:
        queue.put(scheme)

    if len(scheme_files) < 40:
        thread_num = len(scheme_files)
    else:
        thread_num = 40

    threads = []
    for _ in range(thread_num):
        thread = Thread(target=build_single_worker,
                        args=(queue, templates, base_output_dir))
        thread.start()
        threads.append(thread)

    queue.join()

    for _ in range(thread_num):
        queue.put(None)

    for thread in threads:
        thread.join()
[ "def", "build_from_job_list", "(", "scheme_files", ",", "templates", ",", "base_output_dir", ")", ":", "queue", "=", "Queue", "(", ")", "for", "scheme", "in", "scheme_files", ":", "queue", ".", "put", "(", "scheme", ")", "if", "len", "(", "scheme_files", ")", "<", "40", ":", "thread_num", "=", "len", "(", "scheme_files", ")", "else", ":", "thread_num", "=", "40", "threads", "=", "[", "]", "for", "_", "in", "range", "(", "thread_num", ")", ":", "thread", "=", "Thread", "(", "target", "=", "build_single_worker", ",", "args", "=", "(", "queue", ",", "templates", ",", "base_output_dir", ")", ")", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "queue", ".", "join", "(", ")", "for", "_", "in", "range", "(", "thread_num", ")", ":", "queue", ".", "put", "(", "None", ")", "for", "thread", "in", "threads", ":", "thread", ".", "join", "(", ")" ]
26.807692
19.653846
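build_single_worker is not shown in this snippet; a minimal sketch consistent with the sentinel protocol above could look like this (`build` stands in for the per-scheme build step):

def build_single_worker(queue, templates, base_output_dir):
    while True:
        scheme = queue.get()
        if scheme is None:  # sentinel pushed after queue.join(): exit the thread
            break
        try:
            build(scheme, templates, base_output_dir)  # assumed single-scheme build
        finally:
            queue.task_done()  # lets queue.join() in the parent return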
def request(self, source="candidate"): """Validate the contents of the specified configuration. *source* is the name of the configuration datastore being validated or `config` element containing the configuration subtree to be validated :seealso: :ref:`srctarget_params`""" node = new_ele("validate") if type(source) is str: src = util.datastore_or_url("source", source, self._assert) else: validated_element(source, ("config", qualify("config"))) src = new_ele("source") src.append(source) node.append(src) return self._request(node)
[ "def", "request", "(", "self", ",", "source", "=", "\"candidate\"", ")", ":", "node", "=", "new_ele", "(", "\"validate\"", ")", "if", "type", "(", "source", ")", "is", "str", ":", "src", "=", "util", ".", "datastore_or_url", "(", "\"source\"", ",", "source", ",", "self", ".", "_assert", ")", "else", ":", "validated_element", "(", "source", ",", "(", "\"config\"", ",", "qualify", "(", "\"config\"", ")", ")", ")", "src", "=", "new_ele", "(", "\"source\"", ")", "src", ".", "append", "(", "source", ")", "node", ".", "append", "(", "src", ")", "return", "self", ".", "_request", "(", "node", ")" ]
42.266667
21.866667
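Assuming this is an ncclient-style Validate operation, it is usually reached through a connected manager facade rather than called directly; a hedged sketch:

m.validate(source='candidate')     # validate a datastore by name
m.validate(source=config_element)  # or pass a <config> subtree element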
def parse_params(self,
                 y_target=None,
                 image_target=None,
                 initial_num_evals=100,
                 max_num_evals=10000,
                 stepsize_search='grid_search',
                 num_iterations=64,
                 gamma=0.01,
                 constraint='l2',
                 batch_size=128,
                 verbose=True,
                 clip_min=0,
                 clip_max=1):
    """
    :param y: A tensor of shape (1, nb_classes) for true labels.
    :param y_target: A tensor of shape (1, nb_classes) for target labels.
    Required for targeted attack.
    :param image_target: A tensor of shape (1, **image shape) for initial
    target images. Required for targeted attack.
    :param initial_num_evals: initial number of evaluations for
    gradient estimation.
    :param max_num_evals: maximum number of evaluations for gradient estimation.
    :param stepsize_search: How to search for stepsize; choices are
    'geometric_progression', 'grid_search'. 'geometric_progression'
    initializes the stepsize by ||x_t - x||_p / sqrt(iteration),
    and keeps decreasing it by half until reaching the target
    side of the boundary. 'grid_search' chooses the optimal epsilon
    over a grid, in the scale of ||x_t - x||_p.
    :param num_iterations: The number of iterations.
    :param gamma: The binary search threshold theta is gamma / sqrt(d) for
    l2 attack and gamma / d for linf attack.
    :param constraint: The distance to optimize; choices are 'l2', 'linf'.
    :param batch_size: batch_size for model prediction.
    :param verbose: (boolean) Whether distance at each step is printed.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    """

    # ignore the y and y_target argument
    self.y_target = y_target
    self.image_target = image_target
    self.initial_num_evals = initial_num_evals
    self.max_num_evals = max_num_evals
    self.stepsize_search = stepsize_search
    self.num_iterations = num_iterations
    self.gamma = gamma
    self.constraint = constraint
    self.batch_size = batch_size
    self.clip_min = clip_min
    self.clip_max = clip_max
    self.verbose = verbose
[ "def", "parse_params", "(", "self", ",", "y_target", "=", "None", ",", "image_target", "=", "None", ",", "initial_num_evals", "=", "100", ",", "max_num_evals", "=", "10000", ",", "stepsize_search", "=", "'grid_search'", ",", "num_iterations", "=", "64", ",", "gamma", "=", "0.01", ",", "constraint", "=", "'l2'", ",", "batch_size", "=", "128", ",", "verbose", "=", "True", ",", "clip_min", "=", "0", ",", "clip_max", "=", "1", ")", ":", "# ignore the y and y_target argument", "self", ".", "y_target", "=", "y_target", "self", ".", "image_target", "=", "image_target", "self", ".", "initial_num_evals", "=", "initial_num_evals", "self", ".", "max_num_evals", "=", "max_num_evals", "self", ".", "stepsize_search", "=", "stepsize_search", "self", ".", "num_iterations", "=", "num_iterations", "self", ".", "gamma", "=", "gamma", "self", ".", "constraint", "=", "constraint", "self", ".", "batch_size", "=", "batch_size", "self", ".", "clip_min", "=", "clip_min", "self", ".", "clip_max", "=", "clip_max", "self", ".", "verbose", "=", "verbose" ]
46.301887
15.735849
def output_path(self, path_name): """ Modify a path so it fits expectations. Avoid returning relative paths that start with '../' and possibly return relative paths when output and cache directories match. """ # make sure it is a valid posix format path = to_posix(path_name) assert (path == path_name), "path_name passed to output_path must be in posix format" if posixpath.isabs(path): if self.output == self.cache: # worth seeing if an absolute path can be avoided path = posixpath.relpath(path, self.output) else: return posixpath.realpath(path) if path.startswith('../'): joined = posixpath.join(self.output, path) return posixpath.realpath(joined) return path
[ "def", "output_path", "(", "self", ",", "path_name", ")", ":", "# make sure it is a valid posix format", "path", "=", "to_posix", "(", "path_name", ")", "assert", "(", "path", "==", "path_name", ")", ",", "\"path_name passed to output_path must be in posix format\"", "if", "posixpath", ".", "isabs", "(", "path", ")", ":", "if", "self", ".", "output", "==", "self", ".", "cache", ":", "# worth seeing if an absolute path can be avoided", "path", "=", "posixpath", ".", "relpath", "(", "path", ",", "self", ".", "output", ")", "else", ":", "return", "posixpath", ".", "realpath", "(", "path", ")", "if", "path", ".", "startswith", "(", "'../'", ")", ":", "joined", "=", "posixpath", ".", "join", "(", "self", ".", "output", ",", "path", ")", "return", "posixpath", ".", "realpath", "(", "joined", ")", "return", "path" ]
36.333333
19.958333
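Illustrative outcomes, assuming an instance whose output and cache both point at '/site/out':

# obj.output_path('/site/out/css/a.css') -> 'css/a.css'  (absolute made relative to output)
# obj.output_path('../shared/b.css')     -> realpath of '/site/out/../shared/b.css'
# obj.output_path('img/c.png')           -> 'img/c.png'  (plain relative paths pass through)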
def with_revision(self, label, number): """ Returns a Tag with a given revision """ t = self.clone() t.revision = Revision(label, number) return t
[ "def", "with_revision", "(", "self", ",", "label", ",", "number", ")", ":", "t", "=", "self", ".", "clone", "(", ")", "t", ".", "revision", "=", "Revision", "(", "label", ",", "number", ")", "return", "t" ]
26.857143
6.857143
def update_machine_state(state_path): """Update the machine state using the provided state declaration.""" charmhelpers.contrib.templating.contexts.juju_state_to_yaml( salt_grains_path) subprocess.check_call([ 'salt-call', '--local', 'state.template', state_path, ])
[ "def", "update_machine_state", "(", "state_path", ")", ":", "charmhelpers", ".", "contrib", ".", "templating", ".", "contexts", ".", "juju_state_to_yaml", "(", "salt_grains_path", ")", "subprocess", ".", "check_call", "(", "[", "'salt-call'", ",", "'--local'", ",", "'state.template'", ",", "state_path", ",", "]", ")" ]
31.3
16.7
def get_meta(self, key, conforming=True): """ RETURN METADATA ON FILE IN BUCKET :param key: KEY, OR PREFIX OF KEY :param conforming: TEST IF THE KEY CONFORMS TO REQUIRED PATTERN :return: METADATA, IF UNIQUE, ELSE ERROR """ try: metas = list(self.bucket.list(prefix=key)) metas = wrap([m for m in metas if m.name.find(".json") != -1]) perfect = Null favorite = Null too_many = False error = None for m in metas: try: simple = strip_extension(m.key) if conforming: self._verify_key_format(simple) if simple == key: perfect = m too_many = False if simple.startswith(key + ".") or simple.startswith(key + ":"): if favorite and not perfect: too_many = True favorite = m except Exception as e: error = e if too_many: Log.error( "multiple keys in {{bucket}} with prefix={{prefix|quote}}: {{list}}", bucket=self.name, prefix=key, list=[k.name for k in metas] ) if not perfect and error: Log.error("Problem with key request", error) return coalesce(perfect, favorite) except Exception as e: Log.error(READ_ERROR+" can not read {{key}} from {{bucket}}", key=key, bucket=self.bucket.name, cause=e)
[ "def", "get_meta", "(", "self", ",", "key", ",", "conforming", "=", "True", ")", ":", "try", ":", "metas", "=", "list", "(", "self", ".", "bucket", ".", "list", "(", "prefix", "=", "key", ")", ")", "metas", "=", "wrap", "(", "[", "m", "for", "m", "in", "metas", "if", "m", ".", "name", ".", "find", "(", "\".json\"", ")", "!=", "-", "1", "]", ")", "perfect", "=", "Null", "favorite", "=", "Null", "too_many", "=", "False", "error", "=", "None", "for", "m", "in", "metas", ":", "try", ":", "simple", "=", "strip_extension", "(", "m", ".", "key", ")", "if", "conforming", ":", "self", ".", "_verify_key_format", "(", "simple", ")", "if", "simple", "==", "key", ":", "perfect", "=", "m", "too_many", "=", "False", "if", "simple", ".", "startswith", "(", "key", "+", "\".\"", ")", "or", "simple", ".", "startswith", "(", "key", "+", "\":\"", ")", ":", "if", "favorite", "and", "not", "perfect", ":", "too_many", "=", "True", "favorite", "=", "m", "except", "Exception", "as", "e", ":", "error", "=", "e", "if", "too_many", ":", "Log", ".", "error", "(", "\"multiple keys in {{bucket}} with prefix={{prefix|quote}}: {{list}}\"", ",", "bucket", "=", "self", ".", "name", ",", "prefix", "=", "key", ",", "list", "=", "[", "k", ".", "name", "for", "k", "in", "metas", "]", ")", "if", "not", "perfect", "and", "error", ":", "Log", ".", "error", "(", "\"Problem with key request\"", ",", "error", ")", "return", "coalesce", "(", "perfect", ",", "favorite", ")", "except", "Exception", "as", "e", ":", "Log", ".", "error", "(", "READ_ERROR", "+", "\" can not read {{key}} from {{bucket}}\"", ",", "key", "=", "key", ",", "bucket", "=", "self", ".", "bucket", ".", "name", ",", "cause", "=", "e", ")" ]
39.333333
15.238095
def prepare_environment(default_settings=_default_settings, **kwargs):  # pylint: disable=unused-argument
    '''
    Prepare ENV for web-application

    :param default_settings: minimal settings needed to run the app
    :type default_settings: dict
    :param kwargs: other settings to override
    :rtype: None
    '''
    for key, value in default_settings.items():
        os.environ.setdefault(key, value)
    os.environ.update(kwargs)
    if six.PY2:  # nocv
        warnings.warn(
            'Python 2.7 is deprecated and will be dropped in 2.0, use Python >3.5',
            DeprecationWarning
        )
[ "def", "prepare_environment", "(", "default_settings", "=", "_default_settings", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "for", "key", ",", "value", "in", "default_settings", ".", "items", "(", ")", ":", "os", ".", "environ", ".", "setdefault", "(", "key", ",", "value", ")", "os", ".", "environ", ".", "update", "(", "kwargs", ")", "if", "six", ".", "PY2", ":", "# nocv", "warnings", ".", "warn", "(", "'Python 2.7 is deprecated and will dropped in 2.0, use Python >3.5'", ",", "DeprecationWarning", ")" ]
34.823529
17.529412
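A short usage sketch; the settings names are hypothetical:

import os

prepare_environment({'DJANGO_SETTINGS_MODULE': 'myapp.settings'}, DEBUG='true')
os.environ['DEBUG']                   # -> 'true' (kwargs always win via update())
os.environ['DJANGO_SETTINGS_MODULE']  # -> 'myapp.settings' unless it was already set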
def model_schema( model: Type['main.BaseModel'], by_alias: bool = True, ref_prefix: Optional[str] = None ) -> Dict[str, Any]: """ Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level JSON key. :param model: a Pydantic model (a class that inherits from BaseModel) :param by_alias: generate the schemas using the aliases defined, if any :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the top-level key ``definitions``, so you can extract them from there. But all the references will have the set prefix. :return: dict with the JSON Schema for the passed ``model`` """ ref_prefix = ref_prefix or default_prefix flat_models = get_flat_models_from_model(model) model_name_map = get_model_name_map(flat_models) m_schema, m_definitions = model_process_schema( model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix ) if m_definitions: m_schema.update({'definitions': m_definitions}) return m_schema
[ "def", "model_schema", "(", "model", ":", "Type", "[", "'main.BaseModel'", "]", ",", "by_alias", ":", "bool", "=", "True", ",", "ref_prefix", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "ref_prefix", "=", "ref_prefix", "or", "default_prefix", "flat_models", "=", "get_flat_models_from_model", "(", "model", ")", "model_name_map", "=", "get_model_name_map", "(", "flat_models", ")", "m_schema", ",", "m_definitions", "=", "model_process_schema", "(", "model", ",", "by_alias", "=", "by_alias", ",", "model_name_map", "=", "model_name_map", ",", "ref_prefix", "=", "ref_prefix", ")", "if", "m_definitions", ":", "m_schema", ".", "update", "(", "{", "'definitions'", ":", "m_definitions", "}", ")", "return", "m_schema" ]
52.56
32.16
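Assuming this is the model_schema helper from pydantic v1, a usage sketch:

from pydantic import BaseModel

class Pet(BaseModel):
    name: str

class Owner(BaseModel):
    pet: Pet

schema = model_schema(Owner, ref_prefix='#/components/schemas/')
# schema['properties']['pet']['$ref'] == '#/components/schemas/Pet',
# while the Pet definition itself still lives under schema['definitions']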
def press_and_tap(self, press_key, tap_key, n=1, interval=0, pre_dl=None, post_dl=None):
        """Press combination of two keys, like Ctrl + C, Alt + F4. The second
        key can be tapped multiple times.

        Examples::

            bot.press_and_tap("ctrl", "c")
            bot.press_and_tap("shift", "1")

        **Chinese documentation**

        Press a combination of two keys.
        """
        press_key = self._parse_key(press_key)
        tap_key = self._parse_key(tap_key)

        self.delay(pre_dl)
        self.k.press_key(press_key)
        self.k.tap_key(tap_key, n, interval)
        self.k.release_key(press_key)
        self.delay(post_dl)
[ "def", "press_and_tap", "(", "self", ",", "press_key", ",", "tap_key", ",", "n", "=", "1", ",", "interval", "=", "0", ",", "pre_dl", "=", "None", ",", "post_dl", "=", "None", ")", ":", "press_key", "=", "self", ".", "_parse_key", "(", "press_key", ")", "tap_key", "=", "self", ".", "_parse_key", "(", "tap_key", ")", "self", ".", "delay", "(", "pre_dl", ")", "self", ".", "k", ".", "press_key", "(", "press_key", ")", "self", ".", "k", ".", "tap_key", "(", "tap_key", ",", "n", ",", "interval", ")", "self", ".", "k", ".", "release_key", "(", "press_key", ")", "self", ".", "delay", "(", "post_dl", ")" ]
29.333333
17.809524
def replace_template(self, template_content, team_context, template_id):
        """ReplaceTemplate.
        [Preview API] Replace template contents
        :param :class:`<WorkItemTemplate> <azure.devops.v5_1.work_item_tracking.models.WorkItemTemplate>` template_content: Template contents to replace with
        :param :class:`<TeamContext> <azure.devops.v5_1.work_item_tracking.models.TeamContext>` team_context: The team context for the operation
        :param str template_id: Template id
        :rtype: :class:`<WorkItemTemplate> <azure.devops.v5_1.work_item_tracking.models.WorkItemTemplate>`
        """
        project = None
        team = None
        if team_context is not None:
            if team_context.project_id:
                project = team_context.project_id
            else:
                project = team_context.project
            if team_context.team_id:
                team = team_context.team_id
            else:
                team = team_context.team

        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'string')
        if team is not None:
            route_values['team'] = self._serialize.url('team', team, 'string')
        if template_id is not None:
            route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
        content = self._serialize.body(template_content, 'WorkItemTemplate')
        response = self._send(http_method='PUT',
                              location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
                              version='5.1-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('WorkItemTemplate', response)
[ "def", "replace_template", "(", "self", ",", "template_content", ",", "team_context", ",", "template_id", ")", ":", "project", "=", "None", "team", "=", "None", "if", "team_context", "is", "not", "None", ":", "if", "team_context", ".", "project_id", ":", "project", "=", "team_context", ".", "project_id", "else", ":", "project", "=", "team_context", ".", "project", "if", "team_context", ".", "team_id", ":", "team", "=", "team_context", ".", "team_id", "else", ":", "team", "=", "team_context", ".", "team", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'string'", ")", "if", "team", "is", "not", "None", ":", "route_values", "[", "'team'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'team'", ",", "team", ",", "'string'", ")", "if", "template_id", "is", "not", "None", ":", "route_values", "[", "'templateId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'template_id'", ",", "template_id", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "template_content", ",", "'WorkItemTemplate'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PUT'", ",", "location_id", "=", "'fb10264a-8836-48a0-8033-1b0ccd2748d5'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'WorkItemTemplate'", ",", "response", ")" ]
52.147059
23.676471
def get_attributes(**kwargs): """ Get all attributes """ attrs = db.DBSession.query(Attr).order_by(Attr.name).all() return attrs
[ "def", "get_attributes", "(", "*", "*", "kwargs", ")", ":", "attrs", "=", "db", ".", "DBSession", ".", "query", "(", "Attr", ")", ".", "order_by", "(", "Attr", ".", "name", ")", ".", "all", "(", ")", "return", "attrs" ]
18.375
18.875
def create_packages_archive(packages, filename):
    """
    Create a tar archive which will contain the files for the packages listed in packages.
    """
    import tarfile
    tar = tarfile.open(filename, "w")

    def add(src, dst):
        logger.debug('adding to tar: %s -> %s', src, dst)
        tar.add(src, dst)

    def add_files_for_package(sub_package_path, root_package_path, root_package_name):
        for root, dirs, files in os.walk(sub_package_path):
            if '.svn' in dirs:
                dirs.remove('.svn')
            for f in files:
                if not f.endswith(".pyc") and not f.startswith("."):
                    add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)

    for package in packages:
        # Put a submodule's entire package in the archive. This is the
        # magic that usually packages everything you need without
        # having to attach packages/modules explicitly
        if not getattr(package, "__path__", None) and '.' in package.__name__:
            package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')

        n = package.__name__.replace(".", "/")

        if getattr(package, "__path__", None):
            # TODO: (BUG) picking only the first path does not
            # properly deal with namespaced packages in different
            # directories
            p = package.__path__[0]

            if p.endswith('.egg') and os.path.isfile(p):
                raise RuntimeError('egg files not supported!!!')
                # Add the entire egg file
                # p = p[:p.find('.egg') + 4]
                # add(dereference(p), os.path.basename(p))

            else:
                # include __init__ files from parent projects
                root = []
                for parent in package.__name__.split('.')[0:-1]:
                    root.append(parent)
                    module_name = '.'.join(root)
                    directory = '/'.join(root)

                    add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
                        directory + "/__init__.py")

                add_files_for_package(p, p, n)

                # include egg-info directories that are parallel:
                for egg_info_path in glob.glob(p + '*.egg-info'):
                    logger.debug(
                        'Adding package metadata to archive for "%s" found at "%s"',
                        package.__name__,
                        egg_info_path
                    )
                    add_files_for_package(egg_info_path, p, n)

        else:
            f = package.__file__
            if f.endswith("pyc"):
                f = f[:-3] + "py"
            if n.find(".") == -1:
                add(dereference(f), os.path.basename(f))
            else:
                add(dereference(f), n + ".py")

    tar.close()
[ "def", "create_packages_archive", "(", "packages", ",", "filename", ")", ":", "import", "tarfile", "tar", "=", "tarfile", ".", "open", "(", "filename", ",", "\"w\"", ")", "def", "add", "(", "src", ",", "dst", ")", ":", "logger", ".", "debug", "(", "'adding to tar: %s -> %s'", ",", "src", ",", "dst", ")", "tar", ".", "add", "(", "src", ",", "dst", ")", "def", "add_files_for_package", "(", "sub_package_path", ",", "root_package_path", ",", "root_package_name", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "sub_package_path", ")", ":", "if", "'.svn'", "in", "dirs", ":", "dirs", ".", "remove", "(", "'.svn'", ")", "for", "f", "in", "files", ":", "if", "not", "f", ".", "endswith", "(", "\".pyc\"", ")", "and", "not", "f", ".", "startswith", "(", "\".\"", ")", ":", "add", "(", "dereference", "(", "root", "+", "\"/\"", "+", "f", ")", ",", "root", ".", "replace", "(", "root_package_path", ",", "root_package_name", ")", "+", "\"/\"", "+", "f", ")", "for", "package", "in", "packages", ":", "# Put a submodule's entire package in the archive. This is the", "# magic that usually packages everything you need without", "# having to attach packages/modules explicitly", "if", "not", "getattr", "(", "package", ",", "\"__path__\"", ",", "None", ")", "and", "'.'", "in", "package", ".", "__name__", ":", "package", "=", "__import__", "(", "package", ".", "__name__", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", ",", "None", ",", "None", ",", "'non_empty'", ")", "n", "=", "package", ".", "__name__", ".", "replace", "(", "\".\"", ",", "\"/\"", ")", "if", "getattr", "(", "package", ",", "\"__path__\"", ",", "None", ")", ":", "# TODO: (BUG) picking only the first path does not", "# properly deal with namespaced packages in different", "# directories", "p", "=", "package", ".", "__path__", "[", "0", "]", "if", "p", ".", "endswith", "(", "'.egg'", ")", "and", "os", ".", "path", ".", "isfile", "(", "p", ")", ":", "raise", "'egg files not supported!!!'", "# Add the entire egg file", "# p = p[:p.find('.egg') + 4]", "# add(dereference(p), os.path.basename(p))", "else", ":", "# include __init__ files from parent projects", "root", "=", "[", "]", "for", "parent", "in", "package", ".", "__name__", ".", "split", "(", "'.'", ")", "[", "0", ":", "-", "1", "]", ":", "root", ".", "append", "(", "parent", ")", "module_name", "=", "'.'", ".", "join", "(", "root", ")", "directory", "=", "'/'", ".", "join", "(", "root", ")", "add", "(", "dereference", "(", "__import__", "(", "module_name", ",", "None", ",", "None", ",", "'non_empty'", ")", ".", "__path__", "[", "0", "]", "+", "\"/__init__.py\"", ")", ",", "directory", "+", "\"/__init__.py\"", ")", "add_files_for_package", "(", "p", ",", "p", ",", "n", ")", "# include egg-info directories that are parallel:", "for", "egg_info_path", "in", "glob", ".", "glob", "(", "p", "+", "'*.egg-info'", ")", ":", "logger", ".", "debug", "(", "'Adding package metadata to archive for \"%s\" found at \"%s\"'", ",", "package", ".", "__name__", ",", "egg_info_path", ")", "add_files_for_package", "(", "egg_info_path", ",", "p", ",", "n", ")", "else", ":", "f", "=", "package", ".", "__file__", "if", "f", ".", "endswith", "(", "\"pyc\"", ")", ":", "f", "=", "f", "[", ":", "-", "3", "]", "+", "\"py\"", "if", "n", ".", "find", "(", "\".\"", ")", "==", "-", "1", ":", "add", "(", "dereference", "(", "f", ")", ",", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "else", ":", "add", "(", "dereference", "(", "f", ")", ",", "n", "+", "\".py\"", ")", "tar", ".", 
"close", "(", ")" ]
40.070423
21.56338
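A hedged usage sketch; mypackage stands in for whatever importable module should be shipped:

import mypackage  # hypothetical package to bundle

create_packages_archive([mypackage], '/tmp/packages.tar')
# /tmp/packages.tar now holds mypackage/** plus any parallel *.egg-info dirs,
# with .pyc files and dot-files skipped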
def saveDirectory(alias): """save a directory to a certain alias/nickname""" if not settings.platformCompatible(): return False dataFile = open(settings.getDataFile(), "wb") currentDirectory = os.path.abspath(".") directory = {alias : currentDirectory} pickle.dump(directory, dataFile) speech.success(alias + " will now link to " + currentDirectory + ".") speech.success("Tip: use 'hallie go to " + alias + "' to change to this directory.")
[ "def", "saveDirectory", "(", "alias", ")", ":", "if", "not", "settings", ".", "platformCompatible", "(", ")", ":", "return", "False", "dataFile", "=", "open", "(", "settings", ".", "getDataFile", "(", ")", ",", "\"wb\"", ")", "currentDirectory", "=", "os", ".", "path", ".", "abspath", "(", "\".\"", ")", "directory", "=", "{", "alias", ":", "currentDirectory", "}", "pickle", ".", "dump", "(", "directory", ",", "dataFile", ")", "speech", ".", "success", "(", "alias", "+", "\" will now link to \"", "+", "currentDirectory", "+", "\".\"", ")", "speech", ".", "success", "(", "\"Tip: use 'hallie go to \"", "+", "alias", "+", "\"' to change to this directory.\"", ")" ]
44.1
13.2
def assert_less(first, second, msg_fmt="{msg}"): """Fail if first is not less than second. >>> assert_less('bar', 'foo') >>> assert_less(5, 5) Traceback (most recent call last): ... AssertionError: 5 is not less than 5 The following msg_fmt arguments are supported: * msg - the default error message * first - the first argument * second - the second argument """ if not first < second: msg = "{!r} is not less than {!r}".format(first, second) fail(msg_fmt.format(msg=msg, first=first, second=second))
[ "def", "assert_less", "(", "first", ",", "second", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "if", "not", "first", "<", "second", ":", "msg", "=", "\"{!r} is not less than {!r}\"", ".", "format", "(", "first", ",", "second", ")", "fail", "(", "msg_fmt", ".", "format", "(", "msg", "=", "msg", ",", "first", "=", "first", ",", "second", "=", "second", ")", ")" ]
30.833333
15.055556
def keplerian_sheared_field_locations(ax, kbos, date, ras, decs, names, elongation=False, plot=False): """ Shift fields from the discovery set to the requested date by the average motion of L7 kbos in the discovery field. :param ras: :param decs: :param plot: :param ax: :param kbos: precomputed at the discovery date for that block. e.g. Oct new moon for 13B :param date: :param names: :param elongation: """ seps = {'dra': 0., 'ddec': 0.} for kbo in kbos: ra = kbo.ra dec = kbo.dec kbo.compute(date) seps['dra'] += kbo.ra - ra seps['ddec'] += kbo.dec - dec seps['dra'] /= float(len(kbos)) seps['ddec'] /= float(len(kbos)) print date, seps, len(kbos) for idx in range(len(ras)): name = names[idx] ra = ras[idx] + seps['dra'] dec = decs[idx] + seps['ddec'] if plot: ax.add_artist(Rectangle(xy=(math.degrees(ra) - camera_dimen / 2.0, math.degrees(dec) - camera_dimen / 2.0), height=camera_dimen, width=camera_dimen, edgecolor='b', facecolor='b', lw=0.5, fill=True, alpha=0.2)) if elongation: # For each field centre, plot the elongation onto the field at that date. elong = field_elongation(ephem.degrees(ra), ephem.degrees(dec), date) ax.annotate(name, (math.degrees(ra) + camera_dimen / 2., math.degrees(dec)), size=3) ax.annotate("%0.1f" % elong, (math.degrees(ra) + camera_dimen / 4., math.degrees(dec) - camera_dimen / 4.), size=5) return ax
[ "def", "keplerian_sheared_field_locations", "(", "ax", ",", "kbos", ",", "date", ",", "ras", ",", "decs", ",", "names", ",", "elongation", "=", "False", ",", "plot", "=", "False", ")", ":", "seps", "=", "{", "'dra'", ":", "0.", ",", "'ddec'", ":", "0.", "}", "for", "kbo", "in", "kbos", ":", "ra", "=", "kbo", ".", "ra", "dec", "=", "kbo", ".", "dec", "kbo", ".", "compute", "(", "date", ")", "seps", "[", "'dra'", "]", "+=", "kbo", ".", "ra", "-", "ra", "seps", "[", "'ddec'", "]", "+=", "kbo", ".", "dec", "-", "dec", "seps", "[", "'dra'", "]", "/=", "float", "(", "len", "(", "kbos", ")", ")", "seps", "[", "'ddec'", "]", "/=", "float", "(", "len", "(", "kbos", ")", ")", "print", "date", ",", "seps", ",", "len", "(", "kbos", ")", "for", "idx", "in", "range", "(", "len", "(", "ras", ")", ")", ":", "name", "=", "names", "[", "idx", "]", "ra", "=", "ras", "[", "idx", "]", "+", "seps", "[", "'dra'", "]", "dec", "=", "decs", "[", "idx", "]", "+", "seps", "[", "'ddec'", "]", "if", "plot", ":", "ax", ".", "add_artist", "(", "Rectangle", "(", "xy", "=", "(", "math", ".", "degrees", "(", "ra", ")", "-", "camera_dimen", "/", "2.0", ",", "math", ".", "degrees", "(", "dec", ")", "-", "camera_dimen", "/", "2.0", ")", ",", "height", "=", "camera_dimen", ",", "width", "=", "camera_dimen", ",", "edgecolor", "=", "'b'", ",", "facecolor", "=", "'b'", ",", "lw", "=", "0.5", ",", "fill", "=", "True", ",", "alpha", "=", "0.2", ")", ")", "if", "elongation", ":", "# For each field centre, plot the elongation onto the field at that date.", "elong", "=", "field_elongation", "(", "ephem", ".", "degrees", "(", "ra", ")", ",", "ephem", ".", "degrees", "(", "dec", ")", ",", "date", ")", "ax", ".", "annotate", "(", "name", ",", "(", "math", ".", "degrees", "(", "ra", ")", "+", "camera_dimen", "/", "2.", ",", "math", ".", "degrees", "(", "dec", ")", ")", ",", "size", "=", "3", ")", "ax", ".", "annotate", "(", "\"%0.1f\"", "%", "elong", ",", "(", "math", ".", "degrees", "(", "ra", ")", "+", "camera_dimen", "/", "4.", ",", "math", ".", "degrees", "(", "dec", ")", "-", "camera_dimen", "/", "4.", ")", ",", "size", "=", "5", ")", "return", "ax" ]
39.136364
25.227273
def get(self, key, default=None, type_=None): """ Return the last data value for the passed key. If key doesn't exist or value is an empty list, return `default`. """ try: rv = self[key] except KeyError: return default if type_ is not None: try: rv = type_(rv) except ValueError: rv = default return rv
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ",", "type_", "=", "None", ")", ":", "try", ":", "rv", "=", "self", "[", "key", "]", "except", "KeyError", ":", "return", "default", "if", "type_", "is", "not", "None", ":", "try", ":", "rv", "=", "type_", "(", "rv", ")", "except", "ValueError", ":", "rv", "=", "default", "return", "rv" ]
28.8
14.266667
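Behaviour sketch, assuming a mapping class that exposes this get (the container name is illustrative):

args = QueryArgs({'page': '3', 'q': 'abc'})  # hypothetical container
args.get('page', type_=int)          # -> 3   ('3' coerced)
args.get('q', default=0, type_=int)  # -> 0   (int('abc') raises ValueError)
args.get('missing', default='-')     # -> '-' (KeyError path)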
def get_json_response(self, content, **kwargs): """Returns a json response object.""" # Don't care to return a django form or view in the response here. # Remove those from the context. if isinstance(content, dict): response_content = {k: deepcopy(v) for k, v in content.items() if k not in ('form', 'view') or k in ('form', 'view') and not isinstance(v, (Form, View))} else: response_content = content return HttpResponse(content=json.dumps(response_content), content_type='application/json; charset=utf-8', **kwargs)
[ "def", "get_json_response", "(", "self", ",", "content", ",", "*", "*", "kwargs", ")", ":", "# Don't care to return a django form or view in the response here.", "# Remove those from the context.", "if", "isinstance", "(", "content", ",", "dict", ")", ":", "response_content", "=", "{", "k", ":", "deepcopy", "(", "v", ")", "for", "k", ",", "v", "in", "content", ".", "items", "(", ")", "if", "k", "not", "in", "(", "'form'", ",", "'view'", ")", "or", "k", "in", "(", "'form'", ",", "'view'", ")", "and", "not", "isinstance", "(", "v", ",", "(", "Form", ",", "View", ")", ")", "}", "else", ":", "response_content", "=", "content", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "response_content", ")", ",", "content_type", "=", "'application/json; charset=utf-8'", ",", "*", "*", "kwargs", ")" ]
46.533333
21.533333
def verify_gmt_integrity(gmt): """ Make sure that set ids are unique. Args: gmt (GMT object): list of dicts Returns: None """ # Verify that set ids are unique set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt] assert len(set(set_ids)) == len(set_ids), ( "Set identifiers should be unique. set_ids: {}".format(set_ids))
[ "def", "verify_gmt_integrity", "(", "gmt", ")", ":", "# Verify that set ids are unique", "set_ids", "=", "[", "d", "[", "SET_IDENTIFIER_FIELD", "]", "for", "d", "in", "gmt", "]", "assert", "len", "(", "set", "(", "set_ids", ")", ")", "==", "len", "(", "set_ids", ")", ",", "(", "\"Set identifiers should be unique. set_ids: {}\"", ".", "format", "(", "set_ids", ")", ")" ]
23.866667
20.866667
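A quick sketch of the contract, assuming SET_IDENTIFIER_FIELD == 'head':

gmt = [{'head': 'SET_A'}, {'head': 'SET_B'}]
verify_gmt_integrity(gmt)                        # passes silently
verify_gmt_integrity(gmt + [{'head': 'SET_A'}])  # AssertionError: ids not unique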
def get_owner(self, default=True): """Return (User ID, Group ID) tuple :param bool default: Whether to return default if not set. :rtype: tuple[int, int] """ uid, gid = self.owner if not uid and default: uid = os.getuid() if not gid and default: gid = os.getgid() return uid, gid
[ "def", "get_owner", "(", "self", ",", "default", "=", "True", ")", ":", "uid", ",", "gid", "=", "self", ".", "owner", "if", "not", "uid", "and", "default", ":", "uid", "=", "os", ".", "getuid", "(", ")", "if", "not", "gid", "and", "default", ":", "gid", "=", "os", ".", "getgid", "(", ")", "return", "uid", ",", "gid" ]
23.8
17.933333
def run(self, **kwargs): """ Run the model Parameters ---------- ``**kwargs`` Any other parameter for the :meth:`model_organization.ModelOrganizer.app_main` method """ from calculate import compute self.app_main(**kwargs) # get the default output name output = osp.join(self.exp_config['expdir'], 'output.dat') # save the paths in the configuration self.exp_config['output'] = output # run the model data = np.loadtxt(self.exp_config['infile']) out = compute(data) # save the output self.logger.info('Saving output data to %s', osp.relpath(output)) np.savetxt(output, out) # store some additional information in the configuration of the # experiment self.exp_config['mean'] = mean = float(out.mean()) self.exp_config['std'] = std = float(out.std()) self.logger.debug('Mean: %s, Standard deviation: %s', mean, std)
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "calculate", "import", "compute", "self", ".", "app_main", "(", "*", "*", "kwargs", ")", "# get the default output name", "output", "=", "osp", ".", "join", "(", "self", ".", "exp_config", "[", "'expdir'", "]", ",", "'output.dat'", ")", "# save the paths in the configuration", "self", ".", "exp_config", "[", "'output'", "]", "=", "output", "# run the model", "data", "=", "np", ".", "loadtxt", "(", "self", ".", "exp_config", "[", "'infile'", "]", ")", "out", "=", "compute", "(", "data", ")", "# save the output", "self", ".", "logger", ".", "info", "(", "'Saving output data to %s'", ",", "osp", ".", "relpath", "(", "output", ")", ")", "np", ".", "savetxt", "(", "output", ",", "out", ")", "# store some additional information in the configuration of the", "# experiment", "self", ".", "exp_config", "[", "'mean'", "]", "=", "mean", "=", "float", "(", "out", ".", "mean", "(", ")", ")", "self", ".", "exp_config", "[", "'std'", "]", "=", "std", "=", "float", "(", "out", ".", "std", "(", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Mean: %s, Standard deviation: %s'", ",", "mean", ",", "std", ")" ]
32.129032
19.096774
def validate_steps(self, request, workflow, start, end): """Validates the workflow steps from ``start`` to ``end``, inclusive. Returns a dict describing the validation state of the workflow. """ errors = {} for step in workflow.steps[start:end + 1]: if not step.action.is_valid(): errors[step.slug] = dict( (field, [six.text_type(error) for error in errors]) for (field, errors) in step.action.errors.items()) return { 'has_errors': bool(errors), 'workflow_slug': workflow.slug, 'errors': errors, }
[ "def", "validate_steps", "(", "self", ",", "request", ",", "workflow", ",", "start", ",", "end", ")", ":", "errors", "=", "{", "}", "for", "step", "in", "workflow", ".", "steps", "[", "start", ":", "end", "+", "1", "]", ":", "if", "not", "step", ".", "action", ".", "is_valid", "(", ")", ":", "errors", "[", "step", ".", "slug", "]", "=", "dict", "(", "(", "field", ",", "[", "six", ".", "text_type", "(", "error", ")", "for", "error", "in", "errors", "]", ")", "for", "(", "field", ",", "errors", ")", "in", "step", ".", "action", ".", "errors", ".", "items", "(", ")", ")", "return", "{", "'has_errors'", ":", "bool", "(", "errors", ")", ",", "'workflow_slug'", ":", "workflow", ".", "slug", ",", "'errors'", ":", "errors", ",", "}" ]
40.25
15.75
def _modname(path): """Return a plausible module name for the path""" base = os.path.basename(path) filename, ext = os.path.splitext(base) return filename
[ "def", "_modname", "(", "path", ")", ":", "base", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "base", ")", "return", "filename" ]
33.2
10.2
def convert_raw_data_to_universes(raw_data) -> tuple:
    """
    Converts the raw data to a readable universes tuple. The raw_data is
    scanned from index 0 and has to contain 16-bit numbers with the high
    byte first. The data is converted from start to end.
    :param raw_data: the raw data to convert
    :return: tuple full with 16-bit numbers
    """
    if len(raw_data)%2 != 0:
        raise TypeError('The given data has not a length that is a multiple of 2!')
    rtrnList = []
    for i in range(0, len(raw_data), 2):
        rtrnList.append(two_bytes_to_int(raw_data[i], raw_data[i+1]))
    return tuple(rtrnList)
[ "def", "convert_raw_data_to_universes", "(", "raw_data", ")", "->", "tuple", ":", "if", "len", "(", "raw_data", ")", "%", "2", "!=", "0", ":", "raise", "TypeError", "(", "'The given data has not a length that is a multiple of 2!'", ")", "rtrnList", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "raw_data", ")", ",", "2", ")", ":", "rtrnList", ".", "append", "(", "two_bytes_to_int", "(", "raw_data", "[", "i", "]", ",", "raw_data", "[", "i", "+", "1", "]", ")", ")", "return", "tuple", "(", "rtrnList", ")" ]
47.769231
20.384615
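two_bytes_to_int is not shown here; given the "high byte first" wording it is presumably equivalent to this sketch:

def two_bytes_to_int(high_byte, low_byte):
    # assumed helper: big-endian byte pair -> 16-bit integer
    return (high_byte << 8) | low_byte

convert_raw_data_to_universes(bytes([0x00, 0x01, 0x01, 0x00]))
# -> (1, 256)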
def getMeta(self, uri):
        """Return meta information about an action. Cache the result as specified by the server"""

        action = urlparse(uri).path

        mediaKey = self.cacheKey + '_meta_' + action
        mediaKey = mediaKey.replace(' ', '__')

        meta = cache.get(mediaKey, None)

        # Nothing found -> Retrieve it from the server and cache it
        if not meta:
            r = self.doQuery('meta/' + uri)

            if r.status_code == 200:  # Get the content if there is no problem. If there is, meta will stay None
                meta = r.json()

            if 'expire' not in r.headers:
                expire = 5 * 60  # 5 minutes of cache if the server didn't specify anything
            else:
                expire = int((parser.parse(r.headers['expire']) - datetime.datetime.now(tzutc())).total_seconds())  # Use the server value for cache

            if expire > 0:  # Does the server want us to cache?
                cache.set(mediaKey, meta, expire)

        return meta
[ "def", "getMeta", "(", "self", ",", "uri", ")", ":", "action", "=", "urlparse", "(", "uri", ")", ".", "path", "mediaKey", "=", "self", ".", "cacheKey", "+", "'_meta_'", "+", "action", "mediaKey", "=", "mediaKey", ".", "replace", "(", "' '", ",", "'__'", ")", "meta", "=", "cache", ".", "get", "(", "mediaKey", ",", "None", ")", "# Nothing found -> Retrieve it from the server and cache it", "if", "not", "meta", ":", "r", "=", "self", ".", "doQuery", "(", "'meta/'", "+", "uri", ")", "if", "r", ".", "status_code", "==", "200", ":", "# Get the content if there is not problem. If there is, template will stay to None", "meta", "=", "r", ".", "json", "(", ")", "if", "'expire'", "not", "in", "r", ".", "headers", ":", "expire", "=", "5", "*", "60", "# 5 minutes of cache if the server didn't specified anything", "else", ":", "expire", "=", "int", "(", "(", "parser", ".", "parse", "(", "r", ".", "headers", "[", "'expire'", "]", ")", "-", "datetime", ".", "datetime", ".", "now", "(", "tzutc", "(", ")", ")", ")", ".", "total_seconds", "(", ")", ")", "# Use the server value for cache", "if", "expire", ">", "0", ":", "# Do the server want us to cache ?", "cache", ".", "set", "(", "mediaKey", ",", "meta", ",", "expire", ")", "return", "meta" ]
37.185185
28.740741
def get_yaml_items(self, dir_path, param=None): ''' Loops through the dir_path and parses all YAML files inside the directory. If no param is defined, then all YAML items will be returned in a list. If a param is defined, then all items will be scanned for this param and a list of all those values will be returned. ''' result = [] if not os.path.isdir(dir_path): return [] for filename in os.listdir(dir_path): path = os.path.join(dir_path, filename) items = self.read_yaml(path) for item in items: if param: if param in item: item = item[param] if isinstance(item, list): result.extend(item) else: result.append(item) else: result.append(item) return result
[ "def", "get_yaml_items", "(", "self", ",", "dir_path", ",", "param", "=", "None", ")", ":", "result", "=", "[", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_path", ")", ":", "return", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "dir_path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "filename", ")", "items", "=", "self", ".", "read_yaml", "(", "path", ")", "for", "item", "in", "items", ":", "if", "param", ":", "if", "param", "in", "item", ":", "item", "=", "item", "[", "param", "]", "if", "isinstance", "(", "item", ",", "list", ")", ":", "result", ".", "extend", "(", "item", ")", "else", ":", "result", ".", "append", "(", "item", ")", "else", ":", "result", ".", "append", "(", "item", ")", "return", "result" ]
30.40625
20.34375
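Illustrative behaviour, assuming a directory with two YAML files (file contents shown as comments; loader is a hypothetical instance of the owning class):

# conf/a.yaml: [{name: alpha, tags: [t1, t2]}]
# conf/b.yaml: [{name: beta}]
loader.get_yaml_items('conf')          # -> both item dicts
loader.get_yaml_items('conf', 'tags')  # -> ['t1', 't2'] (list values are flattened;
                                       #    items without the param are skipped)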
def get_item(self, address, state = 'fresh'): """Get an item from the cache. :Parameters: - `address`: its address. - `state`: the worst state that is acceptable. :Types: - `address`: any hashable - `state`: `str` :return: the item or `None` if it was not found. :returntype: `CacheItem`""" self._lock.acquire() try: item = self._items.get(address) if not item: return None self.update_item(item) if _state_values[state] >= item.state_value: return item return None finally: self._lock.release()
[ "def", "get_item", "(", "self", ",", "address", ",", "state", "=", "'fresh'", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "item", "=", "self", ".", "_items", ".", "get", "(", "address", ")", "if", "not", "item", ":", "return", "None", "self", ".", "update_item", "(", "item", ")", "if", "_state_values", "[", "state", "]", ">=", "item", ".", "state_value", ":", "return", "item", "return", "None", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
30.043478
14.695652
def get_history_item_for_tree_iter(self, child_tree_iter):
        """Returns the history item for a tree iter and compensates if the tree item is a dummy item

        :param Gtk.TreeIter child_tree_iter: Tree iter of row
        :rtype rafcon.core.execution.execution_history.HistoryItem:
        :return history tree item:
        """
        history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID]
        if history_item is None:  # is dummy item
            if self.history_tree_store.iter_n_children(child_tree_iter) > 0:
                child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0)
                history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID]
            else:
                logger.debug("In a dummy history should be respective real call element.")
        return history_item
[ "def", "get_history_item_for_tree_iter", "(", "self", ",", "child_tree_iter", ")", ":", "history_item", "=", "self", ".", "history_tree_store", "[", "child_tree_iter", "]", "[", "self", ".", "HISTORY_ITEM_STORAGE_ID", "]", "if", "history_item", "is", "None", ":", "# is dummy item", "if", "self", ".", "history_tree_store", ".", "iter_n_children", "(", "child_tree_iter", ")", ">", "0", ":", "child_iter", "=", "self", ".", "history_tree_store", ".", "iter_nth_child", "(", "child_tree_iter", ",", "0", ")", "history_item", "=", "self", ".", "history_tree_store", "[", "child_iter", "]", "[", "self", ".", "HISTORY_ITEM_STORAGE_ID", "]", "else", ":", "logger", ".", "debug", "(", "\"In a dummy history should be respective real call element.\"", ")", "return", "history_item" ]
56.866667
26.6
def dict_to_object(self, d): """ Decode datetime value from string to datetime """ for k, v in list(d.items()): if isinstance(v, six.string_types) and len(v) == 19: # Decode a datetime string to a datetime object try: d[k] = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S") except ValueError: pass elif isinstance(v, six.string_types) and len(v) > 20: try: d[k] = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%f") except ValueError: pass elif isinstance(v, list): d[k] = [self.string_to_datetime(elem) for elem in v] return DotDict(d)
[ "def", "dict_to_object", "(", "self", ",", "d", ")", ":", "for", "k", ",", "v", "in", "list", "(", "d", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "and", "len", "(", "v", ")", "==", "19", ":", "# Decode a datetime string to a datetime object", "try", ":", "d", "[", "k", "]", "=", "datetime", ".", "strptime", "(", "v", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")", "except", "ValueError", ":", "pass", "elif", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "and", "len", "(", "v", ")", ">", "20", ":", "try", ":", "d", "[", "k", "]", "=", "datetime", ".", "strptime", "(", "v", ",", "\"%Y-%m-%dT%H:%M:%S.%f\"", ")", "except", "ValueError", ":", "pass", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "d", "[", "k", "]", "=", "[", "self", ".", "string_to_datetime", "(", "elem", ")", "for", "elem", "in", "v", "]", "return", "DotDict", "(", "d", ")" ]
39.789474
15.263158
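This reads like a json.loads object hook; a hedged wiring sketch where decoder is an assumed instance of the owning class:

import json

doc = '{"created": "2019-01-01T00:00:00", "tag": "x"}'
result = json.loads(doc, object_hook=decoder.dict_to_object)
# result.created is now datetime(2019, 1, 1, 0, 0) and result.tag stays 'x'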
def create_attachment(cls, session, attachment): """Create an attachment. An attachment must be sent to the API before it can be used in a thread. Use this method to create the attachment, then use the resulting hash when creating a thread. Note that HelpScout only supports attachments of 10MB or lower. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be created. Returns: helpscout.models.Attachment: The newly created attachment (hash property only). Use this hash when associating the attachment with a new thread. """ return super(Conversations, cls).create( session, attachment, endpoint_override='/attachments.json', out_type=Attachment, )
[ "def", "create_attachment", "(", "cls", ",", "session", ",", "attachment", ")", ":", "return", "super", "(", "Conversations", ",", "cls", ")", ".", "create", "(", "session", ",", "attachment", ",", "endpoint_override", "=", "'/attachments.json'", ",", "out_type", "=", "Attachment", ",", ")" ]
36.28
23.36
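A hedged end-to-end sketch; the Attachment field names are assumptions, not confirmed HelpScout API:

attachment = Attachment(file_name='log.txt', mime_type='text/plain', data=encoded_body)  # fields assumed
created = Conversations.create_attachment(session, attachment)
# created.hash is the value to reference from a new thread's attachments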
def read_abinit_hdr(self): """ Read the variables associated to the Abinit header. Return :class:`AbinitHeader` """ d = {} for hvar in _HDR_VARIABLES.values(): ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name if ncname in self.rootgrp.variables: d[hvar.name] = self.read_value(ncname) elif ncname in self.rootgrp.dimensions: d[hvar.name] = self.read_dimvalue(ncname) else: raise ValueError("Cannot find `%s` in `%s`" % (ncname, self.path)) # Convert scalars to (well) scalars. if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape: d[hvar.name] = np.asscalar(d[hvar.name]) if hvar.name in ("title", "md5_pseudos", "codvsn"): # Convert array of numpy bytes to list of strings if hvar.name == "codvsn": d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name]) else: d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip() for astr in d[hvar.name]] return AbinitHeader(d)
[ "def", "read_abinit_hdr", "(", "self", ")", ":", "d", "=", "{", "}", "for", "hvar", "in", "_HDR_VARIABLES", ".", "values", "(", ")", ":", "ncname", "=", "hvar", ".", "etsf_name", "if", "hvar", ".", "etsf_name", "is", "not", "None", "else", "hvar", ".", "name", "if", "ncname", "in", "self", ".", "rootgrp", ".", "variables", ":", "d", "[", "hvar", ".", "name", "]", "=", "self", ".", "read_value", "(", "ncname", ")", "elif", "ncname", "in", "self", ".", "rootgrp", ".", "dimensions", ":", "d", "[", "hvar", ".", "name", "]", "=", "self", ".", "read_dimvalue", "(", "ncname", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot find `%s` in `%s`\"", "%", "(", "ncname", ",", "self", ".", "path", ")", ")", "# Convert scalars to (well) scalars.", "if", "hasattr", "(", "d", "[", "hvar", ".", "name", "]", ",", "\"shape\"", ")", "and", "not", "d", "[", "hvar", ".", "name", "]", ".", "shape", ":", "d", "[", "hvar", ".", "name", "]", "=", "np", ".", "asscalar", "(", "d", "[", "hvar", ".", "name", "]", ")", "if", "hvar", ".", "name", "in", "(", "\"title\"", ",", "\"md5_pseudos\"", ",", "\"codvsn\"", ")", ":", "# Convert array of numpy bytes to list of strings", "if", "hvar", ".", "name", "==", "\"codvsn\"", ":", "d", "[", "hvar", ".", "name", "]", "=", "\"\"", ".", "join", "(", "bs", ".", "decode", "(", "\"utf-8\"", ")", ".", "strip", "(", ")", "for", "bs", "in", "d", "[", "hvar", ".", "name", "]", ")", "else", ":", "d", "[", "hvar", ".", "name", "]", "=", "[", "\"\"", ".", "join", "(", "bs", ".", "decode", "(", "\"utf-8\"", ")", "for", "bs", "in", "astr", ")", ".", "strip", "(", ")", "for", "astr", "in", "d", "[", "hvar", ".", "name", "]", "]", "return", "AbinitHeader", "(", "d", ")" ]
45.148148
20.333333
def previousSibling(self):
        '''
            previousSibling - Returns the previous sibling. This would be the previous node (text or tag) in the parent's list.
              This could be text or an element; use previousSiblingElement to ensure an element.

                @return <None/str/AdvancedTag> - None if there are no nodes (text or tag) in the parent before this node,
                                                  Otherwise the previous node (text or tag)
        '''
        parentNode = self.parentNode

        # If no parent, no previous sibling
        if not parentNode:
            return None

        # Determine block index on parent of this node
        myBlockIdx = parentNode.blocks.index(self)

        # If we are the first, no previous sibling
        if myBlockIdx == 0:
            return None

        # Else, return the previous block in parent
        return parentNode.blocks[myBlockIdx-1]
[ "def", "previousSibling", "(", "self", ")", ":", "parentNode", "=", "self", ".", "parentNode", "# If no parent, no previous sibling", "if", "not", "parentNode", ":", "return", "None", "# Determine block index on parent of this node", "myBlockIdx", "=", "parentNode", ".", "blocks", ".", "index", "(", "self", ")", "# If we are the first, no previous sibling", "if", "myBlockIdx", "==", "0", ":", "return", "None", "# Else, return the previous block in parent", "return", "parentNode", ".", "blocks", "[", "myBlockIdx", "-", "1", "]" ]
37.24
29
def scroll(self, rect, dx, dy, attr=None, fill=' '): u'''Scroll a rectangle.''' if attr is None: attr = self.attr x0, y0, x1, y1 = rect source = SMALL_RECT(x0, y0, x1 - 1, y1 - 1) dest = self.fixcoord(x0 + dx, y0 + dy) style = CHAR_INFO() style.Char.AsciiChar = ensure_str(fill[0]) style.Attributes = attr return self.ScrollConsoleScreenBufferW(self.hout, byref(source), byref(source), dest, byref(style))
[ "def", "scroll", "(", "self", ",", "rect", ",", "dx", ",", "dy", ",", "attr", "=", "None", ",", "fill", "=", "' '", ")", ":", "if", "attr", "is", "None", ":", "attr", "=", "self", ".", "attr", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "rect", "source", "=", "SMALL_RECT", "(", "x0", ",", "y0", ",", "x1", "-", "1", ",", "y1", "-", "1", ")", "dest", "=", "self", ".", "fixcoord", "(", "x0", "+", "dx", ",", "y0", "+", "dy", ")", "style", "=", "CHAR_INFO", "(", ")", "style", ".", "Char", ".", "AsciiChar", "=", "ensure_str", "(", "fill", "[", "0", "]", ")", "style", ".", "Attributes", "=", "attr", "return", "self", ".", "ScrollConsoleScreenBufferW", "(", "self", ".", "hout", ",", "byref", "(", "source", ")", ",", "byref", "(", "source", ")", ",", "dest", ",", "byref", "(", "style", ")", ")" ]
41
16.384615
def add_source(source, key=None, fail_invalid=False):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:[email protected]/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    Full list of source specifications supported by the function are:

    'distro': A NOP; i.e. it has no effect.
    'proposed': the proposed deb spec [2] is written to
      /etc/apt/sources.list/proposed
    'distro-proposed': adds <version>-proposed to the debs [2]
    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
    'http://....': add-apt-repository --yes http://...
    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
      optional staging version.  If staging is used then the staging PPA [2]
      will be used.  If staging is NOT used then the cloud archive [3] will be
      added, and the 'ubuntu-cloud-keyring' package will be added for the
      current distro.

    Otherwise the source is not recognised and this is logged to the juju log.
    However, no error is raised, unless fail_invalid is True.

    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
        where {} is replaced with the derived pocket name.
    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
        main universe multiverse restricted
        where {} is replaced with the lsb_release codename (e.g. xenial)
    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
        to /etc/apt/sources.list.d/cloud-archive-list

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.

    @param fail_invalid: (boolean) if True, then the function raises a
    SourceConfigError if there is no matching installation source.

    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
    valid pocket in CLOUD_ARCHIVE_POCKETS
    """
    _mapping = OrderedDict([
        (r"^distro$", lambda: None),  # This is a NOP
        (r"^(?:proposed|distro-proposed)$", _add_proposed),
        (r"^cloud-archive:(.*)$", _add_apt_repository),
        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
    ])
    if source is None:
        source = ''
    for r, fn in six.iteritems(_mapping):
        m = re.match(r, source)
        if m:
            # call the associated function with the captured groups
            # raises SourceConfigError on error.
            fn(*m.groups())
            if key:
                try:
                    import_key(key)
                except GPGKeyError as e:
                    raise SourceConfigError(str(e))
            break
    else:
        # nothing matched. log an error and maybe sys.exit
        err = "Unknown source: {!r}".format(source)
        log(err)
        if fail_invalid:
            raise SourceConfigError(err)
[ "def", "add_source", "(", "source", ",", "key", "=", "None", ",", "fail_invalid", "=", "False", ")", ":", "_mapping", "=", "OrderedDict", "(", "[", "(", "r\"^distro$\"", ",", "lambda", ":", "None", ")", ",", "# This is a NOP", "(", "r\"^(?:proposed|distro-proposed)$\"", ",", "_add_proposed", ")", ",", "(", "r\"^cloud-archive:(.*)$\"", ",", "_add_apt_repository", ")", ",", "(", "r\"^((?:deb |http:|https:|ppa:).*)$\"", ",", "_add_apt_repository", ")", ",", "(", "r\"^cloud:(.*)-(.*)\\/staging$\"", ",", "_add_cloud_staging", ")", ",", "(", "r\"^cloud:(.*)-(.*)$\"", ",", "_add_cloud_distro_check", ")", ",", "(", "r\"^cloud:(.*)$\"", ",", "_add_cloud_pocket", ")", ",", "(", "r\"^snap:.*-(.*)-(.*)$\"", ",", "_add_cloud_distro_check", ")", ",", "]", ")", "if", "source", "is", "None", ":", "source", "=", "''", "for", "r", ",", "fn", "in", "six", ".", "iteritems", "(", "_mapping", ")", ":", "m", "=", "re", ".", "match", "(", "r", ",", "source", ")", "if", "m", ":", "# call the assoicated function with the captured groups", "# raises SourceConfigError on error.", "fn", "(", "*", "m", ".", "groups", "(", ")", ")", "if", "key", ":", "try", ":", "import_key", "(", "key", ")", "except", "GPGKeyError", "as", "e", ":", "raise", "SourceConfigError", "(", "str", "(", "e", ")", ")", "break", "else", ":", "# nothing matched. log an error and maybe sys.exit", "err", "=", "\"Unknown source: {!r}\"", ".", "format", "(", "source", ")", "log", "(", "err", ")", "if", "fail_invalid", ":", "raise", "SourceConfigError", "(", "err", ")" ]
43.908046
21.609195
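For orientation, here is a brief usage sketch of the function above. It is a minimal sketch only, assuming the function is exposed as charmhelpers.fetch.add_source (its usual home in the charmhelpers library) and that the host provides add-apt-repository(1); the source strings are taken from the docstring.

# A minimal usage sketch, assuming charmhelpers.fetch exports add_source
# and the host has add-apt-repository(1) available.
from charmhelpers.fetch import add_source

add_source('ppa:charmers/example')   # PPA; the key is handled automatically
add_source('cloud:icehouse')         # Ubuntu Cloud Archive pocket
add_source('distro')                 # a NOP, as documented

# Unrecognised specs are only logged unless fail_invalid is set:
add_source('bogus:spec', fail_invalid=True)  # raises SourceConfigError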
def insert(self, item, priority):
        """Adds item to DEPQ with given priority by performing a binary
        search on the concurrently rotating deque. The total amount
        rotated, R, for a DEPQ of length n satisfies n <= R <= 3n/2.
        Performance: O(n)"""
        with self.lock:
            self_data = self.data
            rotate = self_data.rotate
            self_items = self.items
            maxlen = self._maxlen
            try:
                if priority <= self_data[-1][1]:
                    self_data.append((item, priority))
                elif priority > self_data[0][1]:
                    self_data.appendleft((item, priority))
                else:
                    length = len(self_data) + 1
                    mid = length // 2
                    shift = 0
                    while True:
                        if priority <= self_data[0][1]:
                            rotate(-mid)
                            shift += mid
                            mid //= 2
                            if mid == 0:
                                mid += 1
                        else:
                            rotate(mid)
                            shift -= mid
                            mid //= 2
                            if mid == 0:
                                mid += 1
                        if self_data[-1][1] >= priority > self_data[0][1]:
                            self_data.appendleft((item, priority))
                            # When returning to original position, never shift
                            # more than half length of DEPQ i.e. if length is
                            # 100 and we rotated -75, rotate -25, not 75
                            if shift > length // 2:
                                shift = length % shift
                                rotate(-shift)
                            else:
                                rotate(shift)
                            break
                try:
                    self_items[item] += 1
                except TypeError:
                    self_items[repr(item)] += 1
            except IndexError:
                self_data.append((item, priority))
                try:
                    self_items[item] = 1
                except TypeError:
                    self_items[repr(item)] = 1
            if maxlen is not None and maxlen < len(self_data):
                self._poplast()
[ "def", "insert", "(", "self", ",", "item", ",", "priority", ")", ":", "with", "self", ".", "lock", ":", "self_data", "=", "self", ".", "data", "rotate", "=", "self_data", ".", "rotate", "self_items", "=", "self", ".", "items", "maxlen", "=", "self", ".", "_maxlen", "try", ":", "if", "priority", "<=", "self_data", "[", "-", "1", "]", "[", "1", "]", ":", "self_data", ".", "append", "(", "(", "item", ",", "priority", ")", ")", "elif", "priority", ">", "self_data", "[", "0", "]", "[", "1", "]", ":", "self_data", ".", "appendleft", "(", "(", "item", ",", "priority", ")", ")", "else", ":", "length", "=", "len", "(", "self_data", ")", "+", "1", "mid", "=", "length", "//", "2", "shift", "=", "0", "while", "True", ":", "if", "priority", "<=", "self_data", "[", "0", "]", "[", "1", "]", ":", "rotate", "(", "-", "mid", ")", "shift", "+=", "mid", "mid", "//=", "2", "if", "mid", "==", "0", ":", "mid", "+=", "1", "else", ":", "rotate", "(", "mid", ")", "shift", "-=", "mid", "mid", "//=", "2", "if", "mid", "==", "0", ":", "mid", "+=", "1", "if", "self_data", "[", "-", "1", "]", "[", "1", "]", ">=", "priority", ">", "self_data", "[", "0", "]", "[", "1", "]", ":", "self_data", ".", "appendleft", "(", "(", "item", ",", "priority", ")", ")", "# When returning to original position, never shift", "# more than half length of DEPQ i.e. if length is", "# 100 and we rotated -75, rotate -25, not 75", "if", "shift", ">", "length", "//", "2", ":", "shift", "=", "length", "%", "shift", "rotate", "(", "-", "shift", ")", "else", ":", "rotate", "(", "shift", ")", "break", "try", ":", "self_items", "[", "item", "]", "+=", "1", "except", "TypeError", ":", "self_items", "[", "repr", "(", "item", ")", "]", "+=", "1", "except", "IndexError", ":", "self_data", ".", "append", "(", "(", "item", ",", "priority", ")", ")", "try", ":", "self_items", "[", "item", "]", "=", "1", "except", "TypeError", ":", "self_items", "[", "repr", "(", "item", ")", "]", "=", "1", "if", "maxlen", "is", "not", "None", "and", "maxlen", "<", "len", "(", "self_data", ")", ":", "self", ".", "_poplast", "(", ")" ]
34.602941
16.558824
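To make the rotation-based binary insert concrete, a short usage sketch follows. It assumes this method belongs to the DEPQ class from the depq package on PyPI (pip install depq); if the class lives elsewhere, adjust the import, and treat the maxlen keyword and accessor names as assumptions drawn from the code above.

# A minimal sketch, assuming DEPQ from the `depq` package; maxlen and
# the first()/last() accessors are assumptions based on the code above.
from depq import DEPQ

q = DEPQ(maxlen=3)
q.insert('low', 1)
q.insert('high', 9)
q.insert('mid', 5)       # placed by O(n) binary search over the rotating deque
q.insert('higher', 10)   # exceeds maxlen, so the lowest-priority item is dropped
print(q.first())         # item with the highest priority
print(q.last())          # item with the lowest priority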
def Analyze(self, hashes): """Looks up hashes in nsrlsvr. Args: hashes (list[str]): hash values to look up. Returns: list[HashAnalysis]: analysis results, or an empty list on error. """ logger.debug( 'Opening connection to {0:s}:{1:d}'.format(self._host, self._port)) nsrl_socket = self._GetSocket() if not nsrl_socket: self.SignalAbort() return [] hash_analyses = [] for digest in hashes: response = self._QueryHash(nsrl_socket, digest) if response is None: continue hash_analysis = interface.HashAnalysis(digest, response) hash_analyses.append(hash_analysis) nsrl_socket.close() logger.debug( 'Closed connection to {0:s}:{1:d}'.format(self._host, self._port)) return hash_analyses
[ "def", "Analyze", "(", "self", ",", "hashes", ")", ":", "logger", ".", "debug", "(", "'Opening connection to {0:s}:{1:d}'", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", ")", "nsrl_socket", "=", "self", ".", "_GetSocket", "(", ")", "if", "not", "nsrl_socket", ":", "self", ".", "SignalAbort", "(", ")", "return", "[", "]", "hash_analyses", "=", "[", "]", "for", "digest", "in", "hashes", ":", "response", "=", "self", ".", "_QueryHash", "(", "nsrl_socket", ",", "digest", ")", "if", "response", "is", "None", ":", "continue", "hash_analysis", "=", "interface", ".", "HashAnalysis", "(", "digest", ",", "response", ")", "hash_analyses", ".", "append", "(", "hash_analysis", ")", "nsrl_socket", ".", "close", "(", ")", "logger", ".", "debug", "(", "'Closed connection to {0:s}:{1:d}'", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", ")", "return", "hash_analyses" ]
24.34375
23.4375
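Since this analyzer only makes sense wired to a running nsrlsvr instance, the following sketch shows the calling pattern. The import path, constructor arguments, and SetHost/SetPort configuration calls are assumptions modelled on plaso's hash-tagging analyzers; the MD5 digest shown is just the empty-file hash.

# A minimal sketch; the import path, constructor and setup calls are
# assumptions, and a reachable nsrlsvr instance is required.
import queue

from plaso.analysis import nsrlsvr

hash_queue = queue.Queue()
hash_analysis_queue = queue.Queue()
analyzer = nsrlsvr.NsrlsvrAnalyzer(hash_queue, hash_analysis_queue)
analyzer.SetHost('127.0.0.1')
analyzer.SetPort(9120)   # nsrlsvr's customary default port

hash_analyses = analyzer.Analyze(['d41d8cd98f00b204e9800998ecf8427e'])
for hash_analysis in hash_analyses:
    # Attribute names follow interface.HashAnalysis(digest, response) above.
    print(hash_analysis.subject_hash, hash_analysis.hash_information)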
def create_board(self, name, project_ids, preset="scrum",
                 location_type='user', location_id=None):
        """Create a new board for the ``project_ids``.

        :param name: name of the board
        :type name: str
        :param project_ids: the projects to create the board in
        :type project_ids: str
        :param preset: What preset to use for this board. (Default: "scrum")
        :type preset: 'kanban', 'scrum', 'diy'
        :param location_type: the location type. Available in cloud. (Default: "user")
        :type location_type: 'user', 'project'
        :param location_id: the id of the project that the board should be
            located under. Omit this for a 'user' location_type. Available in cloud.
        :type location_id: Optional[str]

        :return: The newly created board
        :rtype: Board
        """
        if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH:
            raise NotImplementedError('JIRA Agile Public API does not support this request')

        payload = {}
        if isinstance(project_ids, string_types):
            ids = []
            for p in project_ids.split(','):
                ids.append(self.project(p).id)
            project_ids = ','.join(ids)
        if location_id is not None:
            location_id = self.project(location_id).id
        payload['name'] = name
        if isinstance(project_ids, string_types):
            project_ids = project_ids.split(',')
        payload['projectIds'] = project_ids
        payload['preset'] = preset
        if self.deploymentType == 'Cloud':
            payload['locationType'] = location_type
            payload['locationId'] = location_id
        url = self._get_url(
            'rapidview/create/presets', base=self.AGILE_BASE_URL)
        r = self._session.post(
            url, data=json.dumps(payload))

        raw_issue_json = json_loads(r)
        return Board(self._options, self._session, raw=raw_issue_json)
[ "def", "create_board", "(", "self", ",", "name", ",", "project_ids", ",", "preset", "=", "\"scrum\"", ",", "location_type", "=", "'user'", ",", "location_id", "=", "None", ")", ":", "if", "self", ".", "_options", "[", "'agile_rest_path'", "]", "!=", "GreenHopperResource", ".", "GREENHOPPER_REST_PATH", ":", "raise", "NotImplementedError", "(", "'JIRA Agile Public API does not support this request'", ")", "payload", "=", "{", "}", "if", "isinstance", "(", "project_ids", ",", "string_types", ")", ":", "ids", "=", "[", "]", "for", "p", "in", "project_ids", ".", "split", "(", "','", ")", ":", "ids", ".", "append", "(", "self", ".", "project", "(", "p", ")", ".", "id", ")", "project_ids", "=", "','", ".", "join", "(", "ids", ")", "if", "location_id", "is", "not", "None", ":", "location_id", "=", "self", ".", "project", "(", "location_id", ")", ".", "id", "payload", "[", "'name'", "]", "=", "name", "if", "isinstance", "(", "project_ids", ",", "string_types", ")", ":", "project_ids", "=", "project_ids", ".", "split", "(", "','", ")", "payload", "[", "'projectIds'", "]", "=", "project_ids", "payload", "[", "'preset'", "]", "=", "preset", "if", "self", ".", "deploymentType", "==", "'Cloud'", ":", "payload", "[", "'locationType'", "]", "=", "location_type", "payload", "[", "'locationId'", "]", "=", "location_id", "url", "=", "self", ".", "_get_url", "(", "'rapidview/create/presets'", ",", "base", "=", "self", ".", "AGILE_BASE_URL", ")", "r", "=", "self", ".", "_session", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "raw_issue_json", "=", "json_loads", "(", "r", ")", "return", "Board", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw", "=", "raw_issue_json", ")" ]
43.377778
16.733333
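The method above appears to come from the jira python client's Agile/GreenHopper interface, so a usage sketch is straightforward. The server URL and credentials below are placeholders, and the sketch assumes the client was constructed against the GreenHopper REST path, which this method requires.

# A minimal sketch using the `jira` package (pip install jira);
# server URL and credentials are placeholders.
from jira import JIRA

client = JIRA(server='https://jira.example.com',
              basic_auth=('user', 'api-token'))

# Comma-separated project keys are resolved to project ids internally.
board = client.create_board(name='Team board',
                            project_ids='PROJ1,PROJ2',
                            preset='kanban')
print(board.id, board.name)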
def update(self, fieldname, localValue, remoteValue):
    '''
    Returns the appropriate current value, based on the changes
    recorded by this ChangeTracker, the value stored by the server
    (`localValue`), and the value stored by the synchronizing client
    (`remoteValue`). If `remoteValue` conflicts with changes stored
    locally, then a `pysyncml.ConflictError` is raised. If a change
    needs to be applied because `remoteValue` has been updated, then
    the new value will be returned, and this ChangeTracker will be
    updated such that a call to :meth:`getChangeSpec` will incorporate
    the change.

    :param fieldname:

      The name of the field being evaluated.

    :param localValue:

      The value of the field as stored by the server, usually the one that
      also stored the current change-spec. If `localValue` is ``None``,
      then it is assumed that the field was potentially added (this will
      first be verified against the stored change-spec).

    :param remoteValue:

      The new value being presented that may or may not be a source of
      conflict. If `remoteValue` is ``None``, then it is assumed that the
      field was potentially deleted (this will first be verified against
      the stored change-spec).

    '''
    if localValue == remoteValue:
      return localValue
    ct = constants.ITEM_DELETED if remoteValue is None else constants.ITEM_MODIFIED
    if localValue is None:
      ct = constants.ITEM_ADDED
    # todo: i should probably trap irep errors. for example, if this
    #       cspec has a field "x" marked as deleted, then `localValue`
    #       must be None... etc.
    # TODO: i think this kind of handling would break in ListChangeTracker!...
    changed = self.isChange(fieldname, ct, remoteValue)
    if changed is None:
      return localValue
    self.append(changed, ct, initialValue=localValue, isMd5=False)
    return remoteValue
[ "def", "update", "(", "self", ",", "fieldname", ",", "localValue", ",", "remoteValue", ")", ":", "if", "localValue", "==", "remoteValue", ":", "return", "localValue", "ct", "=", "constants", ".", "ITEM_DELETED", "if", "remoteValue", "is", "None", "else", "constants", ".", "ITEM_MODIFIED", "if", "localValue", "is", "None", ":", "ct", "=", "constants", ".", "ITEM_ADDED", "# todo: i should probably trap irep errors. for example, if this", "# cspec has a field \"x\" marked as deleted, then `localValue`", "# must be None... etc.", "# TODO: i think this kind of handling would break in ListChangeTracker!...", "changed", "=", "self", ".", "isChange", "(", "fieldname", ",", "ct", ",", "remoteValue", ")", "if", "changed", "is", "None", ":", "return", "localValue", "self", ".", "append", "(", "changed", ",", "ct", ",", "initialValue", "=", "localValue", ",", "isMd5", "=", "False", ")", "return", "remoteValue" ]
38.408163
25.714286
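Finally, a sketch of how update drives three-way merging during a sync. The concrete tracker class and its import path are assumptions (the comments above mention a ListChangeTracker, so an attribute-oriented sibling is assumed here); the field names and values are purely illustrative.

# A minimal sketch; the AttributeChangeTracker class and its import
# path are assumptions, and the values are illustrative only.
from pysyncml.change.tracker import AttributeChangeTracker

tracker = AttributeChangeTracker()

# Identical values: nothing is recorded and the local value is returned.
value = tracker.update('title', 'draft', 'draft')

# Remote differs and no conflicting local change is recorded: per the
# docstring, the remote value is adopted and the change-spec updated.
value = tracker.update('title', 'draft', 'final')
print(value)                     # 'final'
print(tracker.getChangeSpec())   # now incorporates the 'title' change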