def correct(self, z): '''Correct the given approximate solution ``z`` with respect to the linear system ``linear_system`` and the deflation space defined by ``U``.''' c = self.linear_system.Ml*( self.linear_system.b - self.linear_system.A*z) c = utils.inner(self.W, c, ip_B=self.ip_B) if self.Q is not None and self.R is not None: c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c)) if self.WR is not self.VR: c = self.WR.dot(scipy.linalg.solve_triangular(self.VR, c)) return z + self.W.dot(c)
Correct the given approximate solution ``z`` with respect to the linear system ``linear_system`` and the deflation space defined by ``U``.
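A minimal numpy sketch of the textbook form of such a correction, assuming a plain symmetric positive definite system and ignoring this class's preconditioners and cached QR factors; all names here (A, b, U, z) are illustrative:

    import numpy

    numpy.random.seed(0)
    n, k = 50, 3
    A = numpy.random.rand(n, n)
    A = A + A.T + n * numpy.eye(n)          # SPD example matrix
    b = numpy.random.rand(n, 1)
    U = numpy.random.rand(n, k)             # basis of the deflation space
    z = numpy.zeros((n, 1))                 # approximate solution to be corrected

    r = b - A @ z                                      # residual of z
    c = numpy.linalg.solve(U.T @ (A @ U), U.T @ r)     # projected (Galerkin) system
    z_corrected = z + U @ c

    # after the correction the residual is orthogonal to the deflation space
    print(numpy.linalg.norm(U.T @ (b - A @ z_corrected)))   # round-off level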
def MAU(self): '''Result of preconditioned operator to deflation space, i.e., :math:`MM_lAM_rU`.''' if self._MAU is None: self._MAU = self.linear_system.M * self.AU return self._MAU
Result of preconditioned operator to deflation space, i.e., :math:`MM_lAM_rU`.
def _apply_projection(self, Av): '''Apply the projection and store inner product. :param v: the vector resulting from an application of :math:`M_lAM_r` to the current Arnoldi vector. (CG needs special treatment, here). ''' PAv, UAv = self.projection.apply_complement(Av, return_Ya=True) self.C = numpy.c_[self.C, UAv] return PAv
Apply the projection and store inner product. :param v: the vector resulting from an application of :math:`M_lAM_r` to the current Arnoldi vector. (CG needs special treatment, here).
def _get_initial_residual(self, x0): '''Return the projected initial residual. Returns :math:`MPM_l(b-Ax_0)`. ''' if x0 is None: Mlr = self.linear_system.Mlb else: r = self.linear_system.b - self.linear_system.A*x0 Mlr = self.linear_system.Ml*r PMlr, self.UMlr = self.projection.apply_complement(Mlr, return_Ya=True) MPMlr = self.linear_system.M*PMlr MPMlr_norm = utils.norm(PMlr, MPMlr, ip_B=self.linear_system.ip_B) return MPMlr, PMlr, MPMlr_norm
Return the projected initial residual. Returns :math:`MPM_l(b-Ax_0)`.
def _apply_projection(self, Av): r'''Computes :math:`\langle C,M_lAM_rV_n\rangle` efficiently with a three-term recurrence.''' PAv, UAp = self.projection.apply_complement(Av, return_Ya=True) self._UAps.append(UAp) c = UAp.copy() rhos = self.rhos if self.iter > 0: c -= (1 + rhos[-1]/rhos[-2])*self._UAps[-2] if self.iter > 1: c += rhos[-2]/rhos[-3]*self._UAps[-3] c *= ((-1)**self.iter) / numpy.sqrt(rhos[-1]) if self.iter > 0: c -= numpy.sqrt(rhos[-2]/rhos[-1]) * self.C[:, [-1]] self.C = numpy.c_[self.C, c] return PAv
Computes :math:`\langle C,M_lAM_rV_n\rangle` efficiently with a three-term recurrence.
def get_vectors(self, indices=None): '''Compute Ritz vectors.''' H_ = self._deflated_solver.H (n_, n) = H_.shape coeffs = self.coeffs if indices is None else self.coeffs[:, indices] return numpy.c_[self._deflated_solver.V[:, :n], self._deflated_solver.projection.U].dot(coeffs)
Compute Ritz vectors.
def get_explicit_residual(self, indices=None): '''Explicitly computes the Ritz residual.''' ritz_vecs = self.get_vectors(indices) return self._deflated_solver.linear_system.MlAMr * ritz_vecs \ - ritz_vecs * self.values
Explicitly computes the Ritz residual.
def get_explicit_resnorms(self, indices=None): '''Explicitly computes the Ritz residual norms.''' res = self.get_explicit_residual(indices) # apply preconditioner linear_system = self._deflated_solver.linear_system Mres = linear_system.M * res # compute norms resnorms = numpy.zeros(res.shape[1]) for i in range(resnorms.shape[0]): resnorms[i] = utils.norm(res[:, [i]], Mres[:, [i]], ip_B=linear_system.ip_B) return resnorms
Explicitly computes the Ritz residual norms.
def initialize_notebook(): try: from IPython.core.display import display, HTML except ImportError: print("IPython Notebook could not be loaded.") lib_js = ENV.get_template('ipynb_init_js.html') lib_css = ENV.get_template('ipynb_init_css.html') display(HTML(lib_js.render())) display(HTML(lib_css.render()))
Initialize the IPython notebook display elements
def _repr_html_(self): self.chart_id = '_'.join(['bearcart', uuid4().hex]) self.template_vars.update({'chart_id': self.chart_id, 'y_axis_id': self.y_axis_id, 'legend_id': self.legend_id, 'slider_id': self.slider_id, 'export_json': json.dumps(self.json_data)}) self._build_graph() html = self.env.get_template('ipynb_repr.html') return html.render(self.template_vars)
Build the HTML representation for IPython.
def set_expire(self, y = 2999, mon = 12, d = 28, h = 23, min_ = 59, s = 59): if type(y) is not int or type(mon) is not int or type(d) is not int or \ type(h) is not int or type(min_) is not int or type(s) is not int: raise KPError("Date variables must be integers") elif y > 9999 or y < 1 or mon > 12 or mon < 1 or d > 31 or d < 1 or \ h > 23 or h < 0 or min_ > 59 or min_ < 0 or s > 59 or s < 0: raise KPError("No legal date") elif ((mon == 1 or mon == 3 or mon == 5 or mon == 7 or mon == 8 or \ mon == 10 or mon == 12) and d > 31) or ((mon == 4 or mon == 6 or \ mon == 9 or mon == 11) and d > 30) or (mon == 2 and d > 28): raise KPError("Given day doesn't exist in given month") else: self.expire = datetime(y, mon, d, h, min_, s) self.last_mod = datetime.now().replace(microsecond = 0) return True
This method is used to change the expire date of a group - y is the year between 1 and 9999 inclusive - mon is the month between 1 and 12 - d is a day in the given month - h is an hour between 0 and 23 - min_ is a minute between 0 and 59 - s is a second between 0 and 59 The special date 2999-12-28 23:59:59 means that the group never expires. If no date is given, the expire date is set to this special value.
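The hand-rolled month check above always rejects 29 February, even in leap years. A hypothetical alternative sketch using only the standard library, where calendar.monthrange supplies the true month length and datetime validates the remaining fields (KPError is replaced by ValueError here):

    import calendar
    from datetime import datetime

    def validate_expire(y=2999, mon=12, d=28, h=23, min_=59, s=59):
        if not all(isinstance(v, int) for v in (y, mon, d, h, min_, s)):
            raise ValueError("Date variables must be integers")
        # monthrange returns (weekday of the 1st, days in month) and handles leap years
        if not 1 <= mon <= 12 or not 1 <= d <= calendar.monthrange(y, mon)[1]:
            raise ValueError("Given day doesn't exist in given month")
        # datetime itself rejects out-of-range hours, minutes and seconds
        return datetime(y, mon, d, h, min_, s)

    print(validate_expire(2024, 2, 29))   # leap day accepted here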
def create_entry(self, title='', image=1, url='', username='', password='', comment='', y=2999, mon=12, d=28, h=23, min_=59, s=59): return self.db.create_entry(self, title, image, url, username, password, comment, y, mon, d, h, min_, s)
This method creates an entry in this group. Compare to StdEntry for information about the arguments. One of the following arguments is needed: - title - url - username - password - comment
def set_title(self, title = None): if title is None or type(title) is not str: raise KPError("Need a new title.") else: self.title = title self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to change an entry title. A new title string is needed.
def set_image(self, image = None): if image is None or type(image) is not int: raise KPError("Need a new image number") else: self.image = image self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the image number. image must be an unsigned int.
def set_url(self, url = None): if url is None or type(url) is not str: raise KPError("Need a new url") else: self.url = url self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the url. url must be a string.
def set_username(self, username = None): if username is None or type(username) is not str: raise KPError("Need a new username") else: self.username = username self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the username. username must be a string.
def set_password(self, password = None): if password is None or type(password) is not str: raise KPError("Need a new password") else: self.password = password self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the password. password must be a string.
def set_comment(self, comment = None): if comment is None or type(comment) is not str: raise KPError("Need a new comment") else: self.comment = comment self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the comment. comment must be a string.
def read_buf(self): with open(self.filepath, 'rb') as handler: try: buf = handler.read() # There should be a header at least if len(buf) < 124: raise KPError('Unexpected file size. It should be at least ' '124 bytes but it is ' '{0}!'.format(len(buf))) except: raise return buf
Read database file
def close(self): if self.filepath is not None: if path.isfile(self.filepath+'.lock'): remove(self.filepath+'.lock') self.filepath = None self.read_only = False self.lock() return True else: raise KPError('Can\'t close a file that is not opened')
This method closes the database correctly.
def lock(self): self.password = None self.keyfile = None self.groups[:] = [] self.entries[:] = [] self._group_order[:] = [] self._entry_order[:] = [] self.root_group = v1Group() self._num_groups = 1 self._num_entries = 0 return True
This method locks the database.
def unlock(self, password = None, keyfile = None, buf = None): if ((password is None or password == "") and (keyfile is None or keyfile == "")): raise KPError("A password/keyfile is needed") elif ((type(password) is not str and password is not None) or (type(keyfile) is not str and keyfile is not None)): raise KPError("password/keyfile must be a string.") if keyfile == "": keyfile = None if password == "": password = None self.password = password self.keyfile = keyfile return self.load(buf)
Unlock the database. A master key (password and/or keyfile) is needed.
def create_group(self, title = None, parent = None, image = 1, y = 2999, mon = 12, d = 28, h = 23, min_ = 59, s = 59): if title is None: raise KPError("Need a group title to create a group.") elif type(title) is not str or image < 1 or(parent is not None and \ type(parent) is not v1Group) or type(image) is not int: raise KPError("Wrong type or value for title or image or parent") id_ = 1 for i in self.groups: if i.id_ >= id_: id_ = i.id_ + 1 group = v1Group(id_, title, image, self) group.creation = datetime.now().replace(microsecond=0) group.last_mod = datetime.now().replace(microsecond=0) group.last_access = datetime.now().replace(microsecond=0) if group.set_expire(y, mon, d, h, min_, s) is False: group.set_expire() # If no parent is given, just append the new group at the end if parent is None: group.parent = self.root_group self.root_group.children.append(group) group.level = 0 self.groups.append(group) # Else insert the group behind the parent else: if parent in self.groups: parent.children.append(group) group.parent = parent group.level = parent.level+1 self.groups.insert(self.groups.index(parent)+1, group) else: raise KPError("Given parent doesn't exist") self._num_groups += 1 return True
This method creates a new group. A group title is needed or no group will be created. If a parent is given, the group will be created as a sub-group. title must be a string, image an unsigned int >0 and parent a v1Group. With y, mon, d, h, min_ and s you can set an expiration date like on entries.
def remove_group(self, group = None): if group is None: raise KPError("Need group to remove a group") elif type(group) is not v1Group: raise KPError("group must be v1Group") children = [] entries = [] if group in self.groups: # Save all children and entries to # delete them later children.extend(group.children) entries.extend(group.entries) # Finally remove group group.parent.children.remove(group) self.groups.remove(group) else: raise KPError("Given group doesn't exist") self._num_groups -= 1 for i in children: self.remove_group(i) for i in entries: self.remove_entry(i) return True
This method removes a group. The group to be removed is needed. group must be a v1Group.
def move_group(self, group = None, parent = None): if group is None or type(group) is not v1Group: raise KPError("A valid group must be given.") elif parent is not None and type(parent) is not v1Group: raise KPError("parent must be a v1Group.") elif group is parent: raise KPError("group and parent must not be the same group") if parent is None: parent = self.root_group if group in self.groups: self.groups.remove(group) group.parent.children.remove(group) group.parent = parent if parent.children: if parent.children[-1] is self.groups[-1]: self.groups.append(group) else: new_index = self.groups.index(parent.children[-1]) + 1 self.groups.insert(new_index, group) else: new_index = self.groups.index(parent) + 1 self.groups.insert(new_index, group) parent.children.append(group) if parent is self.root_group: group.level = 0 else: group.level = parent.level + 1 if group.children: self._move_group_helper(group) group.last_mod = datetime.now().replace(microsecond=0) return True else: raise KPError("Didn't find given group.")
Append group to a new parent. group and parent must be v1Group-instances.
def move_group_in_parent(self, group = None, index = None): if group is None or index is None: raise KPError("group and index must be set") elif type(group) is not v1Group or type(index) is not int: raise KPError("group must be a v1Group-instance and index " "must be an integer.") elif group not in self.groups: raise KPError("Given group doesn't exist") elif index < 0 or index >= len(group.parent.children): raise KPError("index must be a valid index if group.parent.groups") else: group_at_index = group.parent.children[index] pos_in_parent = group.parent.children.index(group) pos_in_groups = self.groups.index(group) pos_in_groups2 = self.groups.index(group_at_index) group.parent.children[index] = group group.parent.children[pos_in_parent] = group_at_index self.groups[pos_in_groups2] = group self.groups[pos_in_groups] = group_at_index if group.children: self._move_group_helper(group) if group_at_index.children: self._move_group_helper(group_at_index) group.last_mod = datetime.now().replace(microsecond=0) return True
Move group to another position in group's parent. index must be a valid index of group.parent.children
def _move_group_helper(self, group): for i in group.children: self.groups.remove(i) i.level = group.level + 1 self.groups.insert(self.groups.index(group) + 1, i) if i.children: self._move_group_helper(i)
A helper to move the children of a group.
def remove_entry(self, entry = None): if entry is None or type(entry) is not v1Entry: raise KPError("Need an entry.") elif entry in self.entries: entry.group.entries.remove(entry) self.entries.remove(entry) self._num_entries -= 1 return True else: raise KPError("Given entry doesn't exist.")
This method can remove entries. The v1Entry-object entry is needed.
def move_entry(self, entry = None, group = None): if entry is None or group is None or type(entry) is not v1Entry or \ type(group) is not v1Group: raise KPError("Need an entry and a group.") elif entry not in self.entries: raise KPError("No entry found.") elif group in self.groups: entry.group.entries.remove(entry) group.entries.append(entry) entry.group_id = group.id_ entry.group = group return True else: raise KPError("No group found.")
Move an entry to another group. A v1Group group and a v1Entry entry are needed.
def move_entry_in_group(self, entry = None, index = None): if entry is None or index is None or type(entry) is not v1Entry \ or type(index) is not int: raise KPError("Need an entry and an index.") elif index < 0 or index > len(entry.group.entries)-1: raise KPError("Index is not valid.") elif entry not in self.entries: raise KPError("Entry not found.") pos_in_group = entry.group.entries.index(entry) pos_in_entries = self.entries.index(entry) entry_at_index = entry.group.entries[index] pos_in_entries2 = self.entries.index(entry_at_index) entry.group.entries[index] = entry entry.group.entries[pos_in_group] = entry_at_index self.entries[pos_in_entries2] = entry self.entries[pos_in_entries] = entry_at_index return True
Move entry to another position inside a group. An entry and a valid index to insert the entry in the entry list of the holding group is needed. 0 means that the entry is moved to the first position 1 to the second and so on.
def _transform_key(self, masterkey): aes = AES.new(self._transf_randomseed, AES.MODE_ECB) # Encrypt the created hash for _ in range(self._key_transf_rounds): masterkey = aes.encrypt(masterkey) # Finally, hash it again... sha_obj = SHA256.new() sha_obj.update(masterkey) masterkey = sha_obj.digest() # ...and hash the result together with the randomseed sha_obj = SHA256.new() sha_obj.update(self._final_randomseed + masterkey) return sha_obj.digest()
This method creates the key to decrypt the database
def _get_passwordkey(self): sha = SHA256.new() sha.update(self.password.encode('utf-8')) return sha.digest()
This method just hashes self.password.
def _get_filekey(self): if not os.path.exists(self.keyfile): raise KPError('Keyfile does not exist.') try: with open(self.keyfile, 'rb') as handler: handler.seek(0, os.SEEK_END) size = handler.tell() handler.seek(0, os.SEEK_SET) if size == 32: return handler.read(32) elif size == 64: try: return binascii.unhexlify(handler.read(64)) except (TypeError, binascii.Error): handler.seek(0, os.SEEK_SET) sha = SHA256.new() while True: buf = handler.read(2048) sha.update(buf) if len(buf) < 2048: break return sha.digest() except IOError as e: raise KPError('Could not read file: %s' % e)
This method creates a key from a keyfile.
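A rough standalone sketch of the same keyfile handling using only hashlib: a 32-byte file is used as the key directly, a 64-byte file is tried as hex first, and everything else is hashed whole. The function name and error handling are illustrative, not kppy's API:

    import binascii
    import hashlib

    def filekey_sketch(path):
        with open(path, 'rb') as handler:
            data = handler.read()
        if len(data) == 32:              # raw 256-bit key
            return data
        if len(data) == 64:              # maybe a hex-encoded 256-bit key
            try:
                return binascii.unhexlify(data)
            except (binascii.Error, ValueError):
                pass                     # not valid hex; fall through and hash it
        return hashlib.sha256(data).digest()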
def _cbc_decrypt(self, final_key, crypted_content): # Just decrypt the content with the created key aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv) decrypted_content = aes.decrypt(crypted_content) if sys.version > '3': padding = decrypted_content[-1] else: padding = ord(decrypted_content[-1]) decrypted_content = decrypted_content[:len(decrypted_content)-padding] return decrypted_content
This method decrypts the database
def _cbc_encrypt(self, content, final_key): aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv) padding = (16 - len(content) % AES.block_size) for _ in range(padding): content += chr(padding).encode() temp = bytes(content) return aes.encrypt(temp)
This method encrypts the content.
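The padding used here is PKCS#7-style: append N bytes, each of value N, so the length becomes a multiple of the 16-byte AES block; _cbc_decrypt above strips the last byte's worth again. A dependency-free round trip of just the padding step:

    BLOCK = 16

    def pad(content):
        n = BLOCK - len(content) % BLOCK      # always 1..16, never zero
        return content + bytes([n]) * n

    def unpad(padded):
        return padded[:len(padded) - padded[-1]]

    msg = b'attack at dawn'
    assert len(pad(msg)) % BLOCK == 0
    assert unpad(pad(msg)) == msg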
def _read_group_field(self, group, levels, field_type, field_size, decrypted_content): if field_type == 0x0000: # Ignored (commentar block) pass elif field_type == 0x0001: group.id_ = struct.unpack('<I', decrypted_content[:4])[0] elif field_type == 0x0002: try: group.title = struct.unpack('<{0}s'.format(field_size-1), decrypted_content[:field_size-1])[0].decode('utf-8') except UnicodeDecodeError: group.title = struct.unpack('<{0}s'.format(field_size-1), decrypted_content[:field_size-1])[0].decode('latin-1') decrypted_content = decrypted_content[1:] elif field_type == 0x0003: group.creation = self._get_date(decrypted_content) elif field_type == 0x0004: group.last_mod = self._get_date(decrypted_content) elif field_type == 0x0005: group.last_access = self._get_date(decrypted_content) elif field_type == 0x0006: group.expire = self._get_date(decrypted_content) elif field_type == 0x0007: group.image = struct.unpack('<I', decrypted_content[:4])[0] elif field_type == 0x0008: level = struct.unpack('<H', decrypted_content[:2])[0] group.level = level levels.append(level) elif field_type == 0x0009: group.flags = struct.unpack('<I', decrypted_content[:4])[0] elif field_type == 0xFFFF: pass else: return False return True
This method handles the different fields of a group
def _get_date(self, decrypted_content): # Just copied from original KeePassX source date_field = struct.unpack('<5B', decrypted_content[:5]) dw1 = date_field[0] dw2 = date_field[1] dw3 = date_field[2] dw4 = date_field[3] dw5 = date_field[4] y = (dw1 << 6) | (dw2 >> 2) mon = ((dw2 & 0x03) << 2) | (dw3 >> 6) d = (dw3 >> 1) & 0x1F h = ((dw3 & 0x01) << 4) | (dw4 >> 4) min_ = ((dw4 & 0x0F) << 2) | (dw5 >> 6) s = dw5 & 0x3F return datetime(y, mon, d, h, min_, s)
This method is used to decode the packed dates of entries
def _pack_date(self, date): # Just copied from original KeePassX source y, mon, d, h, min_, s = date.timetuple()[:6] dw1 = 0x0000FFFF & ((y>>6) & 0x0000003F) dw2 = 0x0000FFFF & ((y & 0x0000003F)<<2 | ((mon>>2) & 0x00000003)) dw3 = 0x0000FFFF & (((mon & 0x0000003)<<6) | ((d & 0x0000001F)<<1) \ | ((h>>4) & 0x00000001)) dw4 = 0x0000FFFF & (((h & 0x0000000F)<<4) | ((min_>>2) & 0x0000000F)) dw5 = 0x0000FFFF & (((min_ & 0x00000003)<<6) | (s & 0x0000003F)) return struct.pack('<5B', dw1, dw2, dw3, dw4, dw5)
This method is used to encode dates
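Both date helpers implement KeePass v1's 5-byte packed date. A self-contained round trip with the same bit layout shows a concrete timestamp surviving packing and unpacking:

    import struct
    from datetime import datetime

    def pack_date(date):
        y, mon, d, h, min_, s = date.timetuple()[:6]
        dw1 = (y >> 6) & 0x3F
        dw2 = ((y & 0x3F) << 2) | ((mon >> 2) & 0x03)
        dw3 = ((mon & 0x03) << 6) | ((d & 0x1F) << 1) | ((h >> 4) & 0x01)
        dw4 = ((h & 0x0F) << 4) | ((min_ >> 2) & 0x0F)
        dw5 = ((min_ & 0x03) << 6) | (s & 0x3F)
        return struct.pack('<5B', dw1, dw2, dw3, dw4, dw5)

    def unpack_date(buf):
        dw1, dw2, dw3, dw4, dw5 = struct.unpack('<5B', buf[:5])
        y = (dw1 << 6) | (dw2 >> 2)
        mon = ((dw2 & 0x03) << 2) | (dw3 >> 6)
        d = (dw3 >> 1) & 0x1F
        h = ((dw3 & 0x01) << 4) | (dw4 >> 4)
        min_ = ((dw4 & 0x0F) << 2) | (dw5 >> 6)
        s = dw5 & 0x3F
        return datetime(y, mon, d, h, min_, s)

    stamp = datetime(2024, 5, 17, 13, 37, 42)
    assert unpack_date(pack_date(stamp)) == stamp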
def _create_group_tree(self, levels): if levels[0] != 0: raise KPError("Invalid group tree") for i in range(len(self.groups)): if(levels[i] == 0): self.groups[i].parent = self.root_group self.groups[i].index = len(self.root_group.children) self.root_group.children.append(self.groups[i]) continue j = i-1 while j >= 0: if levels[j] < levels[i]: if levels[i]-levels[j] != 1: raise KPError("Invalid group tree") self.groups[i].parent = self.groups[j] self.groups[i].index = len(self.groups[j].children) self.groups[i].parent.children.append(self.groups[i]) break if j == 0: raise KPError("Invalid group tree") j -= 1 for e in range(len(self.entries)): for g in range(len(self.groups)): if self.entries[e].group_id == self.groups[g].id_: self.groups[g].entries.append(self.entries[e]) self.entries[e].group = self.groups[g] # from original KeePassX-code, but what does it do? self.entries[e].index = 0 return True
This method creates a group tree
def _save_group_field(self, field_type, group): if field_type == 0x0000: # Ignored (commentar block) pass elif field_type == 0x0001: if group.id_ is not None: return (4, struct.pack('<I', group.id_)) elif field_type == 0x0002: if group.title is not None: return (len(group.title.encode())+1, (group.title+'\0').encode()) elif field_type == 0x0003: if group.creation is not None: return (5, self._pack_date(group.creation)) elif field_type == 0x0004: if group.last_mod is not None: return (5, self._pack_date(group.last_mod)) elif field_type == 0x0005: if group.last_access is not None: return (5, self._pack_date(group.last_access)) elif field_type == 0x0006: if group.expire is not None: return (5, self._pack_date(group.expire)) elif field_type == 0x0007: if group.image is not None: return (4, struct.pack('<I', group.image)) elif field_type == 0x0008: if group.level is not None: return (2, struct.pack('<H', group.level)) elif field_type == 0x0009: if group.flags is not None: return (4, struct.pack('<I', group.flags)) return False
This method packs a group field
def getsecret(self, section, option, **kwargs): # keyword-only arguments, vars and fallback are directly passed through raw = kwargs.get('raw', False) value = self.get(section, option, **kwargs) if raw: return value return self.custodia_client.get_secret(value)
Get a secret from Custodia
def _load_plugin_class(menu, name): group = 'custodia.{}'.format(menu) eps = list(pkg_resources.iter_entry_points(group, name)) if len(eps) > 1: raise ValueError( "Multiple entry points for {} {}: {}".format(menu, name, eps)) elif len(eps) == 1: # backwards compatibility with old setuptools ep = eps[0] if hasattr(ep, 'resolve'): return ep.resolve() else: return ep.load(require=False) elif '.' in name: # fall back to old style dotted name module, classname = name.rsplit('.', 1) m = importlib.import_module(module) return getattr(m, classname) else: raise ValueError("{}: {} not found".format(menu, name))
Load Custodia plugin Entry points are preferred over dotted import path.
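The dotted-name fallback is just an importlib module import followed by a getattr of the class; a minimal sketch of that branch alone (the pkg_resources entry-point branch is left out):

    import importlib

    def load_dotted(name):
        module, classname = name.rsplit('.', 1)
        return getattr(importlib.import_module(module), classname)

    # resolve a stdlib class the same way a dotted plugin name would be resolved
    print(load_dotted('json.JSONDecoder'))   # <class 'json.decoder.JSONDecoder'>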
def _load_plugins(config, cfgparser): # set umask before any plugin gets a chance to create a file os.umask(config['umask']) for s in cfgparser.sections(): if s in {'ENV', 'global'}: # ENV section is only used for interpolation continue if s.startswith('/'): menu = 'consumers' path_chain = s.split('/') if path_chain[-1] == '': path_chain = path_chain[:-1] name = tuple(path_chain) else: if s.startswith('auth:'): menu = 'authenticators' name = s[5:] elif s.startswith('authz:'): menu = 'authorizers' name = s[6:] elif s.startswith('store:'): menu = 'stores' name = s[6:] else: raise ValueError('Invalid section name [%s].\n' % s) try: config[menu][name] = _create_plugin(cfgparser, s, menu) except Exception as e: logger.debug("Plugin '%s' failed to load.", name, exc_info=True) raise RuntimeError(menu, name, e) # 2nd initialization stage for menu in ['authenticators', 'authorizers', 'consumers', 'stores']: plugins = config[menu] for name in sorted(plugins): plugin = plugins[name] plugin.finalize_init(config, cfgparser, context=None)
Load and initialize plugins
def get(self, po): name = po.name typ = po.typ default = po.default handler = getattr(self, '_get_{}'.format(typ), None) if handler is None: raise ValueError(typ) self.seen.add(name) # pylint: disable=not-callable if not self.parser.has_option(self.section, name): if default is REQUIRED: raise NameError(self.section, name) if isinstance(default, INHERIT_GLOBAL): return handler('global', name, default.default) # don't return default here, give the handler a chance to modify # the default, e.g. pw_uid with default='root' returns 0. return handler(self.section, name, default)
Lookup value for a PluginOption instance Args: po: PluginOption Returns: converted value
def parse(self, msg, name): # On requests we imply 'simple' if there is no input message if msg is None: return if not isinstance(msg, string_types): raise InvalidMessage("The 'value' attribute is not a string") self.name = name self.payload = msg self.msg_type = 'simple'
Parses a simple message :param msg: the json-decoded value :param name: the requested name :raises UnknownMessageType: if the type is not 'simple' :raises InvalidMessage: if the message cannot be parsed or validated
def server_check(arg): if arg.startswith(('http://', 'https://', 'http+unix://')): return arg if arg.startswith('./'): arg = os.path.abspath(arg) elif not arg.startswith('/'): raise argparse.ArgumentTypeError( 'Unix socket path must start with / or ./') # assume it is a unix socket return 'http+unix://{}'.format(url_escape(arg, ''))
Check and format --server arg
def krb5_unparse_principal_name(name): prefix, realm = name.split(u'@') if u'/' in prefix: service, host = prefix.rsplit(u'/', 1) return service, host, realm else: return None, prefix, realm
Split a Kerberos principal name into parts Returns: * ('host', hostname, realm) for a host principal * (servicename, hostname, realm) for a service principal * (None, username, realm) for a user principal :param text name: Kerberos principal name :return: (service, host, realm) or (None, username, realm)
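The split rules are easiest to see on concrete principals; the function is restated here so the demo is self-contained:

    def krb5_unparse_principal_name(name):
        prefix, realm = name.split(u'@')
        if u'/' in prefix:
            service, host = prefix.rsplit(u'/', 1)
            return service, host, realm
        return None, prefix, realm

    print(krb5_unparse_principal_name('HTTP/www.example.com@EXAMPLE.COM'))
    # ('HTTP', 'www.example.com', 'EXAMPLE.COM')
    print(krb5_unparse_principal_name('alice@EXAMPLE.COM'))
    # (None, 'alice', 'EXAMPLE.COM')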
def getLogger(name): def exception(self, msg, *args, **kwargs): extra = kwargs.setdefault('extra', {}) extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG) kwargs['exc_info'] = True self.log(logging.ERROR, msg, *args, **kwargs) logger = logging.getLogger(name) logger.exception = six.create_bound_method(exception, logger) return logger
Create logger with custom exception() method
def exception(self, msg, *args, **kwargs): extra = kwargs.setdefault('extra', {}) extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG) kwargs['exc_info'] = True self.log(logging.ERROR, msg, *args, **kwargs)
Like standard exception() logger but only print stack in debug mode
def parse(self, msg, name): try: jtok = JWT(jwt=msg) except Exception as e: raise InvalidMessage('Failed to parse message: %s' % str(e)) try: token = jtok.token if isinstance(token, JWE): token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC]) # If an encrypted payload is received then there must be # a nested signed payload to verify the provenance. payload = token.payload.decode('utf-8') token = JWS() token.deserialize(payload) elif isinstance(token, JWS): pass else: raise TypeError("Invalid Token type: %s" % type(jtok)) # Retrieve client keys for later use self.client_keys = [ JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)), JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))] # verify token and get payload token.verify(self.client_keys[KEY_USAGE_SIG]) claims = json_decode(token.payload) except Exception as e: logger.debug('Failed to validate message', exc_info=True) raise InvalidMessage('Failed to validate message: %s' % str(e)) check_kem_claims(claims, name) self.name = name self.payload = claims.get('value') self.msg_type = 'kem' return {'type': self.msg_type, 'value': {'kid': self.client_keys[KEY_USAGE_ENC].key_id, 'claims': claims}}
Parses the message. We check that the message is properly formatted. :param msg: a json-encoded value containing a JWS or JWE+JWS token :raises InvalidMessage: if the message cannot be parsed or validated :returns: A verified payload
def instance_name(string): invalid = ':/@' if set(string).intersection(invalid): msg = 'Invalid instance name {}'.format(string) raise argparse.ArgumentTypeError(msg) return string
Check for valid instance name
def image_request(self, image, filename, params=None): data = self._init_data(params) response = requests.post(REQUESTS_URL, headers={ 'Authorization': self.auth.authorize('POST', REQUESTS_URL, params), 'User-Agent': USER_AGENT, }, data=data, files={'image_request[image]': (filename, image)}) return self._unwrap_error(response)
Send an image for classification. The image is a file-like object. The params parameter is optional. On success this method will immediately return a job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image: File-like object containing the image data. :param filename: The file name. :param params: Additional parameters for CloudSight API.
def remote_image_request(self, image_url, params=None): data = self._init_data(params) data['image_request[remote_image_url]'] = image_url response = requests.post(REQUESTS_URL, headers={ 'Authorization': self.auth.authorize('POST', REQUESTS_URL, data), 'User-Agent': USER_AGENT, }, data=data) return self._unwrap_error(response)
Send an image for classification. The image will be retrieved from the specified URL. The params parameter is optional. On success this method will immediately return a job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image_url: Image URL. :param params: Additional parameters for CloudSight API.
def image_response(self, token): url = RESPONSES_URL + token response = requests.get(url, headers={ 'Authorization': self.auth.authorize('GET', url), 'User-Agent': USER_AGENT, }) return self._unwrap_error(response)
Contact the server and update the job status. After a request has been submitted, it usually takes 6-12 seconds to receive a completed response. We recommend polling for a response every 1 second after a 4 second delay from the initial request, while the status is :py:data:`cloudsight.STATUS_NOT_COMPLETED`. :py:meth:`cloudsight.API.wait` method does this automatically. :param token: Job token as returned from :py:meth:`cloudsight.API.image_request` or :py:meth:`cloudsight.API.remote_image_request`
def repost(self, token): url = '%s/%s/repost' % (REQUESTS_URL, token) response = requests.post(url, headers={ 'Authorization': self.auth.authorize('POST', url), 'User-Agent': USER_AGENT, }) if response.status_code == 200: return return self._unwrap_error(response)
Repost the job if it has timed out (:py:data:`cloudsight.STATUS_TIMEOUT`). :param token: Job token as returned from :py:meth:`cloudsight.API.image_request` or :py:meth:`cloudsight.API.remote_image_request`
def wait(self, token, timeout=DEFAULT_POLL_TIMEOUT): delta = datetime.timedelta(seconds=timeout) timeout_at = datetime.datetime.now() + delta time.sleep(min(timeout, INITIAL_POLL_WAIT)) response = self.image_response(token) while response['status'] == STATUS_NOT_COMPLETED \ and datetime.datetime.now() < timeout_at: time.sleep(1) response = self.image_response(token) return response
Wait for the job until it has been processed. This method will block for up to `timeout` seconds. This method will wait for 4 seconds after the initial request and then will call :py:meth:`cloudsight.API.image_response` method every second until the status changes. :param token: Job token as returned from :py:meth:`cloudsight.API.image_request` or :py:meth:`cloudsight.API.remote_image_request`
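A generic sketch of the same poll-until-done pattern with an overall deadline; the check callable and its status values stand in for image_response and the cloudsight status constants:

    import time
    from datetime import datetime, timedelta

    def wait_until_done(check, timeout=30, initial_wait=4, interval=1):
        deadline = datetime.now() + timedelta(seconds=timeout)
        time.sleep(min(timeout, initial_wait))
        result = check()
        while result['status'] == 'not completed' and datetime.now() < deadline:
            time.sleep(interval)
            result = check()
        return result

    # a fake job that completes on the third poll
    calls = {'n': 0}
    def fake_check():
        calls['n'] += 1
        return {'status': 'completed' if calls['n'] >= 3 else 'not completed'}

    print(wait_until_done(fake_check, timeout=10, initial_wait=0, interval=0))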
def copy_magic_into_pyc(input_pyc, output_pyc, src_version, dest_version): (version, timestamp, magic_int, co, is_pypy, source_size) = load_module(input_pyc) assert version == float(src_version), ( "Need Python %s bytecode; got bytecode for version %s" % (src_version, version)) magic_int = magic2int(magics[dest_version]) write_bytecode_file(output_pyc, co, magic_int) print("Wrote %s" % output_pyc) return
Bytecodes are the same except the magic number, so just change that
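The "magic" being copied is the first field of a .pyc header: a 4-byte value identifying the interpreter version the bytecode was compiled for. The standard library exposes the running interpreter's value, which gives a feel for what magic2int converts:

    import importlib.util

    print(importlib.util.MAGIC_NUMBER)                                 # this interpreter's 4-byte magic
    print(int.from_bytes(importlib.util.MAGIC_NUMBER[:2], 'little'))   # the integer form, comparable to magic_int above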
def transform_26_27(inst, new_inst, i, n, offset, instructions, new_asm): if inst.opname in ('JUMP_IF_FALSE', 'JUMP_IF_TRUE'): i += 1 assert i < n assert instructions[i].opname == 'POP_TOP' new_inst.offset = offset new_inst.opname = ( 'POP_JUMP_IF_FALSE' if inst.opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE' ) new_asm.backpatch[-1].remove(inst) new_inst.arg = 'L%d' % (inst.offset + inst.arg + 3) new_asm.backpatch[-1].add(new_inst) else: xlate26_27(new_inst) return xdis.op_size(new_inst.opcode, opcode_27)
Change JUMP_IF_FALSE and JUMP_IF_TRUE to POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
def transform_32_33(inst, new_inst, i, n, offset, instructions, new_asm): add_size = xdis.op_size(new_inst.opcode, opcode_33) if inst.opname in ('MAKE_FUNCTION','MAKE_CLOSURE'): # Previous instruction should be a load const which # contains the name of the function to call prev_inst = instructions[i-1] assert prev_inst.opname == 'LOAD_CONST' assert isinstance(prev_inst.arg, int) # Add the function name as an additional LOAD_CONST load_fn_const = Instruction() load_fn_const.opname = 'LOAD_CONST' load_fn_const.opcode = opcode_33.opmap['LOAD_CONST'] load_fn_const.line_no = None prev_const = new_asm.code.co_consts[prev_inst.arg] if hasattr(prev_const, 'co_name'): fn_name = new_asm.code.co_consts[prev_inst.arg].co_name else: fn_name = 'what-is-up' const_index = len(new_asm.code.co_consts) new_asm.code.co_consts = list(new_asm.code.co_consts) new_asm.code.co_consts.append(fn_name) load_fn_const.arg = const_index load_fn_const.offset = offset load_fn_const.starts_line = False load_fn_const.is_jump_target = False new_asm.code.instructions.append(load_fn_const) load_const_size = xdis.op_size(load_fn_const.opcode, opcode_33) add_size += load_const_size new_inst.offset = offset + add_size pass return add_size
MAKE_FUNCTION adds another const; probably MAKE_CLASS as well
def transform_33_32(inst, new_inst, i, n, offset, instructions, new_asm): add_size = xdis.op_size(new_inst.opcode, opcode_33) if inst.opname in ('MAKE_FUNCTION','MAKE_CLOSURE'): # Previous instruction should be a load const which # contains the name of the function to call prev_inst = instructions[i-1] assert prev_inst.opname == 'LOAD_CONST' assert isinstance(prev_inst.arg, int) assert len(instructions) > 2 prev_inst2 = instructions[i-2] assert prev_inst2.opname == 'LOAD_CONST' assert isinstance(prev_inst2.arg, int) # Remove the function name as an additional LOAD_CONST prev2_const = new_asm.code.co_consts[prev_inst.arg] assert hasattr(prev2_const, 'co_name') new_asm.code.instructions = new_asm.code.instructions[:-1] load_const_size = xdis.op_size(prev_inst.opcode, opcode_33) add_size -= load_const_size new_inst.offset = offset - add_size return -load_const_size return 0
MAKE_FUNCTION and MAKE_CLOSURE have an additional LOAD_CONST of a name that is not in Python 3.2. Remove these.
def main(conversion_type, input_pyc, output_pyc): shortname = osp.basename(input_pyc) if shortname.endswith('.pyc'): shortname = shortname[:-4] src_version = conversion_to_version(conversion_type, is_dest=False) dest_version = conversion_to_version(conversion_type, is_dest=True) if output_pyc is None: output_pyc = "%s-%s.pyc" % (shortname, dest_version) if conversion_type in UPWARD_COMPATABLE: copy_magic_into_pyc(input_pyc, output_pyc, src_version, dest_version) return temp_asm = NamedTemporaryFile('w', suffix='.pyasm', prefix=shortname, delete=False) (filename, co, version, timestamp, magic_int) = disassemble_file(input_pyc, temp_asm, asm_format=True) temp_asm.close() assert version == float(src_version), ( "Need Python %s bytecode; got bytecode for version %s" % (src_version, version)) asm = asm_file(temp_asm.name) new_asm = transform_asm(asm, conversion_type, src_version, dest_version) os.unlink(temp_asm.name) write_pycfile(output_pyc, new_asm)
Convert Python bytecode from one version to another. INPUT_PYC contains the input bytecode path name OUTPUT_PYC contains the output bytecode path name if supplied The --conversion type option specifies what conversion to do. Note: there is a very limited set of conversions currently supported. Help out and write more!
def generate(self, str=None, fpath=None): self.prepare_storage() self.str = self.load_file(fpath) if fpath else self.sanitize(str) self.validate_config() self.generate_kgrams() self.hash_kgrams() self.generate_fingerprints() return self.fingerprints
generates fingerprints of the input. Either provide `str` to compute fingerprint directly from your string or `fpath` to compute fingerprint from the text of the file. Make sure to have your text decoded in `utf-8` format if you pass the input string. Args: str (Optional(str)): string whose fingerprint is to be computed. fpath (Optional(str)): absolute path of the text file whose fingerprint is to be computed. Returns: List(int): fingerprints of the input. Raises: FingerprintException: If the input string does not meet the requirements of parameters provided for fingerprinting.
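Under the hood `generate` follows the usual k-gram fingerprinting pipeline: slide a k-character window over the text, hash every k-gram, then keep one representative hash per window of consecutive hashes (winnowing). A compact standalone sketch of that pipeline; k, the window size and the hash are illustrative choices, not this class's configured parameters:

    import hashlib

    def fingerprints(text, k=5, window=4):
        # hash every k-gram of the text
        hashes = [int(hashlib.md5(text[i:i + k].encode('utf-8')).hexdigest(), 16) % 10**8
                  for i in range(len(text) - k + 1)]
        # winnowing: keep the minimum hash of each sliding window of hashes
        picked = set()
        for i in range(len(hashes) - window + 1):
            picked.add(min(hashes[i:i + window]))
        return sorted(picked)

    print(fingerprints("the quick brown fox jumps over the lazy dog"))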
def main(pyc_file, asm_path): if os.stat(asm_path).st_size == 0: print("Size of assembly file %s is zero" % asm_path) sys.exit(1) asm = asm_file(asm_path) if not pyc_file and asm_path.endswith('.pyasm'): pyc_file = asm_path[:-len('.pyasm')] + '.pyc' write_pycfile(pyc_file, asm)
Create Python bytecode from a Python assembly file. ASM_PATH gives the input Python assembly file. We suggest ending the file in .pyasm If --pyc-file is given, that indicates the path to write the Python bytecode. The path should end in '.pyc'. See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst for how to write a Python assembler file.
def payload_body(req): to_hash = req.body if type(req.body) is bytes else req.body.encode('utf-8') if req.method in ('POST', 'PUT'): return { 'hash': hashlib.sha256(to_hash).hexdigest(), 'alg': 'sha256', }
A generator that will include the sha256 signature of the request's body in the JWT payload. This is only done if the request could have a body: if the method is POST or PUT. >>> auth = JWTAuth('secret') >>> auth.add_field('body', payload_body)
def expire(self, secs): self.add_field('exp', lambda req: int(time.time() + secs))
Adds the standard 'exp' field, used to prevent replay attacks. Adds the 'exp' field to the payload. When a request is made, the field says that it should expire at now + `secs` seconds. Of course, this provides no protection unless the server reads and interprets this field.
def _generate(self, request): payload = {} for field, gen in self._generators.items(): value = None if callable(gen): value = gen(request) else: value = gen if value: payload[field] = value return payload
Generate a payload for the given request.
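_generate simply walks the registered field generators, calling the callables with the request and using static values as-is; a dependency-free sketch of that dispatch with a plain dict standing in for the request object:

    import time

    generators = {
        'iss': 'my-client-id',                        # static value, copied through
        'exp': lambda req: int(time.time() + 30),     # callable, invoked with the request
        'method': lambda req: req['method'],
    }

    def generate_payload(request):
        payload = {}
        for field, gen in generators.items():
            value = gen(request) if callable(gen) else gen
            if value:
                payload[field] = value
        return payload

    print(generate_payload({'method': 'POST'}))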
def tt2nav(toctree, klass=None, appendix=None, divider=False): tt = toctree divider = '<li class="divider"></li>' if divider else '' # Append anything just before the closing </ul>. if appendix: tt = re.sub(r'(</ul>$)', r'{}\1'.format(appendix), tt) # Add class attribute to all <ul> elements. tt = re.sub(r'<ul>', r'<ul class="">', tt) # Add class to first <ul> tag. if klass: tt = re.sub(r'(^<ul[\s\w-]+class=")', r'\1{} '.format(klass), tt) # Add class "active" to all <li> tags with "current" class. # tt = re.sub(r'(<li[\s\w-]+class="[^"]*current)([^"]*")', r'\1 active\2', tt) # Match each <li> that contains <ul>. pattern = r'(<li[\s\w-]+class=")([^>]*>[^<]*<a[^>]*>[^<]*</a>[^<]*<ul[\s\w]+class=")' # Inject the classes. replace = r'{}\1has-dropdown \2dropdown '.format(divider) # Do the replace and return. return re.sub(pattern, replace, tt)
Injects ``has-dropdown`` and ``dropdown`` classes to HTML generated by the :func:`toctree` function. :param str toctree: HTML generated by the :func:`toctree` function.
def list_vars(script_path, ignore=IGNORE_DEFAULT): if path.isfile(script_path): input = (""". "%s"; env | awk -F = '/[a-zA-Z_][a-zA-Z_0-9]*=/ """ % script_path + """{ if (!system("[ -n \\"${" $1 "}\\" ]")) print $1 }'""") cmd = "env -i bash".split() p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE) stdout_data, stderr_data = p.communicate(input=input) if stderr_data: raise ShellScriptException(script_path, stderr_data) else: lines = stdout_data.split() return [elt for elt in lines if elt not in ignore] else: raise _noscripterror(script_path)
Given a shell script, returns a list of shell variable names. Note: this method executes the script, so beware if it contains side-effects. :param script_path: Path to a shell script :type script_path: str or unicode :param ignore: variable names to ignore. By default we ignore variables that env injects into the script's environment. See IGNORE_DEFAULT. :type ignore: iterable :return: List of the variable names defined in the script. :rtype: list
def get_vars(script_path, ignore=IGNORE_DEFAULT): # Iterate over every var independently: # This is slower than using env, but enables us to capture multiline variables return dict((var, get_var(script_path, var)) for var in list_vars(script_path))
Gets the values of environment variables defined in a shell script. Note: this method executes the script potentially many times. :param script_path: Path to a shell script :type script_path: str or unicode :param ignore: variable names to ignore. By default we ignore variables that env injects into the script's environment. See IGNORE_DEFAULT. :type ignore: iterable :return: Key value pairs representing the environment variables defined in the script. :rtype: dict
def get_var(script_path, var): if path.isfile(script_path): input = '. "%s"; echo -n "$%s"\n'% (script_path, var) pipe = Popen(["bash"], stdout=PIPE, stdin=PIPE, stderr=PIPE) stdout_data, stderr_data = pipe.communicate(input=input) if stderr_data: raise ShellScriptException(script_path, stderr_data) else: return stdout_data else: raise _noscripterror(script_path)
Given a script, and the name of an environment variable, returns the value of the environment variable. :param script_path: Path to a shell script :type script_path: str or unicode :param var: environment variable name :type var: str or unicode :return: str
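Both helpers work by sourcing the script in a child shell and echoing what they need. A hypothetical standalone version of get_var built on subprocess.run; it assumes a bash executable on PATH and writes a throwaway script purely for the demo:

    import subprocess
    import tempfile

    def get_var_sketch(script_path, var):
        cmd = '. "{}"; printf %s "${}"'.format(script_path, var)
        proc = subprocess.run(['bash', '-c', cmd], capture_output=True, text=True)
        if proc.returncode != 0:
            raise RuntimeError(proc.stderr)
        return proc.stdout

    with tempfile.NamedTemporaryFile('w', suffix='.sh', delete=False) as handle:
        handle.write('export GREETING="hello world"\n')
    print(get_var_sketch(handle.name, 'GREETING'))   # hello world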
def get_current_trading_info(fsym, tsym, markets='all', try_conversion=True, format='raw'): # load data url = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets, try_conversion=try_conversion) data = load_data(url) # select format to return if format == 'raw': data = data['RAW'] elif format == 'display': data = data['DISPLAY'] return {fsym: {tsym: data}}
Get the latest trading info of the requested pair as a volume weighted average based on the markets requested. Args: fsym: FROM symbol. tsym: TO symbol. markets: List containing the market names. try_conversion: If the crypto does not trade directly into the toSymbol requested, BTC will be used for conversion. If set to false, it will try to get values without using any conversion at all. format: Default returns the 'RAW' format. Can be set to 'DISPLAY' format. Returns: The returned latest average trading information dictionary contains the following key value pairs: {'PRICE': ..., 'LASTVOLUMETO': ..., 'TOSYMBOL': ..., 'LOW24HOUR': ..., 'CHANGE24HOUR': ..., 'FROMSYMBOL': ..., 'FLAGS': ..., 'VOLUME24HOUR': ..., 'HIGH24HOUR': ..., 'LASTUPDATE': ..., 'VOLUME24HOURT': ..., 'LASTMARKET': ..., 'CHANGEPCT24HOUR': ..., 'OPEN24HOUR': ..., 'MARKET': ..., 'LASTTRADEID': ..., 'LASTVOLUME': ...}
def get_day_average_price(fsym, tsym, e='all', try_conversion=True, avg_type='HourVWAP', utc_hour_diff=0): # load data url = build_url('dayAvg', fsym=fsym, tsym=tsym, e=e, try_conversion=try_conversion, avg_type=avg_type, utc_hour_diff=utc_hour_diff) data = load_data(url) # remove 'ConversionType' information del data['ConversionType'] return {fsym: data}
Get the current day's average price of a currency pair. Args: fsym: FROM symbol. tsym: TO symbol. e: Default returns average price across all exchanges. Can be set to the name of a single exchange. try_conversion: If the crypto does not trade directly into the toSymbol requested, BTC will be used for conversion. If set to false, it will try to get values without using any conversion at all. avg_type: 'HourVWAP' returns a volume weighted average of the hourly close price. The other option 'MidHighLow' gives the average between the 24 hour high and low. utc_hour_diff: Pass hour difference to UTC for different time zone. (TODO: add a 'toTs' parameter.) Returns: Returns a price dictionary containing the current day's average price as float. {fsym: {tsym: price}}
def get_historical_eod_price(fsym, tsyms, date, e='all', try_conversion=True): # convert single fsym and tsym input to single element lists if not isinstance(tsyms, list): tsyms = [tsyms] # convert date to timestamp ts = date_to_timestamp(date) # load data url = build_url("pricehistorical", fsym=fsym, tsyms=tsyms, ts=ts, e=e, try_conversion=try_conversion) data = load_data(url) return data
Get end of day price for cryptocurrency in any other currency for the requested timestamp. Args: fsym: FROM symbol. tsyms: Single string or list of TO symbols. date: Date as string with this format: "Y-m-d H-M-S". e: Default returns average price across all exchanges. Can be set to the name of a single exchange. try_conversion: If the crypto does not trade directly into the toSymbol requested, BTC will be used for conversion. If set to false, it will try to get values without using any conversion at all. Returns: Returns a dictionary containing the end of day price pairs for the provided date. {fsym: {tsym1: ..., tsym2: ..., ...}}
def url2fs(url): uri, extension = posixpath.splitext(url) return safe64.dir(uri) + extension
encode a URL to be safe as a filename
def xindexes(slots): # the first response... slot = [0] * len(slots) for i in range(reduce(operator.mul, slots)): yield slot carry = 1 # iterate from the least to the most significant digit for j in range(len(slots), 0, -1): k = j - 1 slot[k] += carry if slot[k] >= slots[k]: carry = 1 + slot[k] - slots[k] slot[k] = 0 else: carry = 0
Generate list of possible indexes into a list of slots. Best way to think of this is as a number where each digit might have a different radix. E.g.: (10, 10, 10) would return 10 x 10 x 10 = 1000 responses from (0, 0, 0) to (9, 9, 9), (2, 2, 2, 2) would return 2 x 2 x 2 x 2 = 16 responses from (0, 0, 0, 0) to (1, 1, 1, 1).
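The mixed-radix analogy is easy to verify: itertools.product over the per-slot ranges enumerates exactly the same index tuples in the same order (when collecting from xindexes itself, copy each yield, since the generator re-yields one mutated list):

    from itertools import product

    slots = (2, 3, 2)
    expected = list(product(*(range(n) for n in slots)))
    print(len(expected))    # 2 * 3 * 2 = 12
    print(expected[:4])     # [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]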
def is_merc_projection(srs): if srs.lower() == '+init=epsg:900913': return True # observed srs = dict([p.split('=') for p in srs.split() if '=' in p]) # expected # note, common optional modifiers like +no_defs, +over, and +wkt # are not pairs and should not prevent matching gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null' gym = dict([p.split('=') for p in gym.split() if '=' in p]) for p in gym: if srs.get(p, None) != gym.get(p, None): return False return True
Return true if the map projection matches that used by VEarth, Google, OSM, etc. Is currently necessary for zoom-level shorthand for scale-denominator.
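The comparison hinges on turning a proj4 string into a dict of its key=value pairs; a quick demo of that parse shows why flag-style modifiers such as +over or +no_defs never affect the match:

    srs = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
           '+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +over +no_defs')
    pairs = dict(p.split('=') for p in srs.split() if '=' in p)
    print(pairs['+proj'], pairs['+a'])   # merc 6378137
    print('+over' in pairs)              # False: bare flags are skipped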
def extract_declarations(map_el, dirs, scale=1, user_styles=[]): styles = [] # # First, look at all the stylesheets defined in the map itself. # for stylesheet in map_el.findall('Stylesheet'): map_el.remove(stylesheet) content, mss_href = fetch_embedded_or_remote_src(stylesheet, dirs) if content: styles.append((content, mss_href)) # # Second, look through the user-supplied styles for override rules. # for stylesheet in user_styles: mss_href = urljoin(dirs.source.rstrip('/')+'/', stylesheet) content = urllib.urlopen(mss_href).read().decode(DEFAULT_ENCODING) styles.append((content, mss_href)) declarations = [] for (content, mss_href) in styles: is_merc = is_merc_projection(map_el.get('srs','')) for declaration in stylesheet_declarations(content, is_merc, scale): # # Change the value of each URI relative to the location # of the containing stylesheet. We generally just have # the one instance of "dirs" around for a full parse cycle, # so it's necessary to perform this normalization here # instead of later, while mss_href is still available. # uri_value = declaration.value.value if uri_value.__class__ is uri: uri_value.address = urljoin(mss_href, uri_value.address) declarations.append(declaration) return declarations
Given a Map element and directories object, remove and return a complete list of style declarations from any Stylesheet elements found within.
def make_rule(filter, *symbolizers): scale_tests = [test for test in filter.tests if test.isMapScaled()] other_tests = [test for test in filter.tests if not test.isMapScaled()] # these will be replaced with values as necessary minscale, maxscale, filter = None, None, None for scale_test in scale_tests: if scale_test.op in ('>', '>='): if scale_test.op == '>=': value = scale_test.value elif scale_test.op == '>': value = scale_test.value + 1 minscale = output.MinScaleDenominator(value) if scale_test.op in ('<', '<='): if scale_test.op == '<=': value = scale_test.value elif scale_test.op == '<': value = scale_test.value - 1 maxscale = output.MaxScaleDenominator(value) filter_text = ' and '.join(test2str(test) for test in other_tests) if filter_text: filter = output.Filter(filter_text) rule = output.Rule(minscale, maxscale, filter, [s for s in symbolizers if s]) return rule
Given a Filter and some symbolizers, return a Rule prepopulated with applicable min/max scale denominator and filter.
def is_applicable_selector(selector, filter): for test in selector.allTests(): if not test.isCompatible(filter.tests): return False return True
Given a Selector and Filter, return True if the Selector is compatible with the given Filter, and False if they contradict.
def get_polygon_rules(declarations): property_map = {'polygon-fill': 'fill', 'polygon-opacity': 'fill-opacity', 'polygon-gamma': 'gamma', 'polygon-meta-output': 'meta-output', 'polygon-meta-writer': 'meta-writer'} property_names = property_map.keys() # a place to put rules rules = [] for (filter, values) in filtered_property_declarations(declarations, property_names): color = values.has_key('polygon-fill') and values['polygon-fill'].value opacity = values.has_key('polygon-opacity') and values['polygon-opacity'].value or None gamma = values.has_key('polygon-gamma') and values['polygon-gamma'].value or None symbolizer = color and output.PolygonSymbolizer(color, opacity, gamma) if symbolizer: rules.append(make_rule(filter, symbolizer)) return rules
Given a list of declarations, return a list of output.Rule objects with a PolygonSymbolizer for each applicable filter.
def get_raster_rules(declarations): property_map = {'raster-opacity': 'opacity', 'raster-mode': 'mode', 'raster-scaling': 'scaling' } property_names = property_map.keys() # a place to put rules rules = [] for (filter, values) in filtered_property_declarations(declarations, property_names): sym_params = {} for prop,attr in property_map.items(): sym_params[attr] = values.has_key(prop) and values[prop].value or None symbolizer = output.RasterSymbolizer(**sym_params) rules.append(make_rule(filter, symbolizer)) if not rules: # No raster-* rules were created, but we're here so we must need a symbolizer. rules.append(make_rule(Filter(), output.RasterSymbolizer())) return rules
Given a list of declarations, return a list of output.Rule objects with a RasterSymbolizer for each applicable filter. A RasterSymbolizer is always created, even if there are no applicable declarations.
def post_process_symbolizer_image_file(file_href, dirs): # support latest mapnik features of auto-detection # of image sizes and jpeg reading support... # http://trac.mapnik.org/ticket/508 mapnik_auto_image_support = (MAPNIK_VERSION >= 701) mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601) file_href = urljoin(dirs.source.rstrip('/')+'/', file_href) scheme, n, path, p, q, f = urlparse(file_href) if scheme in ('http','https'): scheme, path = '', locally_cache_remote_file(file_href, dirs.cache) if scheme not in ('file', '') or not systempath.exists(un_posix(path)): raise Exception("Image file needs to be a working, fetchable resource, not %s" % file_href) if not mapnik_auto_image_support and not Image: raise SystemExit('PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0') img = Image.open(un_posix(path)) if mapnik_requires_absolute_paths: path = posixpath.realpath(path) else: path = dirs.output_path(path) msg('reading symbol: %s' % path) image_name, ext = posixpath.splitext(path) if ext in ('.png', '.tif', '.tiff'): output_ext = ext else: output_ext = '.png' # new local file name dest_file = un_posix('%s%s' % (image_name, output_ext)) if not posixpath.exists(dest_file): img.save(dest_file,'PNG') msg('Destination file: %s' % dest_file) return dest_file, output_ext[1:], img.size[0], img.size[1]
Given an image file href and a set of directories, modify the image file name so it's correct with respect to the output and cache directories.
def get_point_rules(declarations, dirs): property_map = {'point-file': 'file', 'point-width': 'width', 'point-height': 'height', 'point-type': 'type', 'point-allow-overlap': 'allow_overlap', 'point-meta-output': 'meta-output', 'point-meta-writer': 'meta-writer'} property_names = property_map.keys() # a place to put rules rules = [] for (filter, values) in filtered_property_declarations(declarations, property_names): point_file, point_type, point_width, point_height \ = values.has_key('point-file') \ and post_process_symbolizer_image_file(str(values['point-file'].value), dirs) \ or (None, None, None, None) point_width = values.has_key('point-width') and values['point-width'].value or point_width point_height = values.has_key('point-height') and values['point-height'].value or point_height point_allow_overlap = values.has_key('point-allow-overlap') and values['point-allow-overlap'].value or None symbolizer = point_file and output.PointSymbolizer(point_file, point_type, point_width, point_height, point_allow_overlap) if symbolizer: rules.append(make_rule(filter, symbolizer)) return rules
Given a list of declarations, return a list of output.Rule objects. Optionally provide an output directory for local copies of image files.
def get_polygon_pattern_rules(declarations, dirs):
    property_map = {'polygon-pattern-file': 'file', 'polygon-pattern-width': 'width',
                    'polygon-pattern-height': 'height', 'polygon-pattern-type': 'type',
                    'polygon-meta-output': 'meta-output', 'polygon-meta-writer': 'meta-writer'}

    property_names = property_map.keys()

    # a place to put rules
    rules = []

    for (filter, values) in filtered_property_declarations(declarations, property_names):
        poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height \
            = values.has_key('polygon-pattern-file') \
            and post_process_symbolizer_image_file(str(values['polygon-pattern-file'].value), dirs) \
            or (None, None, None, None)

        poly_pattern_width = values.has_key('polygon-pattern-width') and values['polygon-pattern-width'].value or poly_pattern_width
        poly_pattern_height = values.has_key('polygon-pattern-height') and values['polygon-pattern-height'].value or poly_pattern_height

        symbolizer = poly_pattern_file and output.PolygonPatternSymbolizer(poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height)

        if symbolizer:
            rules.append(make_rule(filter, symbolizer))

    return rules
Given a list of declarations, return a list of output.Rule objects. Optionally provide an output directory for local copies of image files.
def get_line_pattern_rules(declarations, dirs):
    property_map = {'line-pattern-file': 'file', 'line-pattern-width': 'width',
                    'line-pattern-height': 'height', 'line-pattern-type': 'type',
                    'line-pattern-meta-output': 'meta-output', 'line-pattern-meta-writer': 'meta-writer'}

    property_names = property_map.keys()

    # a place to put rules
    rules = []

    for (filter, values) in filtered_property_declarations(declarations, property_names):
        line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height \
            = values.has_key('line-pattern-file') \
            and post_process_symbolizer_image_file(str(values['line-pattern-file'].value), dirs) \
            or (None, None, None, None)

        line_pattern_width = values.has_key('line-pattern-width') and values['line-pattern-width'].value or line_pattern_width
        line_pattern_height = values.has_key('line-pattern-height') and values['line-pattern-height'].value or line_pattern_height

        symbolizer = line_pattern_file and output.LinePatternSymbolizer(line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height)

        if symbolizer:
            rules.append(make_rule(filter, symbolizer))

    return rules
Given a list of declarations, return a list of output.Rule objects. Optionally provide an output directory for local copies of image files.
def get_applicable_declarations(element, declarations):
    element_tag = element.tag
    element_id = element.get('id', None)
    element_classes = element.get('class', '').split()

    return [dec for dec in declarations
            if dec.selector.matches(element_tag, element_id, element_classes)]
Given an XML element and a list of declarations, return the ones that match as a list of (property, value, selector) tuples.
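A minimal sketch of the selector matching this relies on, using hypothetical stand-in classes; the real Selector and Declaration types live elsewhere in the library and have richer behaviour:

# Hypothetical stand-ins, only to illustrate how declarations are filtered by selector.
class FakeSelector:
    def __init__(self, tag, id=None, classes=()):
        self.tag, self.id, self.classes = tag, id, set(classes)

    def matches(self, tag, id, classes):
        return (self.tag in ('*', tag)
                and (self.id is None or self.id == id)
                and self.classes.issubset(classes))

class FakeDeclaration:
    def __init__(self, selector, property, value):
        self.selector, self.property, self.value = selector, property, value

decs = [FakeDeclaration(FakeSelector('Layer', classes=['roads']), 'line-width', 2),
        FakeDeclaration(FakeSelector('Layer', id='water'), 'polygon-fill', '#9cf')]

# An element <Layer class="roads"> matches only the first declaration:
matching = [d for d in decs if d.selector.matches('Layer', None, ['roads'])]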
def localize_shapefile(shp_href, dirs):
    # support latest mapnik features of auto-detection
    # of image sizes and jpeg reading support...
    # http://trac.mapnik.org/ticket/508

    mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)

    shp_href = urljoin(dirs.source.rstrip('/')+'/', shp_href)
    scheme, host, path, p, q, f = urlparse(shp_href)

    if scheme in ('http', 'https'):
        msg('%s | %s' % (shp_href, dirs.cache))
        scheme, path = '', locally_cache_remote_file(shp_href, dirs.cache)
    else:
        host = None

    # collect drive for windows
    to_posix(systempath.realpath(path))

    if scheme not in ('file', ''):
        raise Exception("Shapefile needs to be local, not %s" % shp_href)

    if mapnik_requires_absolute_paths:
        path = posixpath.realpath(path)

    original = path

    path = dirs.output_path(path)

    if path.endswith('.zip'):
        # unzip_shapefile_into needs a path it can find
        path = posixpath.join(dirs.output, path)
        path = unzip_shapefile_into(path, dirs.cache, host)

    return dirs.output_path(path)
Given a shapefile href and a set of directories, modify the shapefile name so it's correct with respect to the output and cache directories.
def localize_file_datasource(file_href, dirs):
    # support latest mapnik features of auto-detection
    # of image sizes and jpeg reading support...
    # http://trac.mapnik.org/ticket/508

    mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)

    file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
    scheme, n, path, p, q, f = urlparse(file_href)

    if scheme in ('http', 'https'):
        scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)

    if scheme not in ('file', ''):
        raise Exception("Datasource file needs to be a working, fetchable resource, not %s" % file_href)

    if mapnik_requires_absolute_paths:
        return posixpath.realpath(path)
    else:
        return dirs.output_path(path)
Handle localizing file-based datasources other than shapefiles. This will only work for single-file based types.
def output_path(self, path_name):
    # make sure it is a valid posix format
    path = to_posix(path_name)

    assert (path == path_name), "path_name passed to output_path must be in posix format"

    if posixpath.isabs(path):
        if self.output == self.cache:
            # worth seeing if an absolute path can be avoided
            path = posixpath.relpath(path, self.output)
        else:
            return posixpath.realpath(path)

    if path.startswith('../'):
        joined = posixpath.join(self.output, path)
        return posixpath.realpath(joined)

    return path
Modify a path so it fits expectations. Avoid returning relative paths that start with '../' and possibly return relative paths when output and cache directories match.
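A rough illustration of the intended behaviour using posixpath directly; the directory names are made up and symlinks are assumed absent:

import posixpath

output = cache = '/tmp/build'

# With matching output and cache directories, an absolute path below them is relativized:
posixpath.relpath('/tmp/build/images/marker.png', output)        # 'images/marker.png'

# A relative path that escapes the output directory is resolved back to an absolute one:
joined = posixpath.join(output, '../elsewhere/marker.png')
posixpath.realpath(joined)                                        # '/tmp/elsewhere/marker.png'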
def midpoint(self):
    minpoint = self.leftedge

    if self.leftop is gt:
        minpoint += 1

    maxpoint = self.rightedge

    if self.rightop is lt:
        maxpoint -= 1

    if minpoint is None:
        return maxpoint

    elif maxpoint is None:
        return minpoint

    else:
        return (minpoint + maxpoint) / 2
Return a point guaranteed to fall within this range, hopefully near the middle.
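For example, a range written as 100 < x <= 200 has an exclusive left edge, so the left endpoint is nudged inward before averaging; the same arithmetic spelled out:

# Sketch of the midpoint arithmetic for 100 < x <= 200:
leftedge, rightedge = 100, 200
minpoint = leftedge + 1                 # left operator is '>' (gt): step inside the range
maxpoint = rightedge                    # right operator is '<=' (le): edge already inside
midpoint = (minpoint + maxpoint) / 2    # 150 under Python 2 integer division (150.5 on Python 3)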
def isOpen(self):
    if self.leftedge and self.rightedge and self.leftedge > self.rightedge:
        return False

    if self.leftedge == self.rightedge:
        if self.leftop is gt or self.rightop is lt:
            return False

    return True
Return true if this range has any room in it.
def toFilter(self, property):
    if self.leftedge == self.rightedge and self.leftop is ge and self.rightop is le:
        # equivalent to ==
        return Filter(style.SelectorAttributeTest(property, '=', self.leftedge))

    try:
        return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge),
                      style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
    except KeyError:
        try:
            return Filter(style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
        except KeyError:
            try:
                return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge))
            except KeyError:
                return Filter()
Convert this range to a Filter with tests on the given property.
def isOpen(self):
    equals = {}
    nequals = {}

    for test in self.tests:
        if test.op == '=':
            if equals.has_key(test.property) and test.value != equals[test.property]:
                # we've already stated that this arg must equal something else
                return False

            if nequals.has_key(test.property) and test.value in nequals[test.property]:
                # we've already stated that this arg must not equal its current value
                return False

            equals[test.property] = test.value

        if test.op == '!=':
            if equals.has_key(test.property) and test.value == equals[test.property]:
                # we've already stated that this arg must equal its current value
                return False

            if not nequals.has_key(test.property):
                nequals[test.property] = set()

            nequals[test.property].add(test.value)

    return True
Return true if this filter is not trivially false, i.e. not self-contradictory.
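A self-contained sketch of the same contradiction check, using plain (property, op, value) tuples in place of the library's test objects:

def is_open(tests):
    equals, nequals = {}, {}
    for prop, op, value in tests:
        if op == '=':
            if prop in equals and value != equals[prop]:
                return False              # property already pinned to a different value
            if value in nequals.get(prop, set()):
                return False              # property already forbidden from taking this value
            equals[prop] = value
        elif op == '!=':
            if prop in equals and value == equals[prop]:
                return False              # property already pinned to exactly this value
            nequals.setdefault(prop, set()).add(value)
    return True

print(is_open([('kind', '=', 'road'), ('kind', '!=', 'road')]))   # False
print(is_open([('kind', '=', 'road'), ('name', '!=', 'A1')]))     # True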
def minusExtras(self):
    assert self.isOpen()

    trimmed = self.clone()

    equals = {}

    for test in trimmed.tests:
        if test.op == '=':
            equals[test.property] = test.value

    extras = []

    for (i, test) in enumerate(trimmed.tests):
        if test.op == '!=' and equals.has_key(test.property) and equals[test.property] != test.value:
            extras.append(i)

    while extras:
        trimmed.tests.pop(extras.pop())

    return trimmed
Return a new Filter that's equal to this one, without extra terms that don't add meaning.
def chunk_generator(N, n):
    chunk_size = get_chunk_size(N, n)
    for start in range(0, N, chunk_size):
        yield slice(start, min(start + chunk_size, N))
Returns a generator of slice objects.

Parameters
----------
N : int
    The size of one of the dimensions of a two-dimensional array.
n : int
    The number of arrays of shape ('N', 'get_chunk_size(N, n)') that fit into memory.

Returns
-------
Slice objects of the type 'slice(start, stop)' are generated, representing
the set of indices specified by 'range(start, stop)'.
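A usage sketch with a fixed chunk size standing in for get_chunk_size, which estimates how many rows fit in memory:

import numpy as np

def chunks(N, chunk_size):
    # Standalone variant of chunk_generator with the chunk size given explicitly.
    for start in range(0, N, chunk_size):
        yield slice(start, min(start + chunk_size, N))

X = np.arange(40).reshape(10, 4)
for rows in chunks(X.shape[0], 3):
    block = X[rows]                 # at most 3 rows are held in memory at a time
    print(rows, block.sum())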
def check_HDF5_arrays(hdf5_file, N, convergence_iter):
    Worker.hdf5_lock.acquire()

    with tables.open_file(hdf5_file, 'r+') as fileh:
        if not hasattr(fileh.root, 'aff_prop_group'):
            fileh.create_group(fileh.root, "aff_prop_group")

        atom = tables.Float32Atom()
        filters = None
        #filters = tables.Filters(5, 'blosc')

        for feature in ('availabilities', 'responsibilities',
                        'similarities', 'temporaries'):
            if not hasattr(fileh.root.aff_prop_group, feature):
                fileh.create_carray(fileh.root.aff_prop_group, feature, atom, (N, N),
                                    "Matrix of {0} for affinity "
                                    "propagation clustering".format(feature),
                                    filters = filters)

        if not hasattr(fileh.root.aff_prop_group, 'parallel_updates'):
            fileh.create_carray(fileh.root.aff_prop_group, 'parallel_updates',
                                atom, (N, convergence_iter),
                                "Matrix of parallel updates for affinity propagation "
                                "clustering", filters = filters)

    Worker.hdf5_lock.release()
Check that the HDF5 data structure of file handle 'hdf5_file' has all the
required nodes organizing the various two-dimensional arrays required for
Affinity Propagation clustering ('Responsibility' matrix, 'Availability', etc.).

Parameters
----------
hdf5_file : string or file handle
    Name of the Hierarchical Data Format under consideration.
N : int
    The number of samples in the data-set that will undergo Affinity Propagation
    clustering.
convergence_iter : int
    Number of iterations with no change in the number of estimated clusters
    that stops the convergence.
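For reference, the node layout this guarantees can be written out directly with PyTables; a sketch with a made-up file name and sizes:

import tables

N, convergence_iter = 500, 15
atom = tables.Float32Atom()

with tables.open_file('/tmp/aff_prop.h5', 'w') as fileh:
    group = fileh.create_group(fileh.root, 'aff_prop_group')
    # One N-by-N matrix per quantity tracked by affinity propagation...
    for feature in ('availabilities', 'responsibilities', 'similarities', 'temporaries'):
        fileh.create_carray(group, feature, atom, (N, N))
    # ...plus the cluster-label history used for the convergence test.
    fileh.create_carray(group, 'parallel_updates', atom, (N, convergence_iter))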
def get_sum(hdf5_file, path, array_out, out_lock, rows_slice):
    Worker.hdf5_lock.acquire()

    with tables.open_file(hdf5_file, 'r+') as fileh:
        hdf5_array = fileh.get_node(path)
        tmp = hdf5_array[rows_slice, ...]

    Worker.hdf5_lock.release()

    szum = np.sum(tmp, axis = 0)
    with out_lock:
        array_out += szum

    del tmp
Access an array at node 'path' of the 'hdf5_file', compute the sums along a
slice of rows specified by 'rows_slice' and add the resulting vector to 'array_out'.

Parameters
----------
hdf5_file : string or file handle
    The location of the HDF5 data structure containing the matrices of
    availabilities, responsibilities and similarities, among others.
path : string
    Specify the node where the matrix whose row-sums are to be computed is
    located within the given hierarchical data format.
array_out : multiprocessing.Array object
    This ctypes array is allocated from shared memory and used by various
    processes to store the outcome of their computations.
out_lock : multiprocessing.Lock object
    Synchronize access to the values stored in 'array_out'.
rows_slice : slice object
    Specifies a range of row indices.
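A sketch of the shared-accumulator pattern, without the HDF5 part and run sequentially for brevity; wrapping the shared buffer in a NumPy view via np.frombuffer is one way to make the in-place addition explicit, and is an assumption rather than a detail taken from this code:

import multiprocessing
import numpy as np

N = 6
data = np.random.rand(N, N)

array_out = multiprocessing.Array('d', N)     # zero-initialized shared doubles
out_lock = multiprocessing.Lock()

def partial_sum(rows_slice):
    szum = np.sum(data[rows_slice, ...], axis=0)
    with out_lock:
        np.frombuffer(array_out.get_obj())[:] += szum   # update the shared memory in place

# The real code hands one slice to each worker process; here both run in this process.
for rows_slice in (slice(0, 3), slice(3, 6)):
    partial_sum(rows_slice)

print(np.allclose(np.frombuffer(array_out.get_obj()), data.sum(axis=0)))   # True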
def terminate_processes(pid_list):
    for proc in psutil.process_iter():
        if proc.pid in pid_list:
            proc.terminate()
Terminate a list of processes by sending each of them a SIGTERM signal,
pre-emptively checking if its PID might have been reused.

Parameters
----------
pid_list : list
    A list of process identifiers identifying active processes.
def compute_similarities(hdf5_file, data, N_processes):
    slice_queue = multiprocessing.JoinableQueue()

    pid_list = []
    for i in range(N_processes):
        worker = Similarities_worker(hdf5_file, '/aff_prop_group/similarities',
                                     data, slice_queue)
        worker.daemon = True
        worker.start()
        pid_list.append(worker.pid)

    for rows_slice in chunk_generator(data.shape[0], 2 * N_processes):
        slice_queue.put(rows_slice)

    slice_queue.join()
    slice_queue.close()

    terminate_processes(pid_list)
    gc.collect()
Compute a matrix of pairwise L2 Euclidean distances among samples from 'data'. This computation is to be done in parallel by 'N_processes' distinct processes. Those processes (which are instances of the class 'Similarities_worker') are prevented from simultaneously accessing the HDF5 data structure at 'hdf5_file' through the use of a multiprocessing.Lock object.
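A sketch of the per-slice work each Similarities_worker presumably performs, done in-process with NumPy; whether the worker stores plain, squared or negated distances is an assumption about its internals, not something shown here:

import numpy as np

data = np.random.rand(100, 8)

def distances_for_slice(rows_slice, data):
    # Pairwise squared L2 distances between the rows in 'rows_slice' and all samples.
    block = data[rows_slice]
    diff = block[:, np.newaxis, :] - data[np.newaxis, :, :]
    return np.einsum('ijk,ijk->ij', diff, diff)

S_block = distances_for_slice(slice(0, 25), data)
print(S_block.shape)    # (25, 100)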
def add_preference(hdf5_file, preference):
    Worker.hdf5_lock.acquire()

    with tables.open_file(hdf5_file, 'r+') as fileh:
        S = fileh.root.aff_prop_group.similarities
        diag_ind = np.diag_indices(S.nrows)
        S[diag_ind] = preference

    Worker.hdf5_lock.release()
Assign the value 'preference' to the diagonal entries of the matrix of similarities stored in the HDF5 data structure at 'hdf5_file'.
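The equivalent operation for an in-memory NumPy matrix; using the median of the similarities as the preference is a common default, chosen by the caller rather than by this function:

import numpy as np

S = -np.random.rand(5, 5)                      # stand-in similarity matrix
preference = np.median(S)                      # common default choice of preference
S[np.diag_indices(S.shape[0])] = preference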