Dataset schema (column: type, observed min - max value or string length):

  index      int64    0 - 731k
  package    string   2 - 98 chars
  name       string   1 - 76 chars
  docstring  string   0 - 281k chars
  code       string   4 - 1.07M chars
  signature  string   2 - 42.8k chars
5,341
semantic_version.base
__str__
null
def __str__(self):
    version = '%d' % self.major
    if self.minor is not None:
        version = '%s.%d' % (version, self.minor)
    if self.patch is not None:
        version = '%s.%d' % (version, self.patch)
    if self.prerelease or (self.partial and self.prerelease == () and self.build is None):
        version = '%s-%s' % (version, '.'.join(self.prerelease))
    if self.build or (self.partial and self.build == ()):
        version = '%s+%s' % (version, '.'.join(self.build))
    return version
(self)
5,342
semantic_version.base
next_major
null
def next_major(self):
    # Compare ints with ==, not `is`: identity checks against int literals are
    # implementation-dependent and a SyntaxWarning on modern Python.
    if self.prerelease and self.minor == 0 and self.patch == 0:
        return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
    else:
        return Version('.'.join(str(x) for x in [self.major + 1, 0, 0]))
(self)
5,343
semantic_version.base
next_minor
null
def next_minor(self):
    # As in next_major, == rather than `is` for the int comparison.
    if self.prerelease and self.patch == 0:
        return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
    else:
        return Version(
            '.'.join(str(x) for x in [self.major, self.minor + 1, 0]))
(self)
5,344
semantic_version.base
next_patch
null
def next_patch(self):
    if self.prerelease:
        return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
    else:
        return Version(
            '.'.join(str(x) for x in [self.major, self.minor, self.patch + 1]))
(self)
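The three helpers above implement semantic-version bumps: a prerelease collapses to the release it precedes, otherwise the relevant component is incremented and lower components reset to zero. A minimal usage sketch, assuming the `semantic_version` package is installed and exposes these methods as documented:

from semantic_version import Version

v = Version("1.4.2")
print(v.next_patch())   # 1.4.3
print(v.next_minor())   # 1.5.0
print(v.next_major())   # 2.0.0

# A prerelease "rounds up" to the release it precedes:
print(Version("2.0.0-rc.1").next_major())  # 2.0.0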
5,345
releases
_log
Log debug output if debug setting is on. Intended to be partial'd w/ config at top of functions. Meh.
def _log(txt, config):
    """
    Log debug output if debug setting is on.

    Intended to be partial'd w/ config at top of functions. Meh.
    """
    if config.releases_debug:
        print(txt, file=sys.stderr, flush=True)
(txt, config)
5,347
releases
add_role
null
def add_role(app, name, role_obj):
    # This (introspecting docutils.parser.rst.roles._roles) is the same trick
    # Sphinx uses to emit warnings about double-registering; it's a PITA to try
    # and configure the app early on so it doesn't emit those warnings, so we
    # instead just...don't double-register. Meh.
    if name not in roles._roles:
        app.add_role(name, role_obj)
(app, name, role_obj)
5,348
releases
append_unreleased_entries
Generate new abstract 'releases' for unreleased issues. There's one for each combination of bug-vs-feature & major release line. When only one major release line exists, that dimension is ignored.
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    for family, lines in manager.items():
        # Skip over any unsupported lines
        supported = app.config.releases_supported_versions
        if supported is not None and family not in supported:
            continue
        for type_ in ("bugfix", "feature"):
            bucket = f"unreleased_{type_}"
            if bucket not in lines:  # Implies unstable prehistory + 0.x fam
                continue
            issues = lines[bucket]
            fam_prefix = f"{family}.x " if len(manager) > 1 else ""
            header = f"Next {fam_prefix}{type_} release"
            line = f"unreleased_{family}.x_{type_}"
            releases.append(
                generate_unreleased_entry(header, line, issues, manager, app)
            )
(app, manager, releases)
5,349
releases
construct_entry_with_release
Releases 'eat' the entries in their line's list and get added to the final data structure. They also inform new release-line 'buffers'. Release lines, once the release obj is removed, should be empty or a comma-separated list of issue numbers.
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.

    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.
    """
    log(f"release for line {focus.minor!r}")
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(",")]
    # Do those by themselves since they override all other logic
    if explicit:
        log(f"Explicit issues requested: {explicit!r}")
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                f"Couldn't find issue(s) #{', '.join(missing)} in the changelog!"  # noqa
            )
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log(f"entries in this release: {entries!r}")
        releases.append({"obj": focus, "entries": entries})
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == "bug":
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log(f"Removing #{obj.number} from unreleased")
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]["unreleased_feature"].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]["unreleased_bugfix"]:
                        log(f"Removing #{obj.number} from unreleased")
                        manager[focus.family]["unreleased_bugfix"].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log(f"Removing #{obj.number} from {focus.minor}")
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log(f"Removing #{obj.number} from unreleased")
                manager[focus.family]["unreleased_feature"].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append(
                {
                    "obj": focus,
                    # NOTE: explicitly dumping 0, not focus.family, since this
                    # might be the last pre-historical release and thus not 0.x
                    "entries": manager[0]["unreleased"].copy(),
                }
            )
            manager[0]["unreleased"] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append(
                    {
                        "obj": focus,
                        "entries": manager[focus.family]["unreleased_feature"][:],
                    }
                )
                manager[focus.family]["unreleased_feature"] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor].copy()
                releases.append({"obj": focus, "entries": entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]["unreleased_bugfix"]:
                        manager[focus.family]["unreleased_bugfix"].remove(x)
(focus, issues, manager, log, releases, rest)
5,350
releases
construct_entry_without_release
null
def construct_entry_without_release(focus, issues, manager, log, rest):
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = f"""
Found issue node ({buried[0]!r}) buried inside another node:

{buried[0].parent}

Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.

For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg)
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist("bug")
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(type_="bug", nodelist=nodelist, description=rest)
    else:
        focus.attributes["description"] = rest

    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]["unreleased"].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
(focus, issues, manager, log, rest)
5,351
releases
construct_nodes
null
def construct_nodes(releases):
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d["entries"]:
            continue
        obj = d["obj"]
        entries = []
        for entry in d["entries"]:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry["description"].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing (even under modern Python; the objects here are
            # docutils Nodes whose .copy() is weird) to avoid mutation during
            # the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode["nodelist"]
                        desc[index][subindex : subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry["nodelist"]):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list("", *entries)
        # Insert list into release nodelist (as it's a section)
        obj["nodelist"][0].append(list_)
        # Release header
        header = nodes.paragraph("", "", *obj["nodelist"])
        result.extend(header)
    return result
(releases)
5,352
releases
construct_releases
null
def construct_releases(entries, app):
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1 :], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        #   feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        #   bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        #   they can be displayed prior to release). Caveats include bugs
        #   marked 'major' (they go into unreleased_feature instead) or with
        #   'N.N+' (meaning they only go into release line buckets for that
        #   release and up.)
        # * Support/feature entries marked as 'backported' go into all
        #   release lines as well, on the assumption that they were released to
        #   all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        #   important!) is preserved by stuffing it into the focus (issue)
        #   object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)

    if manager.unstable_prehistory:
        releases.append(
            generate_unreleased_entry(
                header="Next release",
                line="unreleased",
                issues=manager[0]["unreleased"],
                manager=manager,
                app=app,
            )
        )
    else:
        append_unreleased_entries(app, manager, releases)

    reorder_release_entries(releases)

    return releases, manager
(entries, app)
5,353
releases
generate_changelog
null
def generate_changelog(app, doctree, docname):
    desired_docnames = app.config.releases_document_name
    # Ensure we still work mostly-correctly in singlehtml builder situations
    # (must use name substring test as RTD's singlehtml builder doesn't
    # actually inherit from Sphinx's own!)
    is_singlepage = "singlehtml" in app.builder.name
    changelog_names = ["index"] if is_singlepage else desired_docnames
    if docname not in changelog_names:
        return
    # Find an appropriate bullet-list node & replace it with our
    # organized/parsed elements.
    changelog_visitor = BulletListVisitor(
        doctree, app, desired_docnames, is_singlepage
    )
    doctree.walk(changelog_visitor)
(app, doctree, docname)
5,354
releases
generate_unreleased_entry
null
def generate_unreleased_entry(header, line, issues, manager, app):
    log = partial(_log, config=app.config)
    nodelist = [
        release_nodes(
            header,
            app.config.releases_development_branch,
            None,
            app.config,
        )
    ]
    log(f"Creating {line!r} faux-release with {issues!r}")
    return {
        "obj": Release(number=line, date=None, nodelist=nodelist),
        "entries": issues,
    }
(header, line, issues, manager, app)
5,355
releases
handle_first_release_line
Set up initial line-manager entry for first encountered release line. To be called at start of overall process; afterwards, subsequent major lines are generated by `handle_upcoming_major_release`.
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release:
        manager.add_family(obj.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
(entries, manager)
5,356
releases
handle_upcoming_major_release
null
def handle_upcoming_major_release(entries, manager):
    # Short-circuit if the future holds nothing for us
    if not entries:
        return
    # Short-circuit if we're in the middle of a block of releases; only the
    # last release before a bunch of issues should be taking any action.
    if isinstance(entries[0], Release):
        return
    # Iterate through entries til we find the next Release or set of Releases
    next_releases = []
    for index, obj in enumerate(entries):
        if isinstance(obj, Release):
            next_releases.append(obj)
        # Non-empty next_releases + encountered a non-release = done w/ release
        # block.
        elif next_releases:
            break
    # Examine result: is a major release present? If so, add its major number
    # to the line manager!
    for obj in next_releases:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(obj.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(obj.family)
(entries, manager)
5,357
releases
interpolate
null
def interpolate(text, number):
    if "%s" in text:
        return text % number
    return text.format(number=number)
(text, number)
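`interpolate` accepts both old-style `%s` URI templates and new-style `{number}` templates. The function is small enough to inline for a quick illustration; the URIs below are placeholders:

def interpolate(text, number):
    if "%s" in text:
        return text % number
    return text.format(number=number)

print(interpolate("https://github.com/fabric/fabric/issues/%s", 42))
# -> https://github.com/fabric/fabric/issues/42
print(interpolate("https://bugs.example.com/{number}", 42))
# -> https://bugs.example.com/42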
5,358
releases
issue_nodelist
null
def issue_nodelist(name, identifier=None):
    which = f'[<span style="color: #{ISSUE_TYPES[name]};">{name.capitalize()}</span>]'  # noqa
    signifier = [nodes.raw(text=which, format="html")]
    id_nodelist = [nodes.inline(text=" "), identifier] if identifier else []
    trail = [] if identifier else [nodes.inline(text=" ")]
    return signifier + id_nodelist + [nodes.inline(text=":")] + trail
(name, identifier=None)
5,359
releases
issues_role
Use: :issue|bug|feature|support:`ticket_number` When invoked as :issue:, turns into just a "#NN" hyperlink to `releases_issue_uri`. When invoked otherwise, turns into "[Type] <#NN hyperlink>: ". Spaces present in the "ticket number" are used as fields for keywords (major, backported) and/or specs (e.g. '>=1.0'). This data is removed & used when constructing the object. May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ("-", "0"):
        ref = None
        if config.releases_issue_uri:
            ref = interpolate(text=config.releases_issue_uri, number=issue_no)
        elif config.releases_github_path:
            ref = f"https://github.com/{config.releases_github_path}/issues/{issue_no}"  # noqa
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, "#" + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text="#" + issue_no, format="html", **options
            )
    else:
        identifier = None
        issue_no = None  # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ("backported", "major"):
                    keyword = part
                else:
                    # Report the offending token itself ('part'); 'keyword'
                    # would be None (or a previously seen keyword) here.
                    raise ValueError(
                        f"Gave unknown keyword {part!r} for issue no. {issue_no}"  # noqa
                    )
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == "backported"),
            major=(keyword == "major"),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
(name, rawtext, text, lineno, inliner, options={}, content=[])
5,364
functools
partial
partial(func, *args, **keywords) - new function with partial application of the given arguments and keywords.
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        if not callable(func):
            raise TypeError("the first argument must be callable")

        if hasattr(func, "func"):
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        self = super(partial, cls).__new__(cls)
        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(self, /, *args, **keywords):
        keywords = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **keywords)

    @recursive_repr()
    def __repr__(self):
        qualname = type(self).__qualname__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        if type(self).__module__ == "functools":
            return f"functools.{qualname}({', '.join(args)})"
        return f"{qualname}({', '.join(args)})"

    def __reduce__(self):
        return type(self), (self.func,), (self.func, self.args,
                                          self.keywords or None,
                                          self.__dict__ or None)

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
                (kwds is not None and not isinstance(kwds, dict)) or
                (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args)  # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict:  # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds
null
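The `releases` helpers earlier in this section use `functools.partial` to pre-bind `config` into `_log`. A brief sketch of `partial` itself, mirroring that pattern:

from functools import partial

def log(txt, config):
    if config.get("debug"):
        print(txt)

debug_log = partial(log, config={"debug": True})
debug_log("hello")  # prints "hello"
print(debug_log)    # functools.partial(<function log at 0x...>, config={'debug': True})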
5,366
releases
release_nodes
null
def release_nodes(text, slug, date, config):
    # Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    # title and give it these HTML attributes during render time") so...fuckit.
    # We were already doing fully raw elements elsewhere anyway. And who cares
    # about a PDF of a changelog? :x
    uri = None
    if config.releases_release_uri:
        uri = interpolate(text=config.releases_release_uri, number=slug)
    elif config.releases_github_path:
        uri = f"https://github.com/{config.releases_github_path}/tree/{slug}"
    # Only construct link tag if user actually configured release URIs somehow
    if uri:
        link = f'<a class="reference external" href="{uri}">{text}</a>'
    else:
        link = text
    datespan = ""
    if date:
        datespan = f' <span style="font-size: 75%;">{date}</span>'
    header = f'<h2 style="margin-bottom: 0.3em;">{link}{datespan}</h2>'
    return nodes.section(
        "", nodes.raw(rawtext="", text=header, format="html"), ids=[text]
    )
(text, slug, date, config)
5,367
releases
release_role
Invoked as :release:`N.N.N <YYYY-MM-DD>`. Turns into useful release header + link to GH tree for the tag.
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.
    """
    # Make sure year has been specified
    match = year_arg_re.match(text)
    if not match:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node
    node = Release(number=number, date=date, nodelist=nodelist)
    return [node], []
(name, rawtext, text, lineno, inliner, options={}, content=[])
5,368
releases
reorder_release_entries
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entrylist in each is ordered by
    feature/bug/etc.
    """
    order = {"feature": 0, "bug": 1, "support": 2}
    for release in releases:
        entries = release["entries"].copy()
        release["entries"] = sorted(entries, key=lambda x: order[x.type])
(releases)
5,370
releases
scan_for_spec
Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived.
def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens, discard
    keyword = keyword.lstrip("(").rstrip(")")
    # First, test for intermediate '1.2+' style
    matches = release_line_re.findall(keyword)
    if matches:
        return Spec(f">={matches[0]}")
    # Failing that, see if Spec can make sense of it
    try:
        return Spec(keyword)
    # I've only ever seen Spec fail with ValueError.
    except ValueError:
        return None
(keyword)
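To make the two accepted spec formats concrete, here is a self-contained sketch. The `release_line_re` pattern below is an assumption standing in for the real one defined elsewhere in `releases`; `Spec` is `semantic_version.Spec` as in the source above:

import re
from semantic_version import Spec

# Hypothetical stand-in for releases' release_line_re ('1.2+' style).
release_line_re = re.compile(r"^(\d+\.\d+)\+$")

def scan_for_spec(keyword):
    keyword = keyword.lstrip("(").rstrip(")")
    matches = release_line_re.findall(keyword)
    if matches:
        return Spec(f">={matches[0]}")
    try:
        return Spec(keyword)
    except ValueError:
        return None

print(scan_for_spec("(1.2+)"))  # >=1.2
print(scan_for_spec(">=1.0"))   # >=1.0
print(scan_for_spec("major"))   # None - a keyword, not a spec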
5,371
releases
setup
null
def setup(app):
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ("issue_uri", None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ("release_uri", None),
        # Convenience Github version of above
        ("github_path", None),
        # Which branch to use for unreleased feature items
        # TODO 3.0: s/master/main/
        ("development_branch", "master"),
        # Which versions to show unreleased buckets for
        ("supported_versions", None),
        # Which document to use as the changelog
        ("document_name", ["changelog"]),
        # Debug output
        ("debug", False),
        # Whether to enable linear history during 0.x release timeline
        # TODO 3.0: flip this to True by default?
        ("unstable_prehistory", False),
    ):
        app.add_config_value(
            name=f"releases_{key}", default=default, rebuild="html"
        )
    if isinstance(app.config.releases_document_name, str):
        app.config.releases_document_name = [app.config.releases_document_name]

    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ["issue"]:
        add_role(app, x, issues_role)
    add_role(app, "release", release_role)

    # Hook in our changelog transmutation at appropriate step
    app.connect("doctree-resolved", generate_changelog)

    # Identifies the version of our extension
    return {"version": __version__}
(app)
5,374
credis.base
AuthenticationError
null
from credis.base import AuthenticationError
null
5,375
credis.base
Connection
Manages TCP communication to and from a Redis server
from credis.base import Connection
null
5,376
credis.base
ConnectionError
null
from credis.base import ConnectionError
null
5,377
credis.base
RedisProtocolError
null
from credis.base import RedisProtocolError
null
5,378
credis.base
RedisReplyError
null
from credis.base import RedisReplyError
null
5,380
notifications_python_client.notifications
NotificationsAPIClient
null
class NotificationsAPIClient(BaseAPIClient):
    def send_sms_notification(
        self, phone_number, template_id, personalisation=None, reference=None, sms_sender_id=None
    ):
        notification = {"phone_number": phone_number, "template_id": template_id}
        if personalisation:
            notification.update({"personalisation": personalisation})
        if reference:
            notification.update({"reference": reference})
        if sms_sender_id:
            notification.update({"sms_sender_id": sms_sender_id})
        return self.post("/v2/notifications/sms", data=notification)

    def send_email_notification(
        self, email_address, template_id, personalisation=None, reference=None, email_reply_to_id=None
    ):
        notification = {"email_address": email_address, "template_id": template_id}
        if personalisation:
            notification.update({"personalisation": personalisation})
        if reference:
            notification.update({"reference": reference})
        if email_reply_to_id:
            notification.update({"email_reply_to_id": email_reply_to_id})
        return self.post("/v2/notifications/email", data=notification)

    def send_letter_notification(self, template_id, personalisation, reference=None):
        notification = {"template_id": template_id, "personalisation": personalisation}
        if reference:
            notification.update({"reference": reference})
        return self.post("/v2/notifications/letter", data=notification)

    def send_precompiled_letter_notification(self, reference, pdf_file, postage=None):
        content = base64.b64encode(pdf_file.read()).decode("utf-8")
        notification = {"reference": reference, "content": content}
        if postage:
            notification["postage"] = postage
        return self.post("/v2/notifications/letter", data=notification)

    def get_received_texts(self, older_than=None):
        if older_than:
            query_string = "?older_than={}".format(older_than)
        else:
            query_string = ""
        return self.get("/v2/received-text-messages{}".format(query_string))

    def get_received_texts_iterator(self, older_than=None):
        result = self.get_received_texts(older_than=older_than)
        received_texts = result.get("received_text_messages")
        while received_texts:
            for received_text in received_texts:
                yield received_text
            next_link = result["links"].get("next")
            received_text_id = re.search("[0-F]{8}-[0-F]{4}-[0-F]{4}-[0-F]{4}-[0-F]{12}", next_link, re.I).group(0)
            # Fetch the next page directly; calling the iterator recursively
            # (as the original line did) hands back a generator, which has no
            # .get() and would crash on the following line.
            result = self.get_received_texts(older_than=received_text_id)
            received_texts = result.get("received_text_messages")

    def get_notification_by_id(self, id):
        return self.get("/v2/notifications/{}".format(id))

    def get_pdf_for_letter(self, id):
        url = "/v2/notifications/{}/pdf".format(id)
        logger.debug("API request %s %s", "GET", url)
        url, kwargs = self._create_request_objects(url, data=None, params=None)
        response = self._perform_request("GET", url, kwargs)
        return BytesIO(response.content)

    def get_all_notifications(
        self, status=None, template_type=None, reference=None, older_than=None, include_jobs=None
    ):
        data = {}
        if status:
            data.update({"status": status})
        if template_type:
            data.update({"template_type": template_type})
        if reference:
            data.update({"reference": reference})
        if older_than:
            data.update({"older_than": older_than})
        if include_jobs:
            data.update({"include_jobs": include_jobs})
        return self.get("/v2/notifications", params=data)

    def get_all_notifications_iterator(self, status=None, template_type=None, reference=None, older_than=None):
        result = self.get_all_notifications(status, template_type, reference, older_than)
        notifications = result.get("notifications")
        while notifications:
            for notification in notifications:
                yield notification
            next_link = result["links"].get("next")
            notification_id = re.search("[0-F]{8}-[0-F]{4}-[0-F]{4}-[0-F]{4}-[0-F]{12}", next_link, re.I).group(0)
            result = self.get_all_notifications(status, template_type, reference, notification_id)
            notifications = result.get("notifications")

    def post_template_preview(self, template_id, personalisation):
        template = {"personalisation": personalisation}
        return self.post("/v2/template/{}/preview".format(template_id), data=template)

    def get_template(self, template_id):
        return self.get("/v2/template/{}".format(template_id))

    def get_template_version(self, template_id, version):
        return self.get("/v2/template/{}/version/{}".format(template_id, version))

    def get_all_template_versions(self, template_id):
        return self.get("service/{}/template/{}/versions".format(self.service_id, template_id))

    def get_all_templates(self, template_type=None):
        _template_type = "?type={}".format(template_type) if template_type else ""
        return self.get("/v2/templates{}".format(_template_type))
(api_key, base_url='https://api.notifications.service.gov.uk', timeout=30)
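The class above is the main entry point of the GOV.UK Notify Python client. A minimal usage sketch; the API key and template id below are placeholders:

from notifications_python_client.notifications import NotificationsAPIClient

client = NotificationsAPIClient("placeholder-api-key")  # real keys embed service id + secret

response = client.send_email_notification(
    email_address="someone@example.com",
    template_id="00000000-0000-0000-0000-000000000000",  # placeholder template UUID
    personalisation={"name": "Jo"},
)
print(response["id"])  # the v2 API returns the notification id, among other fields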
5,381
notifications_python_client.base
__init__
Initialise the client. Error if either of base_url or api_key missing. :param api_key: API key, embedding the service id and the secret used to sign requests :param base_url: base URL of GOV.UK Notify API :param timeout: request timeout on the client :return:
def __init__(self, api_key, base_url="https://api.notifications.service.gov.uk", timeout=30):
    """
    Initialise the client. Error if either of base_url or api_key missing.

    :param api_key: API key, embedding the service id and the secret used to sign requests
    :param base_url: base URL of GOV.UK Notify API
    :param timeout: request timeout on the client
    :return:
    """
    service_id = api_key[-73:-37]
    api_key = api_key[-36:]
    assert base_url, "Missing base url"
    assert service_id, "Missing service ID"
    assert api_key, "Missing API key"
    self.base_url = base_url
    self.service_id = service_id
    self.api_key = api_key
    self.timeout = timeout
    self.request_session = requests.Session()
(self, api_key, base_url='https://api.notifications.service.gov.uk', timeout=30)
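The slicing in `__init__` relies on the Notify API key layout: a human-readable name followed by the 36-character service UUID and the 36-character secret UUID, hyphen-separated. A worked illustration with a made-up key:

# Made-up key: "<name>-<service uuid (36 chars)>-<secret uuid (36 chars)>"
api_key = (
    "my_service"
    "-11111111-1111-1111-1111-111111111111"  # service id
    "-22222222-2222-2222-2222-222222222222"  # secret
)
service_id = api_key[-73:-37]  # '11111111-1111-1111-1111-111111111111'
secret = api_key[-36:]         # '22222222-2222-2222-2222-222222222222'
print(service_id, secret)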
5,382
notifications_python_client.base
_create_request_objects
null
def _create_request_objects(self, url, data, params):
    api_token = create_jwt_token(self.api_key, self.service_id)

    kwargs = {"headers": self.generate_headers(api_token), "timeout": self.timeout}

    if data is not None:
        kwargs.update(data=self._serialize_data(data))

    if params is not None:
        kwargs.update(params=params)

    url = urllib.parse.urljoin(str(self.base_url), str(url))

    return url, kwargs
(self, url, data, params)
5,383
notifications_python_client.base
_extended_json_encoder
null
def _extended_json_encoder(self, obj):
    if isinstance(obj, set):
        return list(obj)

    raise TypeError
(self, obj)
5,384
notifications_python_client.base
_perform_request
null
def _perform_request(self, method, url, kwargs):
    start_time = time.monotonic()
    try:
        response = self.request_session.request(method, url, **kwargs)
        response.raise_for_status()
        return response
    except requests.RequestException as e:
        api_error = HTTPError.create(e)
        logger.warning(
            "API %s request on %s failed with %s '%s'",
            method,
            url,
            api_error.status_code,
            api_error.message,
        )
        raise api_error
    finally:
        elapsed_time = time.monotonic() - start_time
        logger.debug("API %s request on %s finished in %s", method, url, elapsed_time)
(self, method, url, kwargs)
5,385
notifications_python_client.base
_process_json_response
null
def _process_json_response(self, response):
    try:
        if response.status_code == 204:
            return
        return response.json()
    except ValueError as e:
        raise InvalidResponse(response, message="No JSON response object could be decoded") from e
(self, response)
5,386
notifications_python_client.base
_serialize_data
null
def _serialize_data(self, data):
    return json.dumps(data, default=self._extended_json_encoder)
(self, data)
5,387
notifications_python_client.base
delete
null
def delete(self, url, data=None):
    return self.request("DELETE", url, data=data)
(self, url, data=None)
5,388
notifications_python_client.base
generate_headers
null
def generate_headers(self, api_token):
    return {
        "Content-type": "application/json",
        "Authorization": "Bearer {}".format(api_token),
        "User-agent": "NOTIFY-API-PYTHON-CLIENT/{}".format(__version__),
    }
(self, api_token)
5,389
notifications_python_client.base
get
null
def get(self, url, params=None):
    return self.request("GET", url, params=params)
(self, url, params=None)
5,390
notifications_python_client.notifications
get_all_notifications
null
def get_all_notifications(
    self, status=None, template_type=None, reference=None, older_than=None, include_jobs=None
):
    data = {}
    if status:
        data.update({"status": status})
    if template_type:
        data.update({"template_type": template_type})
    if reference:
        data.update({"reference": reference})
    if older_than:
        data.update({"older_than": older_than})
    if include_jobs:
        data.update({"include_jobs": include_jobs})
    return self.get("/v2/notifications", params=data)
(self, status=None, template_type=None, reference=None, older_than=None, include_jobs=None)
5,391
notifications_python_client.notifications
get_all_notifications_iterator
null
def get_all_notifications_iterator(self, status=None, template_type=None, reference=None, older_than=None):
    result = self.get_all_notifications(status, template_type, reference, older_than)
    notifications = result.get("notifications")
    while notifications:
        for notification in notifications:
            yield notification
        next_link = result["links"].get("next")
        # '[0-F]' with re.I spans the hex digits (plus a few intervening ASCII
        # punctuation chars), which is enough to pull the UUID out of the link.
        notification_id = re.search("[0-F]{8}-[0-F]{4}-[0-F]{4}-[0-F]{4}-[0-F]{12}", next_link, re.I).group(0)
        result = self.get_all_notifications(status, template_type, reference, notification_id)
        notifications = result.get("notifications")
(self, status=None, template_type=None, reference=None, older_than=None)
5,392
notifications_python_client.notifications
get_all_template_versions
null
def get_all_template_versions(self, template_id):
    return self.get("service/{}/template/{}/versions".format(self.service_id, template_id))
(self, template_id)
5,393
notifications_python_client.notifications
get_all_templates
null
def get_all_templates(self, template_type=None):
    _template_type = "?type={}".format(template_type) if template_type else ""
    return self.get("/v2/templates{}".format(_template_type))
(self, template_type=None)
5,394
notifications_python_client.notifications
get_notification_by_id
null
def get_notification_by_id(self, id):
    return self.get("/v2/notifications/{}".format(id))
(self, id)
5,395
notifications_python_client.notifications
get_pdf_for_letter
null
def get_pdf_for_letter(self, id):
    url = "/v2/notifications/{}/pdf".format(id)
    logger.debug("API request %s %s", "GET", url)

    url, kwargs = self._create_request_objects(url, data=None, params=None)

    response = self._perform_request("GET", url, kwargs)

    return BytesIO(response.content)
(self, id)
5,396
notifications_python_client.notifications
get_received_texts
null
def get_received_texts(self, older_than=None):
    if older_than:
        query_string = "?older_than={}".format(older_than)
    else:
        query_string = ""
    return self.get("/v2/received-text-messages{}".format(query_string))
(self, older_than=None)
5,397
notifications_python_client.notifications
get_received_texts_iterator
null
def get_received_texts_iterator(self, older_than=None):
    result = self.get_received_texts(older_than=older_than)
    received_texts = result.get("received_text_messages")
    while received_texts:
        for received_text in received_texts:
            yield received_text
        next_link = result["links"].get("next")
        received_text_id = re.search("[0-F]{8}-[0-F]{4}-[0-F]{4}-[0-F]{4}-[0-F]{12}", next_link, re.I).group(0)
        # Fetch the next page directly; calling the iterator recursively (as
        # the original line did) hands back a generator, which has no .get()
        # and would crash on the following line.
        result = self.get_received_texts(older_than=received_text_id)
        received_texts = result.get("received_text_messages")
(self, older_than=None)
5,398
notifications_python_client.notifications
get_template
null
def get_template(self, template_id):
    return self.get("/v2/template/{}".format(template_id))
(self, template_id)
5,399
notifications_python_client.notifications
get_template_version
null
def get_template_version(self, template_id, version):
    return self.get("/v2/template/{}/version/{}".format(template_id, version))
(self, template_id, version)
5,400
notifications_python_client.base
post
null
def post(self, url, data):
    return self.request("POST", url, data=data)
(self, url, data)
5,401
notifications_python_client.notifications
post_template_preview
null
def post_template_preview(self, template_id, personalisation):
    template = {"personalisation": personalisation}
    return self.post("/v2/template/{}/preview".format(template_id), data=template)
(self, template_id, personalisation)
5,402
notifications_python_client.base
put
null
def put(self, url, data):
    return self.request("PUT", url, data=data)
(self, url, data)
5,403
notifications_python_client.base
request
null
def request(self, method, url, data=None, params=None):
    logger.debug("API request %s %s", method, url)

    url, kwargs = self._create_request_objects(url, data, params)

    response = self._perform_request(method, url, kwargs)

    return self._process_json_response(response)
(self, method, url, data=None, params=None)
5,404
notifications_python_client.notifications
send_email_notification
null
def send_email_notification(
    self, email_address, template_id, personalisation=None, reference=None, email_reply_to_id=None
):
    notification = {"email_address": email_address, "template_id": template_id}
    if personalisation:
        notification.update({"personalisation": personalisation})
    if reference:
        notification.update({"reference": reference})
    if email_reply_to_id:
        notification.update({"email_reply_to_id": email_reply_to_id})
    return self.post("/v2/notifications/email", data=notification)
(self, email_address, template_id, personalisation=None, reference=None, email_reply_to_id=None)
5,405
notifications_python_client.notifications
send_letter_notification
null
def send_letter_notification(self, template_id, personalisation, reference=None):
    notification = {"template_id": template_id, "personalisation": personalisation}
    if reference:
        notification.update({"reference": reference})
    return self.post("/v2/notifications/letter", data=notification)
(self, template_id, personalisation, reference=None)
5,406
notifications_python_client.notifications
send_precompiled_letter_notification
null
def send_precompiled_letter_notification(self, reference, pdf_file, postage=None):
    content = base64.b64encode(pdf_file.read()).decode("utf-8")
    notification = {"reference": reference, "content": content}
    if postage:
        notification["postage"] = postage
    return self.post("/v2/notifications/letter", data=notification)
(self, reference, pdf_file, postage=None)
5,407
notifications_python_client.notifications
send_sms_notification
null
def send_sms_notification(
    self, phone_number, template_id, personalisation=None, reference=None, sms_sender_id=None
):
    notification = {"phone_number": phone_number, "template_id": template_id}
    if personalisation:
        notification.update({"personalisation": personalisation})
    if reference:
        notification.update({"reference": reference})
    if sms_sender_id:
        notification.update({"sms_sender_id": sms_sender_id})
    return self.post("/v2/notifications/sms", data=notification)
(self, phone_number, template_id, personalisation=None, reference=None, sms_sender_id=None)
5,412
notifications_python_client.utils
prepare_upload
null
def prepare_upload(f, filename=None, confirm_email_before_download=None, retention_period=None):
    contents = f.read()

    if len(contents) > DOCUMENT_UPLOAD_SIZE_LIMIT:
        raise ValueError("File is larger than 2MB")

    file_data = {
        "file": base64.b64encode(contents).decode("ascii"),
        "filename": filename,
        "confirm_email_before_download": confirm_email_before_download,
        "retention_period": retention_period,
    }

    return file_data
(f, filename=None, confirm_email_before_download=None, retention_period=None)
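`prepare_upload` builds the dict that Notify's file-by-email feature expects as a personalisation value. A hedged usage sketch; the template id and the `link_to_file` field name are placeholders that must match a real template:

from notifications_python_client import prepare_upload

with open("report.pdf", "rb") as f:
    personalisation = {
        "link_to_file": prepare_upload(f, filename="report.pdf"),  # placeholder field name
    }

# 'client' is a NotificationsAPIClient as constructed earlier
response = client.send_email_notification(
    email_address="someone@example.com",
    template_id="00000000-0000-0000-0000-000000000000",  # placeholder template UUID
    personalisation=personalisation,
)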
5,415
ascon._ascon
ascon_decrypt
Ascon decryption. key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security) or 20 (for Ascon-80pq; 128-bit security) nonce: a bytes object of size 16 (must not repeat for the same key!) associateddata: a bytes object of arbitrary length ciphertext: a bytes object of arbitrary length (also contains tag) variant: "Ascon-128", "Ascon-128a", or "Ascon-80pq" (specifies key size, rate and number of rounds) returns a bytes object containing the plaintext or None if verification fails
def ascon_decrypt(key, nonce, associateddata, ciphertext, variant="Ascon-128"):
    """
    Ascon decryption.

    key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security) or 20 (for Ascon-80pq; 128-bit security)
    nonce: a bytes object of size 16 (must not repeat for the same key!)
    associateddata: a bytes object of arbitrary length
    ciphertext: a bytes object of arbitrary length (also contains tag)
    variant: "Ascon-128", "Ascon-128a", or "Ascon-80pq" (specifies key size, rate and number of rounds)

    returns a bytes object containing the plaintext or None if verification fails
    """
    assert variant in ["Ascon-128", "Ascon-128a", "Ascon-80pq"]
    assert len(nonce) == 16 and (len(key) == 16 or (len(key) == 20 and variant == "Ascon-80pq"))
    assert len(ciphertext) >= 16
    S = [0, 0, 0, 0, 0]
    k = len(key) * 8  # bits
    a = 12  # rounds
    b = 8 if variant == "Ascon-128a" else 6  # rounds
    rate = 16 if variant == "Ascon-128a" else 8  # bytes

    ascon_initialize(S, k, rate, a, b, key, nonce)
    ascon_process_associated_data(S, b, rate, associateddata)
    plaintext = ascon_process_ciphertext(S, b, rate, ciphertext[:-16])
    tag = ascon_finalize(S, rate, a, key)
    if tag == ciphertext[-16:]:
        return plaintext
    else:
        return None
(key, nonce, associateddata, ciphertext, variant='Ascon-128')
5,416
ascon._ascon
ascon_encrypt
Ascon encryption. key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security) or 20 (for Ascon-80pq; 128-bit security) nonce: a bytes object of size 16 (must not repeat for the same key!) associateddata: a bytes object of arbitrary length plaintext: a bytes object of arbitrary length variant: "Ascon-128", "Ascon-128a", or "Ascon-80pq" (specifies key size, rate and number of rounds) returns a bytes object of length len(plaintext)+16 containing the ciphertext and tag
def ascon_encrypt(key, nonce, associateddata, plaintext, variant="Ascon-128"):
    """
    Ascon encryption.

    key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security) or 20 (for Ascon-80pq; 128-bit security)
    nonce: a bytes object of size 16 (must not repeat for the same key!)
    associateddata: a bytes object of arbitrary length
    plaintext: a bytes object of arbitrary length
    variant: "Ascon-128", "Ascon-128a", or "Ascon-80pq" (specifies key size, rate and number of rounds)

    returns a bytes object of length len(plaintext)+16 containing the ciphertext and tag
    """
    assert variant in ["Ascon-128", "Ascon-128a", "Ascon-80pq"]
    assert len(nonce) == 16 and (len(key) == 16 or (len(key) == 20 and variant == "Ascon-80pq"))
    S = [0, 0, 0, 0, 0]
    k = len(key) * 8  # bits
    a = 12  # rounds
    b = 8 if variant == "Ascon-128a" else 6  # rounds
    rate = 16 if variant == "Ascon-128a" else 8  # bytes

    ascon_initialize(S, k, rate, a, b, key, nonce)
    ascon_process_associated_data(S, b, rate, associateddata)
    ciphertext = ascon_process_plaintext(S, b, rate, plaintext)
    tag = ascon_finalize(S, rate, a, key)
    return ciphertext + tag
(key, nonce, associateddata, plaintext, variant='Ascon-128')
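Encryption and decryption above are inverses when key, nonce, associated data, and variant all match; any mismatch makes `ascon_decrypt` return None. A round-trip sketch, assuming the reference module above is importable as `ascon` with these functions at top level:

import os
import ascon  # assumed import of the pure-Python reference implementation shown above

key = os.urandom(16)
nonce = os.urandom(16)  # must never repeat under the same key
ad = b"header"
msg = b"attack at dawn"

ct = ascon.ascon_encrypt(key, nonce, ad, msg)                 # ciphertext + 16-byte tag
assert ascon.ascon_decrypt(key, nonce, ad, ct) == msg         # round-trips
assert ascon.ascon_decrypt(key, nonce, b"other", ct) is None  # wrong AD -> rejected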
5,417
ascon._ascon
ascon_hash
Ascon hash function and extendable-output function. message: a bytes object of arbitrary length variant: "Ascon-Hash", "Ascon-Hasha" (both with 256-bit output for 128-bit security), "Ascon-Xof", or "Ascon-Xofa" (both with arbitrary output length, security=min(128, bitlen/2)) hashlength: the requested output bytelength (must be 32 for variant "Ascon-Hash"; can be arbitrary for Ascon-Xof, but should be >= 32 for 128-bit security) returns a bytes object containing the hash tag
def ascon_hash(message, variant="Ascon-Hash", hashlength=32):
    """
    Ascon hash function and extendable-output function.

    message: a bytes object of arbitrary length
    variant: "Ascon-Hash", "Ascon-Hasha" (both with 256-bit output for 128-bit security),
        "Ascon-Xof", or "Ascon-Xofa" (both with arbitrary output length, security=min(128, bitlen/2))
    hashlength: the requested output bytelength (must be 32 for variant "Ascon-Hash";
        can be arbitrary for Ascon-Xof, but should be >= 32 for 128-bit security)

    returns a bytes object containing the hash tag
    """
    assert variant in ["Ascon-Hash", "Ascon-Hasha", "Ascon-Xof", "Ascon-Xofa"]
    if variant in ["Ascon-Hash", "Ascon-Hasha"]:
        assert hashlength == 32
    a = 12  # rounds
    b = 8 if variant in ["Ascon-Hasha", "Ascon-Xofa"] else 12
    rate = 8  # bytes

    # Initialization
    tagspec = int_to_bytes(256 if variant in ["Ascon-Hash", "Ascon-Hasha"] else 0, 4)
    S = bytes_to_state(to_bytes([0, rate * 8, a, a-b]) + tagspec + zero_bytes(32))
    if debug: printstate(S, "initial value:")

    ascon_permutation(S, a)
    if debug: printstate(S, "initialization:")

    # Message Processing (Absorbing)
    m_padding = to_bytes([0x80]) + zero_bytes(rate - (len(message) % rate) - 1)
    m_padded = message + m_padding
    # first s-1 blocks
    for block in range(0, len(m_padded) - rate, rate):
        S[0] ^= bytes_to_int(m_padded[block:block+8])  # rate=8
        ascon_permutation(S, b)
    # last block
    block = len(m_padded) - rate
    S[0] ^= bytes_to_int(m_padded[block:block+8])  # rate=8
    if debug: printstate(S, "process message:")

    # Finalization (Squeezing)
    H = b""
    ascon_permutation(S, a)
    while len(H) < hashlength:
        H += int_to_bytes(S[0], 8)  # rate=8
        ascon_permutation(S, b)
    if debug: printstate(S, "finalization:")
    return H[:hashlength]
(message, variant='Ascon-Hash', hashlength=32)
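A short sketch of the hash/XOF interface, under the same `ascon` import assumption as the previous example:

import ascon  # assumed import, as above

digest = ascon.ascon_hash(b"message")  # 32-byte Ascon-Hash digest
stream = ascon.ascon_hash(b"message", variant="Ascon-Xof", hashlength=64)  # arbitrary-length output
print(digest.hex(), len(stream))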
5,418
ascon._ascon
ascon_mac
Ascon message authentication code (MAC) and pseudorandom function (PRF). key: a bytes object of size 16 message: a bytes object of arbitrary length (<= 16 for "Ascon-PrfShort") variant: "Ascon-Mac", "Ascon-Maca" (both 128-bit output, arbitrarily long input), "Ascon-Prf", "Ascon-Prfa" (both arbitrarily long input and output), or "Ascon-PrfShort" (t-bit output for t<=128, m-bit input for m<=128) taglength: the requested output bytelength l/8 (must be <=16 for variants "Ascon-Mac", "Ascon-Maca", and "Ascon-PrfShort", arbitrary for "Ascon-Prf", "Ascon-Prfa"; should be >= 16 for 128-bit security) returns a bytes object containing the authentication tag
def ascon_mac(key, message, variant="Ascon-Mac", taglength=16):
    """
    Ascon message authentication code (MAC) and pseudorandom function (PRF).

    key: a bytes object of size 16
    message: a bytes object of arbitrary length (<= 16 for "Ascon-PrfShort")
    variant: "Ascon-Mac", "Ascon-Maca" (both 128-bit output, arbitrarily long input),
        "Ascon-Prf", "Ascon-Prfa" (both arbitrarily long input and output), or
        "Ascon-PrfShort" (t-bit output for t<=128, m-bit input for m<=128)
    taglength: the requested output bytelength l/8 (must be <=16 for variants
        "Ascon-Mac", "Ascon-Maca", and "Ascon-PrfShort", arbitrary for
        "Ascon-Prf", "Ascon-Prfa"; should be >= 16 for 128-bit security)

    returns a bytes object containing the authentication tag
    """
    assert variant in ["Ascon-Mac", "Ascon-Prf", "Ascon-Maca", "Ascon-Prfa", "Ascon-PrfShort"]
    if variant in ["Ascon-Mac", "Ascon-Maca"]:
        assert len(key) == 16 and taglength <= 16
    if variant in ["Ascon-Prf", "Ascon-Prfa"]:
        assert len(key) == 16
    if variant == "Ascon-PrfShort":
        assert len(key) == 16 and taglength <= 16 and len(message) <= 16
    a = 12  # rounds
    b = 8 if variant in ["Ascon-Prfa", "Ascon-Maca"] else 12  # rounds
    msgblocksize = 40 if variant in ["Ascon-Prfa", "Ascon-Maca"] else 32  # bytes (input rate for Mac, Prf)
    rate = 16  # bytes (output rate)

    if variant == "Ascon-PrfShort":
        # Initialization + Message Processing (Absorbing)
        IV = to_bytes([len(key) * 8, len(message)*8, a + 64, taglength * 8]) + zero_bytes(4)
        S = bytes_to_state(IV + key + message + zero_bytes(16 - len(message)))
        if debug: printstate(S, "initial value:")

        ascon_permutation(S, a)
        if debug: printstate(S, "process message:")

        # Finalization (Squeezing)
        T = int_to_bytes(S[3] ^ bytes_to_int(key[0:8]), 8) + int_to_bytes(S[4] ^ bytes_to_int(key[8:16]), 8)
        return T[:taglength]

    else:  # Ascon-Prf, Ascon-Prfa, Ascon-Mac, Ascon-Maca
        # Initialization
        if variant in ["Ascon-Mac", "Ascon-Maca"]:
            tagspec = int_to_bytes(16*8, 4)
        if variant in ["Ascon-Prf", "Ascon-Prfa"]:
            tagspec = int_to_bytes(0*8, 4)
        S = bytes_to_state(to_bytes([len(key) * 8, rate * 8, a + 128, a-b]) + tagspec + key + zero_bytes(16))
        if debug: printstate(S, "initial value:")

        ascon_permutation(S, a)
        if debug: printstate(S, "initialization:")

        # Message Processing (Absorbing)
        m_padding = to_bytes([0x80]) + zero_bytes(msgblocksize - (len(message) % msgblocksize) - 1)
        m_padded = message + m_padding
        # first s-1 blocks
        for block in range(0, len(m_padded) - msgblocksize, msgblocksize):
            S[0] ^= bytes_to_int(m_padded[block:block+8])  # msgblocksize=32 bytes
            S[1] ^= bytes_to_int(m_padded[block+8:block+16])
            S[2] ^= bytes_to_int(m_padded[block+16:block+24])
            S[3] ^= bytes_to_int(m_padded[block+24:block+32])
            if variant in ["Ascon-Prfa", "Ascon-Maca"]:
                S[4] ^= bytes_to_int(m_padded[block+32:block+40])
            ascon_permutation(S, b)
        # last block
        block = len(m_padded) - msgblocksize
        S[0] ^= bytes_to_int(m_padded[block:block+8])  # msgblocksize=32 bytes
        S[1] ^= bytes_to_int(m_padded[block+8:block+16])
        S[2] ^= bytes_to_int(m_padded[block+16:block+24])
        S[3] ^= bytes_to_int(m_padded[block+24:block+32])
        if variant in ["Ascon-Prfa", "Ascon-Maca"]:
            S[4] ^= bytes_to_int(m_padded[block+32:block+40])
        S[4] ^= 1
        if debug: printstate(S, "process message:")

        # Finalization (Squeezing)
        T = b""
        ascon_permutation(S, a)
        while len(T) < taglength:
            T += int_to_bytes(S[0], 8)  # rate=16
            T += int_to_bytes(S[1], 8)
            ascon_permutation(S, b)
        if debug: printstate(S, "finalization:")
        return T[:taglength]
(key, message, variant='Ascon-Mac', taglength=16)
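The MAC interface follows the same shape; verification is by recomputing the tag and comparing. Same import assumption as above:

import os
import ascon  # assumed import, as above

key = os.urandom(16)
tag = ascon.ascon_mac(key, b"message")          # 16-byte Ascon-Mac tag
assert tag == ascon.ascon_mac(key, b"message")  # verify by recomputation

In real use the comparison should be constant-time, e.g. via hmac.compare_digest, to avoid timing side channels.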
5,419
xml.etree.ElementTree
Comment
Comment element factory. This function creates a special element which the standard serializer serializes as an XML comment. *text* is a string containing the comment string.
def Comment(text=None):
    """Comment element factory.

    This function creates a special element which the standard serializer
    serializes as an XML comment.

    *text* is a string containing the comment string.
    """
    element = Element(Comment)
    element.text = text
    return element
(text=None)
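Because `Comment` nodes use the factory function itself as their tag, the serializer can recognise and emit them as XML comments. Standard-library usage:

import xml.etree.ElementTree as ET

root = ET.Element("root")
root.append(ET.Comment("generated by example"))
print(ET.tostring(root, encoding="unicode"))
# <root><!--generated by example--></root>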
5,421
meld3
HTMLMeldParser
A mostly-cut-and-paste of ElementTree's HTMLTreeBuilder that does special meld3 things (like preserve comments and munge meld ids). Subclassing is not possible due to private attributes. :-(
class HTMLMeldParser(HTMLParser):
    """
    A mostly-cut-and-paste of ElementTree's HTMLTreeBuilder that does special
    meld3 things (like preserve comments and munge meld ids). Subclassing is
    not possible due to private attributes. :-(
    """

    def __init__(self, builder=None, encoding=None):
        self.__stack = []
        if builder is None:
            builder = MeldTreeBuilder()
        self.builder = builder
        self.encoding = encoding or "iso-8859-1"
        try:
            # ``convert_charrefs`` was added in Python 3.4. Set it to avoid
            # "DeprecationWarning: The value of convert_charrefs will become
            # True in 3.5. You are encouraged to set the value explicitly."
            HTMLParser.__init__(self, convert_charrefs=False)
        except TypeError:
            HTMLParser.__init__(self)
        self.meldids = {}

    def close(self):
        HTMLParser.close(self)
        self.meldids = {}
        return self.builder.close()

    def handle_starttag(self, tag, attrs):
        if tag == "meta":
            # look for encoding directives
            http_equiv = content = None
            for k, v in attrs:
                if k == "http-equiv":
                    http_equiv = v.lower()
                elif k == "content":
                    content = v
            if http_equiv == "content-type" and content:
                # use email to parse the http header
                msg = email.message_from_string(
                    "%s: %s\n\n" % (http_equiv, content)
                )
                encoding = msg.get_param("charset")
                if encoding:
                    self.encoding = encoding
        if tag in AUTOCLOSE:
            if self.__stack and self.__stack[-1] == tag:
                self.handle_endtag(tag)
        self.__stack.append(tag)
        attrib = {}
        if attrs:
            for k, v in attrs:
                if k == _MELD_SHORT_ID:
                    k = _MELD_ID
                    if self.meldids.get(v):
                        raise ValueError('Repeated meld id "%s" in source' % v)
                    self.meldids[v] = 1
                else:
                    k = k.lower()
                attrib[k] = v
        self.builder.start(tag, attrib)
        if tag in IGNOREEND:
            self.__stack.pop()
            self.builder.end(tag)

    def handle_endtag(self, tag):
        if tag in IGNOREEND:
            return
        lasttag = self.__stack.pop()
        if tag != lasttag and lasttag in AUTOCLOSE:
            self.handle_endtag(lasttag)
        self.builder.end(tag)

    def handle_charref(self, char):
        if char[:1] == "x":
            char = int(char[1:], 16)
        else:
            char = int(char)
        self.builder.data(unichr(char))

    def handle_entityref(self, name):
        entity = htmlentitydefs.entitydefs.get(name)
        if entity:
            if len(entity) == 1:
                entity = ord(entity)
            else:
                entity = int(entity[2:-1])
            self.builder.data(unichr(entity))
        else:
            self.unknown_entityref(name)

    def handle_data(self, data):
        if isinstance(data, bytes):
            data = _u(data, self.encoding)
        self.builder.data(data)

    def unknown_entityref(self, name):
        pass  # ignore by default; override if necessary

    def handle_comment(self, data):
        self.builder.start(Comment, {})
        self.builder.data(data)
        self.builder.end(Comment)
(builder=None, encoding=None)
5,422
meld3
__init__
null
def __init__(self, builder=None, encoding=None):
    self.__stack = []
    if builder is None:
        builder = MeldTreeBuilder()
    self.builder = builder
    self.encoding = encoding or "iso-8859-1"
    try:
        # ``convert_charrefs`` was added in Python 3.4.  Set it to avoid
        # "DeprecationWarning: The value of convert_charrefs will become
        # True in 3.5. You are encouraged to set the value explicitly."
        HTMLParser.__init__(self, convert_charrefs=False)
    except TypeError:
        HTMLParser.__init__(self)
    self.meldids = {}
(self, builder=None, encoding=None)
5,423
_markupbase
_parse_doctype_attlist
null
def _parse_doctype_attlist(self, i, declstartpos):
    rawdata = self.rawdata
    name, j = self._scan_name(i, declstartpos)
    c = rawdata[j:j+1]
    if c == "":
        return -1
    if c == ">":
        return j + 1
    while 1:
        # scan a series of attribute descriptions; simplified:
        #   name type [value] [#constraint]
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == "(":
            # an enumerated type; look for ')'
            if ")" in rawdata[j:]:
                j = rawdata.find(")", j) + 1
            else:
                return -1
            while rawdata[j:j+1].isspace():
                j = j + 1
            if not rawdata[j:]:
                # end of buffer, incomplete
                return -1
        else:
            name, j = self._scan_name(j, declstartpos)
        c = rawdata[j:j+1]
        if not c:
            return -1
        if c in "'\"":
            m = _declstringlit_match(rawdata, j)
            if m:
                j = m.end()
            else:
                return -1
            c = rawdata[j:j+1]
            if not c:
                return -1
        if c == "#":
            if rawdata[j:] == "#":
                # end of buffer
                return -1
            name, j = self._scan_name(j + 1, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if not c:
                return -1
        if c == '>':
            # all done
            return j + 1
(self, i, declstartpos)
5,424
_markupbase
_parse_doctype_element
null
def _parse_doctype_element(self, i, declstartpos):
    name, j = self._scan_name(i, declstartpos)
    if j == -1:
        return -1
    # style content model; just skip until '>'
    rawdata = self.rawdata
    if '>' in rawdata[j:]:
        return rawdata.find(">", j) + 1
    return -1
(self, i, declstartpos)
5,425
_markupbase
_parse_doctype_entity
null
def _parse_doctype_entity(self, i, declstartpos):
    rawdata = self.rawdata
    if rawdata[i:i+1] == "%":
        j = i + 1
        while 1:
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c.isspace():
                j = j + 1
            else:
                break
    else:
        j = i
    name, j = self._scan_name(j, declstartpos)
    if j < 0:
        return j
    while 1:
        c = self.rawdata[j:j+1]
        if not c:
            return -1
        if c in "'\"":
            m = _declstringlit_match(rawdata, j)
            if m:
                j = m.end()
            else:
                return -1  # incomplete
        elif c == ">":
            return j + 1
        else:
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
(self, i, declstartpos)
5,426
_markupbase
_parse_doctype_notation
null
def _parse_doctype_notation(self, i, declstartpos):
    name, j = self._scan_name(i, declstartpos)
    if j < 0:
        return j
    rawdata = self.rawdata
    while 1:
        c = rawdata[j:j+1]
        if not c:
            # end of buffer; incomplete
            return -1
        if c == '>':
            return j + 1
        if c in "'\"":
            m = _declstringlit_match(rawdata, j)
            if not m:
                return -1
            j = m.end()
        else:
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
(self, i, declstartpos)
5,427
_markupbase
_parse_doctype_subset
null
def _parse_doctype_subset(self, i, declstartpos):
    rawdata = self.rawdata
    n = len(rawdata)
    j = i
    while j < n:
        c = rawdata[j]
        if c == "<":
            s = rawdata[j:j+2]
            if s == "<":
                # end of buffer; incomplete
                return -1
            if s != "<!":
                self.updatepos(declstartpos, j + 1)
                raise AssertionError(
                    "unexpected char in internal subset (in %r)" % s
                )
            if (j + 2) == n:
                # end of buffer; incomplete
                return -1
            if (j + 4) > n:
                # end of buffer; incomplete
                return -1
            if rawdata[j:j+4] == "<!--":
                j = self.parse_comment(j, report=0)
                if j < 0:
                    return j
                continue
            name, j = self._scan_name(j + 2, declstartpos)
            if j == -1:
                return -1
            if name not in {"attlist", "element", "entity", "notation"}:
                self.updatepos(declstartpos, j + 2)
                raise AssertionError(
                    "unknown declaration %r in internal subset" % name
                )
            # handle the individual names
            meth = getattr(self, "_parse_doctype_" + name)
            j = meth(j, declstartpos)
            if j < 0:
                return j
        elif c == "%":
            # parameter entity reference
            if (j + 1) == n:
                # end of buffer; incomplete
                return -1
            s, j = self._scan_name(j + 1, declstartpos)
            if j < 0:
                return j
            if rawdata[j] == ";":
                j = j + 1
        elif c == "]":
            j = j + 1
            while j < n and rawdata[j].isspace():
                j = j + 1
            if j < n:
                if rawdata[j] == ">":
                    return j
                self.updatepos(declstartpos, j)
                raise AssertionError("unexpected char after internal subset")
            else:
                return -1
        elif c.isspace():
            j = j + 1
        else:
            self.updatepos(declstartpos, j)
            raise AssertionError("unexpected char %r in internal subset" % c)
    # end of buffer reached
    return -1
(self, i, declstartpos)
5,428
_markupbase
_scan_name
null
def _scan_name(self, i, declstartpos):
    rawdata = self.rawdata
    n = len(rawdata)
    if i == n:
        return None, -1
    m = _declname_match(rawdata, i)
    if m:
        s = m.group()
        name = s.strip()
        if (i + len(s)) == n:
            return None, -1  # end of buffer
        return name.lower(), m.end()
    else:
        self.updatepos(declstartpos, i)
        raise AssertionError(
            "expected name token at %r" % rawdata[declstartpos:declstartpos+20]
        )
(self, i, declstartpos)
5,429
html.parser
check_for_whole_start_tag
null
def check_for_whole_start_tag(self, i):
    rawdata = self.rawdata
    m = locatestarttagend_tolerant.match(rawdata, i)
    if m:
        j = m.end()
        next = rawdata[j:j+1]
        if next == ">":
            return j + 1
        if next == "/":
            if rawdata.startswith("/>", j):
                return j + 2
            if rawdata.startswith("/", j):
                # buffer boundary
                return -1
            # else bogus input
            if j > i:
                return j
            else:
                return i + 1
        if next == "":
            # end of input
            return -1
        if next in ("abcdefghijklmnopqrstuvwxyz=/"
                    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
            # end of input in or before attribute value, or we have the
            # '/' from a '/>' ending
            return -1
        if j > i:
            return j
        else:
            return i + 1
    raise AssertionError("we should not get here!")
(self, i)
5,430
html.parser
clear_cdata_mode
null
def clear_cdata_mode(self):
    self.interesting = interesting_normal
    self.cdata_elem = None
(self)
5,431
meld3
close
null
def close(self):
    HTMLParser.close(self)
    self.meldids = {}
    return self.builder.close()
(self)
5,432
html.parser
feed
Feed data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n').
def feed(self, data):
    r"""Feed data to the parser.

    Call this as often as you want, with as little or as much text
    as you want (may include '\n').
    """
    self.rawdata = self.rawdata + data
    self.goahead(0)
(self, data)
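A small example of the incremental behavior the docstring describes: markup may be split at arbitrary points across calls, and incomplete constructs are buffered until more text arrives.

from html.parser import HTMLParser

class Echo(HTMLParser):
    def handle_data(self, data):
        print("data:", data)

p = Echo()
p.feed("<p>hello<")  # the trailing "<" is held in self.rawdata
p.feed("/p>\n")      # the split "</p>" end tag completes here
p.close()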
5,433
html.parser
get_starttag_text
Return full source of start tag: '<...>'.
def get_starttag_text(self):
    """Return full source of start tag: '<...>'."""
    return self.__starttag_text
(self)
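A usage example: inside a start-tag handler, this returns the raw source text of the tag currently being handled, including its attributes.

from html.parser import HTMLParser

class TagSource(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print(self.get_starttag_text())  # e.g. <a href="x">

TagSource().feed('<a href="x">link</a>')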
5,434
_markupbase
getpos
Return current line number and offset.
def getpos(self):
    """Return current line number and offset."""
    return self.lineno, self.offset
(self)
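A usage example: getpos() reports a (line, column) pair for the construct being handled, with lines starting at 1 and columns at 0, which is useful for error reporting.

from html.parser import HTMLParser

class WherePrinter(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print(tag, self.getpos())  # e.g. p (1, 0) then b (2, 0)

WherePrinter().feed("<p>x</p>\n<b>y</b>")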
5,435
html.parser
goahead
null
def goahead(self, end):
    rawdata = self.rawdata
    i = 0
    n = len(rawdata)
    while i < n:
        if self.convert_charrefs and not self.cdata_elem:
            j = rawdata.find('<', i)
            if j < 0:
                # if we can't find the next <, either we are at the end
                # or there's more text incoming.  If the latter is True,
                # we can't pass the text to handle_data in case we have
                # a charref cut in half at end.  Try to determine if
                # this is the case before proceeding by looking for an
                # & near the end and see if it's followed by a space or ;.
                amppos = rawdata.rfind('&', max(i, n-34))
                if (amppos >= 0 and
                    not re.compile(r'[\s;]').search(rawdata, amppos)):
                    break  # wait till we get all the text
                j = n
        else:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
        if i < j:
            if self.convert_charrefs and not self.cdata_elem:
                self.handle_data(unescape(rawdata[i:j]))
            else:
                self.handle_data(rawdata[i:j])
        i = self.updatepos(i, j)
        if i == n:
            break
        startswith = rawdata.startswith
        if startswith('<', i):
            if starttagopen.match(rawdata, i):  # < + letter
                k = self.parse_starttag(i)
            elif startswith("</", i):
                k = self.parse_endtag(i)
            elif startswith("<!--", i):
                k = self.parse_comment(i)
            elif startswith("<?", i):
                k = self.parse_pi(i)
            elif startswith("<!", i):
                k = self.parse_html_declaration(i)
            elif (i + 1) < n:
                self.handle_data("<")
                k = i + 1
            else:
                break
            if k < 0:
                if not end:
                    break
                k = rawdata.find('>', i + 1)
                if k < 0:
                    k = rawdata.find('<', i + 1)
                    if k < 0:
                        k = i + 1
                else:
                    k += 1
                if self.convert_charrefs and not self.cdata_elem:
                    self.handle_data(unescape(rawdata[i:k]))
                else:
                    self.handle_data(rawdata[i:k])
            i = self.updatepos(i, k)
        elif startswith("&#", i):
            match = charref.match(rawdata, i)
            if match:
                name = match.group()[2:-1]
                self.handle_charref(name)
                k = match.end()
                if not startswith(';', k-1):
                    k = k - 1
                i = self.updatepos(i, k)
                continue
            else:
                if ";" in rawdata[i:]:  # bail by consuming &#
                    self.handle_data(rawdata[i:i+2])
                    i = self.updatepos(i, i+2)
                break
        elif startswith('&', i):
            match = entityref.match(rawdata, i)
            if match:
                name = match.group(1)
                self.handle_entityref(name)
                k = match.end()
                if not startswith(';', k-1):
                    k = k - 1
                i = self.updatepos(i, k)
                continue
            match = incomplete.match(rawdata, i)
            if match:
                # match.group() will contain at least 2 chars
                if end and match.group() == rawdata[i:]:
                    k = match.end()
                    if k <= i:
                        k = n
                    i = self.updatepos(i, i + 1)
                # incomplete
                break
            elif (i + 1) < n:
                # not the end of the buffer, and can't be confused
                # with some other construct
                self.handle_data("&")
                i = self.updatepos(i, i + 1)
            else:
                break
        else:
            assert 0, "interesting.search() lied"
    # end while
    if end and i < n and not self.cdata_elem:
        if self.convert_charrefs and not self.cdata_elem:
            self.handle_data(unescape(rawdata[i:n]))
        else:
            self.handle_data(rawdata[i:n])
        i = self.updatepos(i, n)
    self.rawdata = rawdata[i:]
(self, end)
5,436
meld3
handle_charref
null
def handle_charref(self, char):
    if char[:1] == "x":
        char = int(char[1:], 16)
    else:
        char = int(char)
    self.builder.data(unichr(char))
(self, char)
5,437
meld3
handle_comment
null
def handle_comment(self, data):
    self.builder.start(Comment, {})
    self.builder.data(data)
    self.builder.end(Comment)
(self, data)
5,438
meld3
handle_data
null
def handle_data(self, data):
    if isinstance(data, bytes):
        data = _u(data, self.encoding)
    self.builder.data(data)
(self, data)
5,439
html.parser
handle_decl
null
def handle_decl(self, decl):
    pass
(self, decl)
5,440
meld3
handle_endtag
null
def handle_endtag(self, tag):
    if tag in IGNOREEND:
        return
    lasttag = self.__stack.pop()
    if tag != lasttag and lasttag in AUTOCLOSE:
        self.handle_endtag(lasttag)
    self.builder.end(tag)
(self, tag)
5,441
meld3
handle_entityref
null
def handle_entityref(self, name):
    entity = htmlentitydefs.entitydefs.get(name)
    if entity:
        if len(entity) == 1:
            entity = ord(entity)
        else:
            entity = int(entity[2:-1])
        self.builder.data(unichr(entity))
    else:
        self.unknown_entityref(name)
(self, name)
5,442
html.parser
handle_pi
null
def handle_pi(self, data):
    pass
(self, data)
5,443
html.parser
handle_startendtag
null
def handle_startendtag(self, tag, attrs):
    self.handle_starttag(tag, attrs)
    self.handle_endtag(tag)
(self, tag, attrs)
5,444
meld3
handle_starttag
null
def handle_starttag(self, tag, attrs):
    if tag == "meta":
        # look for encoding directives
        http_equiv = content = None
        for k, v in attrs:
            if k == "http-equiv":
                http_equiv = v.lower()
            elif k == "content":
                content = v
        if http_equiv == "content-type" and content:
            # use email to parse the http header
            msg = email.message_from_string(
                "%s: %s\n\n" % (http_equiv, content)
            )
            encoding = msg.get_param("charset")
            if encoding:
                self.encoding = encoding
    if tag in AUTOCLOSE:
        if self.__stack and self.__stack[-1] == tag:
            self.handle_endtag(tag)
    self.__stack.append(tag)
    attrib = {}
    if attrs:
        for k, v in attrs:
            if k == _MELD_SHORT_ID:
                k = _MELD_ID
                if self.meldids.get(v):
                    raise ValueError('Repeated meld id "%s" in source' % v)
                self.meldids[v] = 1
            else:
                k = k.lower()
            attrib[k] = v
    self.builder.start(tag, attrib)
    if tag in IGNOREEND:
        self.__stack.pop()
        self.builder.end(tag)
(self, tag, attrs)
5,445
html.parser
parse_bogus_comment
null
def parse_bogus_comment(self, i, report=1):
    rawdata = self.rawdata
    assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                            'parse_comment()')
    pos = rawdata.find('>', i+2)
    if pos == -1:
        return -1
    if report:
        self.handle_comment(rawdata[i+2:pos])
    return pos + 1
(self, i, report=1)
5,446
_markupbase
parse_comment
null
def parse_comment(self, i, report=1):
    rawdata = self.rawdata
    if rawdata[i:i+4] != '<!--':
        raise AssertionError('unexpected call to parse_comment()')
    match = _commentclose.search(rawdata, i+4)
    if not match:
        return -1
    if report:
        j = match.start(0)
        self.handle_comment(rawdata[i+4: j])
    return match.end(0)
(self, i, report=1)
5,447
_markupbase
parse_declaration
null
def parse_declaration(self, i):
    # This is some sort of declaration; in "HTML as
    # deployed," this should only be the document type
    # declaration ("<!DOCTYPE html...>").
    # ISO 8879:1986, however, has more complex
    # declaration syntax for elements in <!...>, including:
    # --comment--
    # [marked section]
    # name in the following list: ENTITY, DOCTYPE, ELEMENT,
    # ATTLIST, NOTATION, SHORTREF, USEMAP,
    # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
    rawdata = self.rawdata
    j = i + 2
    assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
    if rawdata[j:j+1] == ">":
        # the empty comment <!>
        return j + 1
    if rawdata[j:j+1] in ("-", ""):
        # Start of comment followed by buffer boundary,
        # or just a buffer boundary.
        return -1
    # A simple, practical version could look like: ((name|stringlit) S*) + '>'
    n = len(rawdata)
    if rawdata[j:j+2] == '--':  # comment
        # Locate --.*-- as the body of the comment
        return self.parse_comment(i)
    elif rawdata[j] == '[':  # marked section
        # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
        # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
        # Note that this is extended by Microsoft Office "Save as Web" function
        # to include [if...] and [endif].
        return self.parse_marked_section(i)
    else:  # all other declaration elements
        decltype, j = self._scan_name(j, i)
    if j < 0:
        return j
    if decltype == "doctype":
        self._decl_otherchars = ''
    while j < n:
        c = rawdata[j]
        if c == ">":
            # end of declaration syntax
            data = rawdata[i+2:j]
            if decltype == "doctype":
                self.handle_decl(data)
            else:
                # According to the HTML5 specs sections "8.2.4.44 Bogus
                # comment state" and "8.2.4.45 Markup declaration open
                # state", a comment token should be emitted.
                # Calling unknown_decl provides more flexibility though.
                self.unknown_decl(data)
            return j + 1
        if c in "\"'":
            m = _declstringlit_match(rawdata, j)
            if not m:
                return -1  # incomplete
            j = m.end()
        elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
            name, j = self._scan_name(j, i)
        elif c in self._decl_otherchars:
            j = j + 1
        elif c == "[":
            # this could be handled in a separate doctype parser
            if decltype == "doctype":
                j = self._parse_doctype_subset(j + 1, i)
            elif decltype in {"attlist", "linktype", "link", "element"}:
                # must tolerate []'d groups in a content model in an element declaration
                # also in data attribute specifications of attlist declaration
                # also link type declaration subsets in linktype declarations
                # also link attribute specification lists in link declarations
                raise AssertionError("unsupported '[' char in %s declaration" % decltype)
            else:
                raise AssertionError("unexpected '[' char in declaration")
        else:
            raise AssertionError("unexpected %r char in declaration" % rawdata[j])
        if j < 0:
            return j
    return -1  # incomplete
(self, i)
5,448
html.parser
parse_endtag
null
def parse_endtag(self, i):
    rawdata = self.rawdata
    assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
    match = endendtag.search(rawdata, i+1)  # >
    if not match:
        return -1
    gtpos = match.end()
    match = endtagfind.match(rawdata, i)  # </ + tag + >
    if not match:
        if self.cdata_elem is not None:
            self.handle_data(rawdata[i:gtpos])
            return gtpos
        # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
        namematch = tagfind_tolerant.match(rawdata, i+2)
        if not namematch:
            # w3.org/TR/html5/tokenization.html#end-tag-open-state
            if rawdata[i:i+3] == '</>':
                return i+3
            else:
                return self.parse_bogus_comment(i)
        tagname = namematch.group(1).lower()
        # consume and ignore other stuff between the name and the >
        # Note: this is not 100% correct, since we might have things like
        # </tag attr=">">, but looking for > after the name should cover
        # most of the cases and is much simpler
        gtpos = rawdata.find('>', namematch.end())
        self.handle_endtag(tagname)
        return gtpos+1

    elem = match.group(1).lower()  # script or style
    if self.cdata_elem is not None:
        if elem != self.cdata_elem:
            self.handle_data(rawdata[i:gtpos])
            return gtpos

    self.handle_endtag(elem)
    self.clear_cdata_mode()
    return gtpos
(self, i)
5,449
html.parser
parse_html_declaration
null
def parse_html_declaration(self, i):
    rawdata = self.rawdata
    assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                    'parse_html_declaration()')
    if rawdata[i:i+4] == '<!--':
        # this case is actually already handled in goahead()
        return self.parse_comment(i)
    elif rawdata[i:i+3] == '<![':
        return self.parse_marked_section(i)
    elif rawdata[i:i+9].lower() == '<!doctype':
        # find the closing >
        gtpos = rawdata.find('>', i+9)
        if gtpos == -1:
            return -1
        self.handle_decl(rawdata[i+2:gtpos])
        return gtpos+1
    else:
        return self.parse_bogus_comment(i)
(self, i)
5,450
_markupbase
parse_marked_section
null
def parse_marked_section(self, i, report=1):
    rawdata = self.rawdata
    assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
    sectName, j = self._scan_name(i+3, i)
    if j < 0:
        return j
    if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
        # look for standard ]]> ending
        match = _markedsectionclose.search(rawdata, i+3)
    elif sectName in {"if", "else", "endif"}:
        # look for MS Office ]> ending
        match = _msmarkedsectionclose.search(rawdata, i+3)
    else:
        raise AssertionError(
            'unknown status keyword %r in marked section' % rawdata[i+3:j]
        )
    if not match:
        return -1
    if report:
        j = match.start(0)
        self.unknown_decl(rawdata[i+3: j])
    return match.end(0)
(self, i, report=1)
5,451
html.parser
parse_pi
null
def parse_pi(self, i):
    rawdata = self.rawdata
    assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
    match = piclose.search(rawdata, i+2)  # >
    if not match:
        return -1
    j = match.start()
    self.handle_pi(rawdata[i+2: j])
    j = match.end()
    return j
(self, i)
5,452
html.parser
parse_starttag
null
def parse_starttag(self, i):
    self.__starttag_text = None
    endpos = self.check_for_whole_start_tag(i)
    if endpos < 0:
        return endpos
    rawdata = self.rawdata
    self.__starttag_text = rawdata[i:endpos]

    # Now parse the data between i+1 and j into a tag and attrs
    attrs = []
    match = tagfind_tolerant.match(rawdata, i+1)
    assert match, 'unexpected call to parse_starttag()'
    k = match.end()
    self.lasttag = tag = match.group(1).lower()
    while k < endpos:
        m = attrfind_tolerant.match(rawdata, k)
        if not m:
            break
        attrname, rest, attrvalue = m.group(1, 2, 3)
        if not rest:
            attrvalue = None
        elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
             attrvalue[:1] == '"' == attrvalue[-1:]:
            attrvalue = attrvalue[1:-1]
        if attrvalue:
            attrvalue = unescape(attrvalue)
        attrs.append((attrname.lower(), attrvalue))
        k = m.end()

    end = rawdata[k:endpos].strip()
    if end not in (">", "/>"):
        lineno, offset = self.getpos()
        if "\n" in self.__starttag_text:
            lineno = lineno + self.__starttag_text.count("\n")
            offset = len(self.__starttag_text) \
                     - self.__starttag_text.rfind("\n")
        else:
            offset = offset + len(self.__starttag_text)
        self.handle_data(rawdata[i:endpos])
        return endpos
    if end.endswith('/>'):
        # XHTML-style empty tag: <span attr="value" />
        self.handle_startendtag(tag, attrs)
    else:
        self.handle_starttag(tag, attrs)
        if tag in self.CDATA_CONTENT_ELEMENTS:
            self.set_cdata_mode(tag)
    return endpos
(self, i)
5,453
html.parser
reset
Reset this instance. Loses all unprocessed data.
def reset(self):
    """Reset this instance.  Loses all unprocessed data."""
    self.rawdata = ''
    self.lasttag = '???'
    self.interesting = interesting_normal
    self.cdata_elem = None
    _markupbase.ParserBase.reset(self)
(self)
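A usage example of the reset behavior the docstring describes: one parser instance can be reused across documents, and any buffered, unprocessed input is discarded.

from html.parser import HTMLParser

p = HTMLParser()
p.feed("<p>first docu")  # incomplete input may remain buffered in p.rawdata
p.reset()                # discards the pending fragment and position state
p.feed("<p>second document</p>")
p.close()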
5,454
html.parser
set_cdata_mode
null
def set_cdata_mode(self, elem):
    self.cdata_elem = elem.lower()
    self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
(self, elem)
5,455
html.parser
unknown_decl
null
def unknown_decl(self, data):
    pass
(self, data)
5,456
meld3
unknown_entityref
null
def unknown_entityref(self, name):
    pass  # ignore by default; override if necessary
(self, name)
5,457
_markupbase
updatepos
null
def updatepos(self, i, j):
    if i >= j:
        return j
    rawdata = self.rawdata
    nlines = rawdata.count("\n", i, j)
    if nlines:
        self.lineno = self.lineno + nlines
        pos = rawdata.rindex("\n", i, j)  # Should not fail
        self.offset = j-(pos+1)
    else:
        self.offset = self.offset + j-i
    return j
(self, i, j)