code | docs |
---|---|
def mkdir(jottapath, JFS):
jf = JFS.post('%s?mkDir=true' % jottapath)
return isinstance(jf, JFSFolder) | Make a new directory (a.k.a. folder) on JottaCloud.
Returns boolean |
def iter_tree(jottapath, JFS):
filedirlist = JFS.getObject('%s?mode=list' % jottapath)
log.debug("got tree: %s", filedirlist)
if not isinstance(filedirlist, JFSFileDirList):
yield ( '', tuple(), tuple() )
for path in filedirlist.tree:
yield path | Get a tree of files and folders. Use as an iterator; you get something like os.walk. |
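A minimal usage sketch, assuming an authenticated JFS instance named jfs and that each yielded item unpacks like an os.walk triple ('/backup' is a hypothetical path):

# Walk a remote JottaCloud folder roughly the way os.walk walks a local tree.
for dirpath, dirnames, filenames in iter_tree('/backup', jfs):
    print('%s: %d folders, %d files' % (dirpath, len(dirnames), len(filenames)))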
def _query(self, filename):
# - Query metadata of one file
# - Return a dict with a 'size' key, and a file size value (-1 for not found)
# - Retried if an exception is thrown
log.Info('Querying size of %s' % filename)
from jottalib.JFS import JFSNotFoundError, JFSIncompleteFile
remote_path = posixpath.join(self.folder.path, filename)
try:
remote_file = self.client.getObject(remote_path)
except JFSNotFoundError:
return {'size': -1}
return {
'size': remote_file.size,
} | Get size of filename |
def parse_drawing(document, container, elem):
_blip = elem.xpath('.//a:blip', namespaces=NAMESPACES)
if len(_blip) > 0:
blip = _blip[0]
_rid = blip.attrib[_name('{{{r}}}embed')]
img = doc.Image(_rid)
container.elements.append(img) | Parse drawing element.
We don't do much with the drawing element. We can find an embedded image but we don't do more than that. |
def parse_footnote(document, container, elem):
    "Parse the footnote element."
    _rid = elem.attrib[_name('{{{w}}}id')]
    foot = doc.Footnote(_rid)
    container.elements.append(foot) | Parse the footnote element. |
def parse_endnote(document, container, elem):
    "Parse the endnote element."
    _rid = elem.attrib[_name('{{{w}}}id')]
    note = doc.Endnote(_rid)
    container.elements.append(note) | Parse the endnote element. |
def parse_smarttag(document, container, tag_elem):
    "Parse the smarttag element."
    tag = doc.SmartTag()
    tag.element = tag_elem.attrib[_name('{{{w}}}element')]
    for elem in tag_elem:
        if elem.tag == _name('{{{w}}}r'):
            parse_text(document, tag, elem)
        if elem.tag == _name('{{{w}}}smartTag'):
            parse_smarttag(document, tag, elem)
    container.elements.append(tag)
    return | Parse the smarttag element. |
def parse_paragraph(document, par):
paragraph = doc.Paragraph()
paragraph.document = document
for elem in par:
if elem.tag == _name('{{{w}}}pPr'):
parse_paragraph_properties(document, paragraph, elem)
if elem.tag == _name('{{{w}}}r'):
parse_text(document, paragraph, elem)
if elem.tag == _name('{{{m}}}oMath'):
_m = doc.Math()
paragraph.elements.append(_m)
if elem.tag == _name('{{{m}}}oMathPara'):
_m = doc.Math()
paragraph.elements.append(_m)
if elem.tag == _name('{{{w}}}commentRangeStart'):
_m = doc.Comment(elem.attrib[_name('{{{w}}}id')], 'start')
paragraph.elements.append(_m)
if elem.tag == _name('{{{w}}}commentRangeEnd'):
_m = doc.Comment(elem.attrib[_name('{{{w}}}id')], 'end')
paragraph.elements.append(_m)
if elem.tag == _name('{{{w}}}hyperlink'):
try:
t = doc.Link(elem.attrib[_name('{{{r}}}id')])
parse_text(document, t, elem)
paragraph.elements.append(t)
except:
logger.error('Error with hyperlink [%s].', str(elem.attrib.items()))
if elem.tag == _name('{{{w}}}smartTag'):
parse_smarttag(document, paragraph, elem)
return paragraph | Parse paragraph element.
Some other elements can be found inside the paragraph element (math, links). |
def parse_table_properties(doc, table, prop):
    "Parse table properties."
    if not table:
        return
    style = prop.find(_name('{{{w}}}tblStyle'))
    if style is not None:
        table.style_id = style.attrib[_name('{{{w}}}val')]
        doc.add_style_as_used(table.style_id) | Parse table properties. |
def parse_table_column_properties(doc, cell, prop):
    "Parse table column properties."
    if not cell:
        return
    grid = prop.find(_name('{{{w}}}gridSpan'))
    if grid is not None:
        cell.grid_span = int(grid.attrib[_name('{{{w}}}val')])
    vmerge = prop.find(_name('{{{w}}}vMerge'))
    if vmerge is not None:
        if _name('{{{w}}}val') in vmerge.attrib:
            cell.vmerge = vmerge.attrib[_name('{{{w}}}val')]
        else:
            cell.vmerge = "" | Parse table column properties. |
def parse_document(xmlcontent):
document = etree.fromstring(xmlcontent)
body = document.xpath('.//w:body', namespaces=NAMESPACES)[0]
document = doc.Document()
for elem in body:
if elem.tag == _name('{{{w}}}p'):
document.elements.append(parse_paragraph(document, elem))
if elem.tag == _name('{{{w}}}tbl'):
document.elements.append(parse_table(document, elem))
if elem.tag == _name('{{{w}}}sdt'):
document.elements.append(doc.TOC())
return document | Parse document with content.
Content is placed in file 'document.xml'. |
def parse_relationship(document, xmlcontent, rel_type):
doc = etree.fromstring(xmlcontent)
for elem in doc:
if elem.tag == _name('{{{pr}}}Relationship'):
rel = {'target': elem.attrib['Target'],
'type': elem.attrib['Type'],
'target_mode': elem.attrib.get('TargetMode', 'Internal')}
document.relationships[rel_type][elem.attrib['Id']] = rel | Parse relationship document.
Relationships hold information like external or internal references for links.
Relationships are placed in file '_rels/document.xml.rels'. |
def parse_style(document, xmlcontent):
styles = etree.fromstring(xmlcontent)
_r = styles.xpath('.//w:rPrDefault', namespaces=NAMESPACES)
if len(_r) > 0:
rpr = _r[0].find(_name('{{{w}}}rPr'))
if rpr is not None:
st = doc.Style()
parse_previous_properties(document, st, rpr)
document.default_style = st
# rest of the styles
for style in styles.xpath('.//w:style', namespaces=NAMESPACES):
st = doc.Style()
st.style_id = style.attrib[_name('{{{w}}}styleId')]
style_type = style.attrib[_name('{{{w}}}type')]
if style_type is not None:
st.style_type = style_type
if _name('{{{w}}}default') in style.attrib:
is_default = style.attrib[_name('{{{w}}}default')]
if is_default is not None:
st.is_default = is_default == '1'
name = style.find(_name('{{{w}}}name'))
if name is not None:
st.name = name.attrib[_name('{{{w}}}val')]
based_on = style.find(_name('{{{w}}}basedOn'))
if based_on is not None:
st.based_on = based_on.attrib[_name('{{{w}}}val')]
document.styles.styles[st.style_id] = st
if st.is_default:
document.styles.default_styles[st.style_type] = st.style_id
rpr = style.find(_name('{{{w}}}rPr'))
if rpr is not None:
parse_previous_properties(document, st, rpr)
ppr = style.find(_name('{{{w}}}pPr'))
if ppr is not None:
parse_paragraph_properties(document, st, ppr) | Parse styles document.
Styles are defined in file 'styles.xml'. |
def parse_comments(document, xmlcontent):
comments = etree.fromstring(xmlcontent)
document.comments = {}
for comment in comments.xpath('.//w:comment', namespaces=NAMESPACES):
# w:author
# w:id
# w: date
comment_id = comment.attrib[_name('{{{w}}}id')]
comm = doc.CommentContent(comment_id)
comm.author = comment.attrib.get(_name('{{{w}}}author'), None)
comm.date = comment.attrib.get(_name('{{{w}}}date'), None)
comm.elements = [parse_paragraph(document, para) for para in comment.xpath('.//w:p', namespaces=NAMESPACES)]
document.comments[comment_id] = comm | Parse comments document.
Comments are defined in file 'comments.xml' |
def parse_footnotes(document, xmlcontent):
footnotes = etree.fromstring(xmlcontent)
document.footnotes = {}
for footnote in footnotes.xpath('.//w:footnote', namespaces=NAMESPACES):
_type = footnote.attrib.get(_name('{{{w}}}type'), None)
# don't know what to do with these now
if _type in ['separator', 'continuationSeparator', 'continuationNotice']:
continue
paragraphs = [parse_paragraph(document, para) for para in footnote.xpath('.//w:p', namespaces=NAMESPACES)]
document.footnotes[footnote.attrib[_name('{{{w}}}id')]] = paragraphs | Parse footnotes document.
Footnotes are defined in file 'footnotes.xml' |
def parse_endnotes(document, xmlcontent):
endnotes = etree.fromstring(xmlcontent)
document.endnotes = {}
for note in endnotes.xpath('.//w:endnote', namespaces=NAMESPACES):
paragraphs = [parse_paragraph(document, para) for para in note.xpath('.//w:p', namespaces=NAMESPACES)]
document.endnotes[note.attrib[_name('{{{w}}}id')]] = paragraphs | Parse endnotes document.
Endnotes are defined in file 'endnotes.xml' |
def parse_numbering(document, xmlcontent):
numbering = etree.fromstring(xmlcontent)
document.abstruct_numbering = {}
document.numbering = {}
for abstruct_num in numbering.xpath('.//w:abstractNum', namespaces=NAMESPACES):
numb = {}
for lvl in abstruct_num.xpath('./w:lvl', namespaces=NAMESPACES):
ilvl = int(lvl.attrib[_name('{{{w}}}ilvl')])
fmt = lvl.find(_name('{{{w}}}numFmt'))
numb[ilvl] = {'numFmt': fmt.attrib[_name('{{{w}}}val')]}
document.abstruct_numbering[abstruct_num.attrib[_name('{{{w}}}abstractNumId')]] = numb
for num in numbering.xpath('.//w:num', namespaces=NAMESPACES):
num_id = num.attrib[_name('{{{w}}}numId')]
abs_num = num.find(_name('{{{w}}}abstractNumId'))
if abs_num is not None:
number_id = abs_num.attrib[_name('{{{w}}}val')]
document.numbering[int(num_id)] = number_id | Parse numbering document.
Numbering is defined in file 'numbering.xml'. |
def get_by_name(self, name, style_type = None):
for st in self.styles.values():
if st:
if st.name == name:
return st
if style_type and not st:
st = self.styles.get(self.default_styles[style_type], None)
return st | Find style by its descriptive name.
:Returns:
Returns found style of type :class:`ooxml.doc.Style`. |
def get_by_id(self, style_id, style_type = None):
for st in self.styles.values():
if st:
if st.style_id == style_id:
return st
if style_type:
return self.styles.get(self.default_styles[style_type], None)
return None | Find style by its unique identifier.
:Returns:
Returns found style of type :class:`ooxml.doc.Style`. |
def process_affinity(affinity=None):
if affinity is not None:
affinity = CPUSet(affinity)
if not affinity.issubset(system_affinity()):
raise ValueError("unknown cpus: %s" % affinity)
return system_affinity() | Get or set the CPU affinity set for the current process.
This will affect all future threads spawned by this process. It is
implementation-defined whether it will also affect previously-spawned
threads. |
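A brief sketch of the getter/setter style described above; note that this particular fallback implementation only validates the requested set and reports the system affinity (CPU ids are illustrative):

current = process_affinity()        # query the CPUs this process may run on
checked = process_affinity([0, 1])  # request CPUs 0 and 1; unknown ids raise ValueError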
def acquire(self,blocking=True,timeout=None):
if timeout is None:
return self.__lock.acquire(blocking)
else:
# Simulated timeout using progressively longer sleeps.
# This is the same timeout scheme used in the stdlib Condition
# class. If there's lots of contention on the lock then there's
# a good chance you won't get it; but then again, Python doesn't
# guarantee fairness anyway. We hope that platform-specific
# extensions can provide a better mechanism.
endtime = _time() + timeout
delay = 0.0005
while not self.__lock.acquire(False):
remaining = endtime - _time()
if remaining <= 0:
return False
delay = min(delay*2,remaining,0.05)
_sleep(delay)
return True | Attempt to acquire this lock.
If the optional argument "blocking" is True and "timeout" is None,
this method blocks until it successfully acquires the lock. If
"blocking" is False, it returns immediately if the lock could not
be acquired. Otherwise, it blocks for at most "timeout" seconds
trying to acquire the lock.
In all cases, this method returns True if the lock was successfully
acquired and False otherwise. |
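A hedged usage sketch of the timeout behaviour described above, assuming the Lock class this method belongs to:

lock = Lock()
if lock.acquire(blocking=True, timeout=2.0):
    try:
        pass  # critical section
    finally:
        lock.release()
else:
    pass  # gave up after roughly two seconds without the lock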
def from_thread(cls,thread):
new_classes = []
for new_cls in cls.__mro__:
if new_cls not in thread.__class__.__mro__:
new_classes.append(new_cls)
if isinstance(thread,cls):
pass
elif issubclass(cls,thread.__class__):
thread.__class__ = cls
else:
class UpgradedThread(thread.__class__,cls):
pass
thread.__class__ = UpgradedThread
for new_cls in new_classes:
if hasattr(new_cls,"_upgrade_thread"):
new_cls._upgrade_thread(thread)
return thread | Convert a vanilla thread object into an instance of this class.
This method "upgrades" a vanilla thread object to an instance of this
extended class. You might need to call this if you obtain a reference
to a thread by some means other than (a) creating it, or (b) from the
methods of the threading2 module. |
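A small sketch of the upgrade path, assuming Thread is the extended class providing from_thread:

import threading

vanilla = threading.Thread(target=lambda: None)
upgraded = Thread.from_thread(vanilla)  # same object, now an instance of the extended class
assert upgraded is vanilla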
def acquire(self,blocking=True,timeout=None,shared=False):
with self._lock:
if shared:
self._acquire_shared(blocking,timeout)
else:
self._acquire_exclusive(blocking,timeout)
assert not (self.is_shared and self.is_exclusive) | Acquire the lock in shared or exclusive mode. |
def release(self):
# This decrements the appropriate lock counters, and if the lock
# becomes free, it looks for a queued thread to hand it off to.
# By doing the handoff here we ensure fairness.
me = currentThread()
with self._lock:
if self.is_exclusive:
if self._exclusive_owner is not me:
raise RuntimeError("release() called on unheld lock")
self.is_exclusive -= 1
if not self.is_exclusive:
self._exclusive_owner = None
# If there are waiting shared locks, issue it to them
# all and then wake everyone up.
if self._shared_queue:
for (thread,waiter) in self._shared_queue:
self.is_shared += 1
self._shared_owners[thread] = 1
waiter.notify()
del self._shared_queue[:]
# Otherwise, if there are waiting exclusive locks,
# they get first dibbs on the lock.
elif self._exclusive_queue:
(thread,waiter) = self._exclusive_queue.pop(0)
self._exclusive_owner = thread
self.is_exclusive += 1
waiter.notify()
elif self.is_shared:
try:
self._shared_owners[me] -= 1
if self._shared_owners[me] == 0:
del self._shared_owners[me]
except KeyError:
raise RuntimeError("release() called on unheld lock")
self.is_shared -= 1
if not self.is_shared:
# If there are waiting exclusive locks,
# they get first dibbs on the lock.
if self._exclusive_queue:
(thread,waiter) = self._exclusive_queue.pop(0)
self._exclusive_owner = thread
self.is_exclusive += 1
waiter.notify()
else:
assert not self._shared_queue
else:
raise RuntimeError("release() called on unheld lock") | Release the lock. |
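A minimal sketch of the shared/exclusive protocol these methods implement, assuming an SHLock-style class exposing this acquire/release pair:

lock = SHLock()
lock.acquire(shared=True)   # any number of readers may hold the lock together
lock.release()
lock.acquire(shared=False)  # a single writer gets exclusive access
lock.release()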
def read_from_file(file_name):
from .docxfile import DOCXFile
dfile = DOCXFile(file_name)
dfile.parse()
return dfile | Parses an OOXML file and returns the parsed document.
:Args:
- file_name (str): Path to OOXML file
:Returns:
Returns object of type :class:`ooxml.docx.DOCXFile`. |
def _get_font_size(document, style):
font_size = style.get_font_size()
if font_size == -1:
if style.based_on:
based_on = document.styles.get_by_id(style.based_on)
if based_on:
return _get_font_size(document, based_on)
return font_size | Get font size defined for this style.
It will try to get the font size from its parent style if it is not defined by the original style.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
Returns font size as a number. -1 if it can not get font size. |
def _get_numbering(document, numid, ilvl):
try:
abs_num = document.numbering[numid]
return document.abstruct_numbering[abs_num][ilvl]['numFmt']
except:
return 'bullet' | Returns type for the list.
:Returns:
Returns type for the list. Returns "bullet" by default or in case of an error. |
def _get_parent(root):
elem = root
while True:
elem = elem.getparent()
if elem.tag in ['ul', 'ol']:
return elem | Returns root element for a list.
:Args:
root (Element): lxml element of current location
:Returns:
lxml element representing list |
def close_list(ctx, root):
try:
n = len(ctx.in_list)
if n <= 0:
return root
elem = root
while n > 0:
while True:
if elem.tag in ['ul', 'ol', 'td']:
elem = elem.getparent()
break
elem = elem.getparent()
n -= 1
ctx.in_list = []
return elem
except:
return None | Close already opened list if needed.
This will check whether an already opened list needs to be closed.
:Args:
- ctx (:class:`Context`): Context object
- root (Element): lxml element representing current position.
:Returns:
lxml element where future content should be placed. |
def serialize_break(ctx, document, elem, root):
    "Serialize break element."
    if elem.break_type == u'textWrapping':
        _div = etree.SubElement(root, 'br')
    else:
        _div = etree.SubElement(root, 'span')
        if ctx.options['embed_styles']:
            _div.set('style', 'page-break-after: always;')
    fire_hooks(ctx, document, elem, _div, ctx.get_hook('page_break'))
    return root | Serialize break element. |
def serialize_math(ctx, document, elem, root):
_div = etree.SubElement(root, 'span')
if ctx.options['embed_styles']:
_div.set('style', 'border: 1px solid red')
_div.text = 'We do not support Math blocks at the moment.'
fire_hooks(ctx, document, elem, _div, ctx.get_hook('math'))
return root | Serialize math element.
Math objects are not supported at the moment. This is why we only show an error message. |
def serialize_link(ctx, document, elem, root):
_a = etree.SubElement(root, 'a')
for el in elem.elements:
_ser = ctx.get_serializer(el)
if _ser:
_td = _ser(ctx, document, el, _a)
else:
if isinstance(el, doc.Text):
children = list(_a)
if len(children) == 0:
_text = _a.text or u''
_a.text = u'{}{}'.format(_text, el.value())
else:
_text = children[-1].tail or u''
children[-1].tail = u'{}{}'.format(_text, el.value())
if elem.rid in document.relationships[ctx.options['relationship']]:
_a.set('href', document.relationships[ctx.options['relationship']][elem.rid].get('target', ''))
fire_hooks(ctx, document, elem, _a, ctx.get_hook('a'))
return root | Serialize link element.
This works only for external links at the moment. |
def serialize_image(ctx, document, elem, root):
_img = etree.SubElement(root, 'img')
# make path configurable
if elem.rid in document.relationships[ctx.options['relationship']]:
img_src = document.relationships[ctx.options['relationship']][elem.rid].get('target', '')
img_name, img_extension = os.path.splitext(img_src)
_img.set('src', 'static/{}{}'.format(elem.rid, img_extension))
fire_hooks(ctx, document, elem, _img, ctx.get_hook('img'))
return root | Serialize image element.
This is not abstract enough. |
def fire_hooks(ctx, document, elem, element, hooks):
if not hooks:
return
for hook in hooks:
hook(ctx, document, elem, element) | Fire hooks on newly created element.
For each newly created element we will try to find defined hooks and execute them.
:Args:
- ctx (:class:`Context`): Context object
- document (:class:`ooxml.doc.Document`): Document object
- elem (:class:`ooxml.doc.Element`): Element which we serialized
- element (Element): lxml element which we created
- hooks (list): List of hooks |
def has_style(node):
elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind', 'superscript', 'subscript', 'small_caps']
return any([True for elem in elements if elem in node.rpr]) | Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False |
def get_all_styles(document, style):
classes = []
while True:
classes.insert(0, get_style_name(style))
if style.based_on:
style = document.styles.get_by_id(style.based_on)
else:
break
return classes | Returns the list of styles the specified style is based on.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
List of style objects. |
def get_css_classes(document, style):
lst = [st.lower() for st in get_all_styles(document, style)[-1:]] + \
['{}-fontsize'.format(st.lower()) for st in get_all_styles(document, style)[-1:]]
return ' '.join(lst) | Returns CSS classes for this style.
This function will check all the styles the specified style is based on and return their CSS classes.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
String representing all the CSS classes for this element.
>>> get_css_classes(doc, st)
'header1 normal' |
def serialize_symbol(ctx, document, el, root):
    "Serialize special symbols."
    span = etree.SubElement(root, 'span')
    span.text = el.value()
    fire_hooks(ctx, document, el, span, ctx.get_hook('symbol'))
    return root | Serialize special symbols. |
def serialize_footnote(ctx, document, el, root):
    "Serializes footnotes."
    footnote_num = el.rid
    if el.rid not in ctx.footnote_list:
        ctx.footnote_id += 1
        ctx.footnote_list[el.rid] = ctx.footnote_id
    footnote_num = ctx.footnote_list[el.rid]
    note = etree.SubElement(root, 'sup')
    link = etree.SubElement(note, 'a')
    link.set('href', '#')
    link.text = u'{}'.format(footnote_num)
    fire_hooks(ctx, document, el, note, ctx.get_hook('footnote'))
    return root | Serializes footnotes. |
def serialize_comment(ctx, document, el, root):
    "Serializes comment."
    # Check if option is turned on
    if el.comment_type == 'end':
        ctx.opened_comments.remove(el.cid)
    else:
        if el.comment_type != 'reference':
            ctx.opened_comments.append(el.cid)
        if ctx.options['comment_span']:
            link = etree.SubElement(root, 'a')
            link.set('href', '#')
            link.set('class', 'comment-link')
            link.set('id', 'comment-id-' + el.cid)
            link.text = ''
            fire_hooks(ctx, document, el, link, ctx.get_hook('comment'))
    return root | Serializes comment. |
def serialize_endnote(ctx, document, el, root):
    "Serializes endnotes."
    footnote_num = el.rid
    if el.rid not in ctx.endnote_list:
        ctx.endnote_id += 1
        ctx.endnote_list[el.rid] = ctx.endnote_id
    footnote_num = ctx.endnote_list[el.rid]
    note = etree.SubElement(root, 'sup')
    link = etree.SubElement(note, 'a')
    link.set('href', '#')
    link.text = u'{}'.format(footnote_num)
    fire_hooks(ctx, document, el, note, ctx.get_hook('endnote'))
    return root | Serializes endnotes. |
def serialize_smarttag(ctx, document, el, root):
    "Serializes smarttag."
    if ctx.options['smarttag_span']:
        _span = etree.SubElement(root, 'span', {'class': 'smarttag', 'data-smarttag-element': el.element})
    else:
        _span = root
    for elem in el.elements:
        _ser = ctx.get_serializer(elem)
        if _ser:
            _td = _ser(ctx, document, elem, _span)
        else:
            if isinstance(elem, doc.Text):
                children = list(_span)
                if len(children) == 0:
                    _text = _span.text or u''
                    _span.text = u'{}{}'.format(_text, elem.text)
                else:
                    _text = children[-1].tail or u''
                    children[-1].tail = u'{}{}'.format(_text, elem.text)
    fire_hooks(ctx, document, el, _span, ctx.get_hook('smarttag'))
    return root | Serializes smarttag. |
def serialize_table(ctx, document, table, root):
# What we should check really is why do we pass None as root element
# There is a good chance some content is missing after the import
if root is None:
return root
if ctx.ilvl != None:
root = close_list(ctx, root)
ctx.ilvl, ctx.numid = None, None
_table = etree.SubElement(root, 'table')
_table.set('border', '1')
_table.set('width', '100%')
style = get_style(document, table)
if style:
_table.set('class', get_css_classes(document, style))
for rows in table.rows:
_tr = etree.SubElement(_table, 'tr')
for cell in rows:
_td = etree.SubElement(_tr, 'td')
if cell.grid_span != 1:
_td.set('colspan', str(cell.grid_span))
if cell.row_span != 1:
_td.set('rowspan', str(cell.row_span))
for elem in cell.elements:
if isinstance(elem, doc.Paragraph):
_ser = ctx.get_serializer(elem)
_td = _ser(ctx, document, elem, _td, embed=False)
if ctx.ilvl != None:
# root = close_list(ctx, root)
_td = close_list(ctx, _td)
ctx.ilvl, ctx.numid = None, None
fire_hooks(ctx, document, table, _td, ctx.get_hook('td'))
fire_hooks(ctx, document, table, _td, ctx.get_hook('tr'))
fire_hooks(ctx, document, table, _table, ctx.get_hook('table'))
return root | Serializes table element. |
def serialize_textbox(ctx, document, txtbox, root):
_div = etree.SubElement(root, 'div')
_div.set('class', 'textbox')
for elem in txtbox.elements:
_ser = ctx.get_serializer(elem)
if _ser:
_ser(ctx, document, elem, _div)
fire_hooks(ctx, document, txtbox, _div, ctx.get_hook('textbox'))
return root | Serialize textbox element. |
def serialize_elements(document, elements, options=None):
ctx = Context(document, options)
tree_root = root = etree.Element('div')
for elem in elements:
_ser = ctx.get_serializer(elem)
if _ser:
root = _ser(ctx, document, elem, root)
# TODO:
# - create footnotes now
return etree.tostring(tree_root, pretty_print=ctx.options.get('pretty_print', True), encoding="utf-8", xml_declaration=False) | Serialize list of elements into HTML string.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- elements (list): List of elements
- options (dict): Optional dictionary with :class:`Context` options
:Returns:
Returns HTML representation of the document. |
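A short end-to-end sketch using the helpers above; 'sample.docx' and the attribute names are assumptions:

dfile = read_from_file('sample.docx')  # parse the OOXML package
html = serialize_elements(dfile.document, dfile.document.elements)
open('sample.html', 'wb').write(html)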
def is_header(self, elem, font_size, node, style=None):
# This logic has been disabled for now. Mark this as header if it has
# been marked during the parsing or mark.
# if hasattr(elem, 'possible_header'):
# if elem.possible_header:
# return True
# if not style:
# return False
if hasattr(style, 'style_id'):
fnt_size = _get_font_size(self.doc, style)
from .importer import calculate_weight
weight = calculate_weight(self.doc, elem)
if weight > 50:
return False
if fnt_size in self.doc.possible_headers_style:
return True
return font_size in self.doc.possible_headers
else:
list_of_sizes = {}
for el in elem.elements:
try:
fs = get_style_fontsize(el)
weight = len(el.value()) if el.value() else 0
list_of_sizes[fs] = list_of_sizes.setdefault(fs, 0) + weight
except:
pass
sorted_list_of_sizes = list(collections.OrderedDict(sorted(six.iteritems(list_of_sizes), key=lambda t: t[0])))
font_size_to_check = font_size
if len(sorted_list_of_sizes) > 0:
if sorted_list_of_sizes[0] != font_size:
return sorted_list_of_sizes[0] in self.doc.possible_headers
return font_size in self.doc.possible_headers | Used for checking if specific element is a header or not.
:Returns:
True or False |
def get_header(self, elem, style, node):
font_size = style
if hasattr(elem, 'possible_header'):
if elem.possible_header:
return 'h1'
if not style:
return 'h6'
if hasattr(style, 'style_id'):
font_size = _get_font_size(self.doc, style)
try:
if font_size in self.doc.possible_headers_style:
return 'h{}'.format(self.doc.possible_headers_style.index(font_size)+1)
return 'h{}'.format(self.doc.possible_headers.index(font_size)+1)
except ValueError:
return 'h6' | Returns HTML tag representing specific header for this element.
:Returns:
String representation of HTML tag. |
def get_serializer(self, node):
return self.options['serializers'].get(type(node), None) | Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization. |
def priority(self,priority):
with self.__lock:
old_priorities = {}
try:
for thread in self.__threads:
old_priorities[thread] = thread.priority
thread.priority = priority
except Exception:
for (thread,old_priority) in old_priorities.iteritems():
try:
thread.priority = old_priority
except Exception:
pass
raise
else:
self.__priority = priority | Set the priority for all threads in this group.
If setting priority fails on any thread, the priority of all threads
is restored to its previous value. |
def affinity(self,affinity):
with self.__lock:
old_affinities = {}
try:
for thread in self.__threads:
old_affinities[thread] = thread.affinity
thread.affinity = affinity
except Exception:
for (thread,old_affinity) in old_affinities.iteritems():
try:
thread.affinity = old_affinity
except Exception:
pass
raise
else:
self.__affinity = affinity | Set the affinity for all threads in this group.
If setting affinity fails on any thread, the affinity of all threads
is restored to its previous value. |
def join(self,timeout=None):
if timeout is None:
for thread in self.__threads:
thread.join()
else:
deadline = _time() + timeout
for thread in self.__threads:
delay = deadline - _time()
if delay <= 0:
return False
if not thread.join(delay):
return False
return True | Join all threads in this group.
If the optional "timeout" argument is given, give up after that many
seconds. This method returns True if the threads were successfully
joined, False if a timeout occurred. |
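A hedged sketch of joining a whole group with a deadline, assuming a ThreadGroup-style container populated elsewhere:

group = ThreadGroup()
if not group.join(timeout=5.0):
    pass  # at least one thread was still running when the timeout expired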
def text_length(elem):
if not elem:
return 0
value = elem.value()
try:
value = len(value)
except:
value = 0
try:
for a in elem.elements:
value += len(a.value())
except:
pass
return value | Returns length of the content in this element.
Return value is not correct but it is **good enough**. |
def reset(self):
    "Resets the values."
    self.zf = zipfile.ZipFile(self.file_name, 'r')
    self._doc = None | Resets the values. |
def _priority_range(policy=None):
if policy is None:
policy = libc.sched_getscheduler(0)
if policy < 0:
raise OSError(get_errno(),"sched_getscheduler")
max = libc.sched_get_priority_max(policy)
if max < 0:
raise OSError(get_errno(),"sched_get_priority_max")
min = libc.sched_get_priority_min(policy)
if min < 0:
raise OSError(get_errno(),"sched_get_priority_min")
return (min,max) | Determine the priority range (min,max) for the given scheduler policy.
If no policy is specified, the current default policy is used. |
def _get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr | Copied from django.contrib.syndication.views.Feed (v1.7.1) |
def build_rrule(count=None, interval=None, bysecond=None, byminute=None,
byhour=None, byweekno=None, bymonthday=None, byyearday=None,
bymonth=None, until=None, bysetpos=None, wkst=None, byday=None,
freq=None):
result = {}
if count is not None:
result['COUNT'] = count
if interval is not None:
result['INTERVAL'] = interval
if bysecond is not None:
result['BYSECOND'] = bysecond
if byminute is not None:
result['BYMINUTE'] = byminute
if byhour is not None:
result['BYHOUR'] = byhour
if byweekno is not None:
result['BYWEEKNO'] = byweekno
if bymonthday is not None:
result['BYMONTHDAY'] = bymonthday
if byyearday is not None:
result['BYYEARDAY'] = byyearday
if bymonth is not None:
result['BYMONTH'] = bymonth
if until is not None:
result['UNTIL'] = until
if bysetpos is not None:
result['BYSETPOS'] = bysetpos
if wkst is not None:
result['WKST'] = wkst
if byday is not None:
result['BYDAY'] = byday
if freq is not None:
if freq not in vRecur.frequencies:
raise ValueError('Frequency value should be one of: {0}'
.format(vRecur.frequencies))
result['FREQ'] = freq
return result | Build rrule dictionary for vRecur class.
:param count: int
:param interval: int
:param bysecond: int
:param byminute: int
:param byhour: int
:param byweekno: int
:param bymonthday: int
:param byyearday: int
:param bymonth: int
:param until: datetime
:param bysetpos: int
:param wkst: str, two-letter weekday
:param byday: weekday
:param freq: str, frequency name ('WEEKLY', 'MONTHLY', etc.)
:return: dict |
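For example, a rule meaning "every second week, ten times" could be built roughly like this (the resulting dict can then be added to an icalendar component):

rrule = build_rrule(freq='WEEKLY', interval=2, count=10)
# -> {'FREQ': 'WEEKLY', 'INTERVAL': 2, 'COUNT': 10}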
def build_rrule_from_recurrences_rrule(rule):
from recurrence import serialize
line = serialize(rule)
if line.startswith('RRULE:'):
line = line[6:]
return build_rrule_from_text(line) | Build rrule dictionary for vRecur class from a django_recurrences rrule.
django_recurrences is a popular implementation for recurrences in django.
https://pypi.org/project/django-recurrence/
This is a shortcut to interface between recurrences and icalendar. |
def build_rrule_from_dateutil_rrule(rule):
lines = str(rule).splitlines()
for line in lines:
if line.startswith('DTSTART:'):
continue
if line.startswith('RRULE:'):
line = line[6:]
return build_rrule_from_text(line) | Build rrule dictionary for vRecur class from a dateutil rrule.
Dateutils rrule is a popular implementation of rrule in python.
https://pypi.org/project/python-dateutil/
This is a shortcut to interface between dateutil and icalendar. |
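A rough sketch of the dateutil round-trip described above (the exact value shapes returned by icalendar are an assumption):

from dateutil.rrule import rrule, WEEKLY

rule = rrule(WEEKLY, count=4)
vrecur_dict = build_rrule_from_dateutil_rrule(rule)  # e.g. {'FREQ': ['WEEKLY'], 'COUNT': [4]}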
def write(self, outfile, encoding):
cal = Calendar()
cal.add('version', '2.0')
cal.add('calscale', 'GREGORIAN')
for ifield, efield in FEED_FIELD_MAP:
val = self.feed.get(ifield)
if val is not None:
cal.add(efield, val)
self.write_items(cal)
to_ical = getattr(cal, 'as_string', None)
if not to_ical:
to_ical = cal.to_ical
outfile.write(to_ical()) | Writes the feed to the specified file in the
specified encoding. |
def write_items(self, calendar):
for item in self.items:
event = Event()
for ifield, efield in ITEM_EVENT_FIELD_MAP:
val = item.get(ifield)
if val is not None:
event.add(efield, val)
calendar.add_component(event) | Write all events to the calendar |
def _textlist(self, _addtail=False):
    '''Returns a list of text strings contained within an element and its sub-elements.
    Helpful for extracting text from prose-oriented XML (such as XHTML or DocBook).
    '''
    result = []
    if (not _addtail) and (self.text is not None):
        result.append(self.text)
    for elem in self:
        result.extend(elem.textlist(True))
    if _addtail and self.tail is not None:
        result.append(self.tail)
    return result | Returns a list of text strings contained within an element and its sub-elements.
Helpful for extracting text from prose-oriented XML (such as XHTML or DocBook). |
def _reindent(s, indent, reformat=True):
s = textwrap.dedent(s)
s = s.split('\n')
s = [x.rstrip() for x in s]
while s and (not s[0]):
s = s[1:]
while s and (not s[-1]):
s = s[:-1]
if reformat:
s = '\n'.join(s)
s = textwrap.wrap(s, initial_indent=indent, subsequent_indent=indent)
else:
s = [indent + x for x in s]
return '\n'.join(s) + '\n' | Remove the existing indentation from each line of a chunk of
text, s, and then prefix each line with a new indent string.
Also removes trailing whitespace from each line, and leading and
trailing blank lines. |
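A tiny illustration of the behaviour described, with the default reformatting (output shown as a comment):

text = """
        first line
        second line
"""
print(_reindent(text, '    '))
# -> '    first line second line\n'  (dedented, wrapped, and re-indented with four spaces)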
def generate_docstr(element, indent='', wrap=None):
result = []
txt = element.text and element.text.rstrip()
if txt:
result.append(_reindent(txt, indent))
result.append(indent)
for d in element.findall('doc') + element.findall('rule'):
docval = ''.join(d.textlist()).rstrip()
if not docval:
continue
reformat = True
if 'name' in d.attrib:
result.append(indent + d.attrib['name'].upper() + ':')
result.append(indent)
extra_indent = ' '
if d.attrib['name'] == 'grammar':
reformat = False # Don't want re-indenting to mess this up
elif d.tag == 'rule':
result.append(indent + 'RULE:')
result.append(indent)
extra_indent = ' '
else:
extra_indent = ''
result.append(_reindent(docval, indent + extra_indent, reformat))
result.append(indent)
fields = element.findall('field')
if fields:
result.append(indent + 'PARAMETERS:')
for f in fields:
result.append(indent + ' ' + _fixup_field_name(f) + ': ' + _field_type(f))
field_docs = generate_docstr(f, indent + ' ')
if field_docs:
result.append(indent)
result.append(field_docs)
result.append(indent)
if not result:
return None
if wrap is not None:
result = [wrap] + result + [wrap]
return '\n'.join(x.rstrip() for x in result) + '\n' | Generate a Python docstr for a given element in the AMQP
XML spec file. The element could be a class or method
The 'wrap' parameter is an optional chunk of text that's
added to the beginning and end of the resulting docstring. |
def _next_method(self):
while self.queue.empty():
try:
frame_type, channel, payload = self.source.read_frame()
except Exception, e:
#
# Connection was closed? Framing Error?
#
self.queue.put(e)
break
if self.expected_types[channel] != frame_type:
self.queue.put((
channel,
Exception('Received frame type %s while expecting type: %s' %
(frame_type, self.expected_types[channel])
)
))
elif frame_type == 1:
self._process_method_frame(channel, payload)
elif frame_type == 2:
self._process_content_header(channel, payload)
elif frame_type == 3:
self._process_content_body(channel, payload) | Read the next method from the source; once one complete method has
been assembled it is placed in the internal queue. |
def _process_method_frame(self, channel, payload):
method_sig = unpack('>HH', payload[:4])
args = AMQPReader(payload[4:])
if method_sig in _CONTENT_METHODS:
#
# Save what we've got so far and wait for the content-header
#
self.partial_messages[channel] = _PartialMessage(method_sig, args)
self.expected_types[channel] = 2
else:
self.queue.put((channel, method_sig, args, None)) | Process Method frames |
def _process_content_header(self, channel, payload):
partial = self.partial_messages[channel]
partial.add_header(payload)
if partial.complete:
#
# a bodyless message, we're done
#
self.queue.put((channel, partial.method_sig, partial.args, partial.msg))
del self.partial_messages[channel]
self.expected_types[channel] = 1
else:
#
# wait for the content-body
#
self.expected_types[channel] = 3 | Process Content Header frames |
def _process_content_body(self, channel, payload):
partial = self.partial_messages[channel]
partial.add_payload(payload)
if partial.complete:
#
# Stick the message in the queue and go back to
# waiting for method frames
#
self.queue.put((channel, partial.method_sig, partial.args, partial.msg))
del self.partial_messages[channel]
self.expected_types[channel] = 1 | Process Content Body frames |
def read_method(self):
self._next_method()
m = self.queue.get()
if isinstance(m, Exception):
raise m
return m | Read a method from the peer. |
def _do_close(self):
AMQP_LOGGER.debug('Closed channel #%d' % self.channel_id)
self.is_open = False
del self.connection.channels[self.channel_id]
self.channel_id = self.connection = None
self.callbacks = {} | Tear down this object, after we've agreed to close with the server. |
def _alert(self, args):
reply_code = args.read_short()
reply_text = args.read_shortstr()
details = args.read_table()
self.alerts.put((reply_code, reply_text, details)) | This method allows the server to send a non-fatal warning to
the client. This is used for methods that are normally
asynchronous and thus do not have confirmations, and for which
the server may detect errors that need to be reported. Fatal
errors are handled as channel or connection exceptions; non-
fatal errors are sent through this method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
details: table
detailed information for warning
A set of fields that provide more information about
the problem. The meaning of these fields are defined
on a per-reply-code basis (TO BE DEFINED). |
def close(self, reply_code=0, reply_text='', method_sig=(0, 0)):
if not self.is_open:
# already closed
return
args = AMQPWriter()
args.write_short(reply_code)
args.write_shortstr(reply_text)
args.write_short(method_sig[0]) # class_id
args.write_short(method_sig[1]) # method_id
self._send_method((20, 40), args)
return self.wait(allowed_methods=[
(20, 41), # Channel.close_ok
]) | request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. |
def _close(self, args):
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
# self.close_ok()
# def close_ok(self):
# """
# confirm a channel close
#
# This method confirms a Channel.Close method and tells the
# recipient that it is safe to release resources for the channel
# and close the socket.
#
# RULE:
#
# A peer that detects a socket closure without having
# received a Channel.Close-Ok handshake method SHOULD log
# the error.
#
# """
self._send_method((20, 41))
self._do_close()
raise AMQPChannelException(reply_code, reply_text,
(class_id, method_id)) | request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. |
def exchange_delete(self, exchange, if_unused=False,
nowait=False, ticket=None):
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(exchange)
args.write_bit(if_unused)
args.write_bit(nowait)
self._send_method((40, 20), args)
if not nowait:
return self.wait(allowed_methods=[
(40, 21), # Channel.exchange_delete_ok
]) | delete an exchange
This method deletes an exchange. When an exchange is deleted
all queue bindings on the exchange are cancelled.
PARAMETERS:
exchange: shortstr
RULE:
The exchange MUST exist. Attempting to delete a
non-existing exchange causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the exchange if it
has no queue bindings. If the exchange has queue
bindings the server does not delete it but raises a
channel exception instead.
RULE:
If set, the server SHOULD delete the exchange but
only if it has no queue bindings.
RULE:
If set, the server SHOULD raise a channel
exception if the exchange is in use.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "active" access rights to the exchange's
access realm. |
def _queue_declare_ok(self, args):
queue = args.read_shortstr()
message_count = args.read_long()
consumer_count = args.read_long()
return queue, message_count, consumer_count | confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count. |
def basic_get(self, queue='', no_ack=False, ticket=None):
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(queue)
args.write_bit(no_ack)
self._send_method((60, 70), args)
return self.wait(allowed_methods=[
(60, 71), # Channel.basic_get_ok
(60, 72), # Channel.basic_get_empty
]) | direct access to a queue
This method provides a direct access to the messages in a
queue using a synchronous dialogue that is designed for
specific types of application where synchronous functionality
is more important than performance.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
Non-blocking, returns a message object, or None. |
def _basic_return(self, args, msg):
reply_code = args.read_short()
reply_text = args.read_shortstr()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
self.returned_messages.put(
(reply_code, reply_text, exchange, routing_key, msg)
) | return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published. |
def _wait_method(self, channel_id, allowed_methods):
#
# Check the channel's deferred methods
#
method_queue = self.channels[channel_id].method_queue
for queued_method in method_queue:
method_sig = queued_method[0]
if (allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40)):
method_queue.remove(queued_method)
return queued_method
#
# Nothing queued, need to wait for a method from the peer
#
while True:
channel, method_sig, args, content = \
self.method_reader.read_method()
if (channel == channel_id) \
and ((allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40))):
return method_sig, args, content
#
# Certain methods like basic_return should be dispatched
# immediately rather than being queued, even if they're not
# one of the 'allowed_methods' we're looking for.
#
if (channel != 0) and (method_sig in Channel._IMMEDIATE_METHODS):
self.channels[channel].dispatch_method(method_sig, args, content)
continue
#
# Not the channel and/or method we were looking for. Queue
# this method for later
#
self.channels[channel].method_queue.append((method_sig, args, content))
#
# If we just queued up a method for channel 0 (the Connection
# itself) it's probably a close method in reaction to some
# error, so deal with it right away.
#
if channel == 0:
self.wait() | Wait for a method from the server destined for
a particular channel. |
def channel(self, channel_id=None):
if channel_id in self.channels:
return self.channels[channel_id]
return Channel(self, channel_id) | Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. |
def _close(self, args):
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._x_close_ok()
raise AMQPConnectionException(reply_code, reply_text, (class_id, method_id)) | request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. |
def _x_open(self, virtual_host, capabilities='', insist=False):
args = AMQPWriter()
args.write_shortstr(virtual_host)
args.write_shortstr(capabilities)
args.write_bit(insist)
self._send_method((10, 40), args)
return self.wait(allowed_methods=[
(10, 41), # Connection.open_ok
(10, 50), # Connection.redirect
]) | open connection to virtual host
This method opens a connection to a virtual host, which is a
collection of resources, and acts to separate multiple
application domains within a server.
RULE:
The client MUST open the context before doing any work on
the connection.
PARAMETERS:
virtual_host: shortstr
virtual host name
The name of the virtual host to work with.
RULE:
If the server supports multiple virtual hosts, it
MUST enforce a full separation of exchanges,
queues, and all associated entities per virtual
host. An application, connected to a specific
virtual host, MUST NOT be able to access resources
of another virtual host.
RULE:
The server SHOULD verify that the client has
permission to access the specified virtual host.
RULE:
The server MAY configure arbitrary limits per
virtual host, such as the number of each type of
entity that may be used, per connection and/or in
total.
capabilities: shortstr
required capabilities
The client may specify a number of capability names,
delimited by spaces. The server can use this string
to decide how to process the client's connection request.
insist: boolean
insist on connecting to server
In a configuration with multiple load-sharing servers,
the server may respond to a Connection.Open method
with a Connection.Redirect. The insist option tells
the server that the client is insisting on a
connection to the specified server.
RULE:
When the client uses the insist option, the server
SHOULD accept the client connection unless it is
technically unable to do so. |
def _open_ok(self, args):
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
return None | signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr |
def _redirect(self, args):
host = args.read_shortstr()
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Redirected to [%s], known_hosts [%s]' % (host, self.known_hosts))
return host | asks the client to use a different server
This method redirects the client to another server, based on
the requested virtual host and/or capabilities.
RULE:
When getting the Connection.Redirect method, the client
SHOULD reconnect to the host specified, and if that host
is not present, to any of the hosts specified in the
known-hosts list.
PARAMETERS:
host: shortstr
server to connect to
Specifies the server to connect to. This is an IP
address or a DNS name, optionally followed by a colon
and a port number. If no port number is specified, the
client should use the default port number for the
protocol.
known_hosts: shortstr |
def _start(self, args):
self.version_major = args.read_octet()
self.version_minor = args.read_octet()
self.server_properties = args.read_table()
self.mechanisms = args.read_longstr().split(' ')
self.locales = args.read_longstr().split(' ')
AMQP_LOGGER.debug('Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s'
% (self.version_major, self.version_minor,
str(self.server_properties), self.mechanisms, self.locales)) | start connection negotiation
This method starts the connection negotiation process by
telling the client the protocol version that the server
proposes, along with a list of security mechanisms which the
client can use for authentication.
RULE:
If the client cannot handle the protocol version suggested
by the server it MUST close the socket connection.
RULE:
The server MUST provide a protocol version that is lower
than or equal to that requested by the client in the
protocol header. If the server cannot support the
specified protocol it MUST NOT send this method, but MUST
close the socket connection.
PARAMETERS:
version_major: octet
protocol major version
The protocol major version that the server agrees to
use, which cannot be higher than the client's major
version.
version_minor: octet
protocol minor version
The protocol minor version that the server agrees to
use, which cannot be higher than the client's minor
version.
server_properties: table
server properties
mechanisms: longstr
available security mechanisms
A list of the security mechanisms that the server
supports, delimited by spaces. Currently ASL supports
these mechanisms: PLAIN.
locales: longstr
available message locales
A list of the message locales that the server
supports, delimited by spaces. The locale defines the
language in which the server will send reply texts.
RULE:
All servers MUST support at least the en_US
locale. |
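Per the rule above, a client that cannot handle the proposed protocol version must close the socket. A minimal sketch of such a guard; the expected (8, 0) version and the self.transport attribute are assumptions about the surrounding class:

# Hypothetical guard, run after _start() has parsed the method:
if (self.version_major, self.version_minor) != (8, 0):    # assumed supported version
    self.transport.close()                                 # assumed transport attribute
    raise Exception('Unsupported AMQP protocol version %d.%d'
                    % (self.version_major, self.version_minor))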
def _x_start_ok(self, client_properties, mechanism, response, locale):
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method((10, 11), args) | select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL
(RFC2222) to negotiate authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanism selected by the client,
which must be one of those specified by the server.
RULE:
The client SHOULD authenticate using the highest-
level security profile it can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the
security mechanisms proposed by the server in the
Start method. If it doesn't, the server MUST close
the socket.
response: longstr
security response data
A block of opaque data passed to the security
mechanism. The contents of this data are defined by
the SASL security mechanism. For the PLAIN security
mechanism this is defined as a field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message locale selected by the client, which
must be one of those specified by the server. |
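For the PLAIN case described above, the response blob is itself an AMQP field table holding LOGIN and PASSWORD. A sketch of building it with the AMQPWriter from this module; the 4-byte slice (dropping the table's length prefix) and the credential values are assumptions for illustration, and the mechanism name must be one the server actually advertised:

login_response = AMQPWriter()
login_response.write_table({'LOGIN': 'guest', 'PASSWORD': 'guest'})
response = login_response.getvalue()[4:]   # assumed: strip the leading table length
self._x_start_ok({'product': 'py-amqplib'}, 'PLAIN', response, 'en_US')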
def _tune(self, args):
self.channel_max = args.read_short() or self.channel_max
self.frame_max = args.read_long() or self.frame_max
self.method_writer.frame_max = self.frame_max
self.heartbeat = args.read_short()
self._x_tune_ok(self.channel_max, self.frame_max, 0) | propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat. |
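The reply that echoes the negotiated values back to the server is not shown in this record; a sketch of what it presumably looks like, mirroring _x_start_ok() above. The (10, 31) method signature for Connection.TuneOk is an assumption:

def _x_tune_ok(self, channel_max, frame_max, heartbeat):
    args = AMQPWriter()
    args.write_short(channel_max)
    args.write_long(frame_max)
    args.write_short(heartbeat)
    self._send_method((10, 31), args)   # assumed Connection.TuneOk signature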
def _send_method(self, method_sig, args=bytes(), content=None):
if isinstance(args, AMQPWriter):
args = args.getvalue()
self.connection.method_writer.write_method(self.channel_id,
method_sig, args, content) | Send a method for our channel. |
def read_frame(self):
frame_type, channel, size = unpack('>BHI', self._read(7))
payload = self._read(size)
ch = ord(self._read(1))
if ch == 206: # '\xce'
return frame_type, channel, payload
else:
raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch) | Read an AMQP frame. |
def write_frame(self, frame_type, channel, payload):
size = len(payload)
self._write(pack('>BHI%dsB' % size,
frame_type, channel, size, payload, 0xce)) | Write out an AMQP frame. |
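A worked example of the frame layout both read_frame() and write_frame() rely on: a 7-byte header (type, channel, size), the payload, then the 0xce end octet. The payload bytes are illustrative only:

from struct import pack, unpack

payload = b'\x00\x0a\x00\x0b'                      # hypothetical method payload
frame = pack('>BHI%dsB' % len(payload),
             1, 0, len(payload), payload, 0xce)    # type=1 (method), channel=0
frame_type, channel, size = unpack('>BHI', frame[:7])
assert (frame_type, channel, size) == (1, 0, 4)
assert frame[7:7 + size] == payload and frame[-1:] == b'\xce'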
def _setup_transport(self):
if HAVE_PY26_SSL:
if hasattr(self, 'sslopts'):
self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
else:
self.sslobj = ssl.wrap_socket(self.sock)
self.sslobj.do_handshake()
else:
self.sslobj = socket.ssl(self.sock) | Wrap the socket in an SSL object, either the
new Python 2.6 version, or the older Python 2.5 and
lower version. |
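A sketch of how the sslopts dict used above might be supplied from the caller's side. That the ssl keyword accepts a dict of wrap_socket() options, and the file paths, are assumptions for illustration:

conn = amqp.Connection('broker.example.com:5671', ssl={
    'keyfile': '/path/to/client.key',     # hypothetical paths
    'certfile': '/path/to/client.crt',
})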
def _shutdown_transport(self):
if HAVE_PY26_SSL and (self.sslobj is not None):
self.sock = self.sslobj.unwrap()
self.sslobj = None | Unwrap a Python 2.6 SSL socket, so we can call shutdown() |
def _read(self, n):
result = self.sslobj.read(n)
while len(result) < n:
s = self.sslobj.read(n - len(result))
if not s:
raise IOError('Socket closed')
result += s
return result | An SSL object's read() method may not supply as much data as
you're asking for, at least with extremely large messages (somewhere
> 16K) - observed in the test_channel.py test_large unittest.
def _write(self, s):
while s:
n = self.sslobj.write(s)
if not n:
raise IOError('Socket closed')
s = s[n:] | Write a string out to the SSL socket fully. |
def _setup_transport(self):
self._write = self.sock.sendall
self._read_buffer = bytes() | Setup to _write() directly to the socket, and
do our own buffered reads. |
def _read(self, n):
while len(self._read_buffer) < n:
s = self.sock.recv(65536)
if not s:
raise IOError('Socket closed')
self._read_buffer += s
result = self._read_buffer[:n]
self._read_buffer = self._read_buffer[n:]
return result | Read exactly n bytes from the socket |
def read_table(self):
self.bitcount = self.bits = 0
tlen = unpack('>I', self.input.read(4))[0]
table_data = AMQPReader(self.input.read(tlen))
result = {}
while table_data.input.tell() < tlen:
name = table_data.read_shortstr()
ftype = ord(table_data.input.read(1))
if ftype == 83: # 'S'
val = table_data.read_longstr()
elif ftype == 73: # 'I'
val = unpack('>i', table_data.input.read(4))[0]
elif ftype == 68: # 'D'
d = table_data.read_octet()
n = unpack('>i', table_data.input.read(4))[0]
val = Decimal(n) / Decimal(10 ** d)
elif ftype == 84: # 'T'
val = table_data.read_timestamp()
elif ftype == 70: # 'F'
val = table_data.read_table() # recurse
else:
raise ValueError('Unknown table item type: %s' % repr(ftype))
result[name] = val
return result | Read an AMQP table, and return as a Python dictionary. |
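A sketch of the wire format read_table() consumes, built by hand for a single long-string ('S') field. This is a standalone snippet with illustrative field names; whether the parsed value comes back as bytes or str depends on the Python version in use:

from struct import pack

name, value = b'product', b'py-amqplib'
item = pack('B', len(name)) + name + b'S' + pack('>I', len(value)) + value
table = pack('>I', len(item)) + item      # 4-byte table length, then the items
# AMQPReader(table).read_table() should yield {'product': 'py-amqplib'}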
def write_bit(self, b):
if b:
b = 1
else:
b = 0
shift = self.bitcount % 8
if shift == 0:
self.bits.append(0)
self.bits[-1] |= (b << shift)
self.bitcount += 1 | Write a boolean value. |
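A sketch of the resulting bit packing. It assumes the module's AMQPWriter with its _flushbits()/getvalue() behaviour; the byte values shown are the expected outcome, not verified output:

w = AMQPWriter()
w.write_bit(True)    # bit 0
w.write_bit(False)   # bit 1
w.write_bit(True)    # bit 2  -> 0b101 == 0x05
w.write_octet(42)    # _flushbits() emits the packed octet first
# w.getvalue() -> b'\x05\x2a'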
def write_octet(self, n):
if (n < 0) or (n > 255):
raise ValueError('Octet out of range 0..255')
self._flushbits()
self.out.write(pack('B', n)) | Write an integer as an unsigned 8-bit value. |
def write_short(self, n):
if (n < 0) or (n > 65535):
raise ValueError('Short out of range 0..65535')
self._flushbits()
self.out.write(pack('>H', n)) | Write an integer as an unsigned 16-bit value. |
def write_long(self, n):
if (n < 0) or (n >= (2**32)):
raise ValueError('Long out of range 0..2**32-1')
self._flushbits()
self.out.write(pack('>I', n)) | Write an integer as an unsigned 32-bit value.
def write_longlong(self, n):
if (n < 0) or (n >= (2**64)):
raise ValueError('Longlong out of range 0..2**64-1')
self._flushbits()
self.out.write(pack('>Q', n)) | Write an integer as an unsigned 64-bit value. |
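For reference, the struct formats the four integer writers above use, as a quick standalone check (illustrative only):

from struct import pack

assert pack('B', 255) == b'\xff'                   # write_octet
assert pack('>H', 65535) == b'\xff\xff'            # write_short
assert pack('>I', 2 ** 32 - 1) == b'\xff' * 4      # write_long
assert pack('>Q', 2 ** 64 - 1) == b'\xff' * 8      # write_longlong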