text
stringlengths 29
850k
|
---|
#!/usr/bin/python
#
# html2markdown
# Copyright 2005 Dale Sedivec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# XXX
# TODO:
# * Change constant names to upper case.
# * Test wrapping of HTML in Markdown source with long attributes that
# have whitespace in their contents.
# * Should probably put non-breaking spaces in the middle of a
# Markdown image markup.
# * Stop all the interpolation and concatenation operations and take
# advantage of buffers more (use write not +)
# * In code, do a consistency check WRT indentation on continued
# statements.
# * Look at inline HTML in indented block elements (block quote, list,
# maybe code block)
# * Test CLI.
# * Check through for classes that are too big (refactoring)
# * Write test for <li>[whitespace]<p>...</p></li>. I'm not sure that
# Markdown will ever generate this, but it still looks likely to
# happen in hand-written HTML.
# * Make test with numeric entity to make sure handle_charref is
# implemented.
# * It's possible that (almost) everywhere we do an isinstance()
# check, we should really be doing some kind of hasFeature() check,
# hasFeature() being a method we implement? More flexible.
from HTMLParser import HTMLParser
from StringIO import StringIO
import logging
import textwrap
import re
import string
import inspect
import sys
from itertools import repeat, chain
WRAP_AT_COLUMN = 70
# XXX This is kind of dumb, really, since certain types of syntax
# demand certain types of indents. To parameterize this, we should
# probably find all indent instances, change them to this variable,
# then see what breaks with one indent or the other and hard code that
# particular indent.
MARKDOWN_INDENT = " "
log = logging.getLogger("html2markdown")
try:
any
except NameError:
def any(items):
for item in items:
if item:
return True
return False
def all(items):
for item in items:
if not item:
return False
return True
# XXX TEST this is not tested? Plus it probably doesn't belong here.
# At least document it.
# def getMyCaller(): #pragma: no cover
# try:
# callerFrame = inspect.getouterframes(inspect.currentframe())[2]
# return "%s:%d" % (callerFrame[3], callerFrame[2])
# finally:
# del callerFrame
class Box (object):
def __init__(self):
self.parent = None
def render(self, writer):
raise NotImplementedError("you must overload this") #pragma: no cover
width = property(fget=lambda self: self.parent.width)
class ContainerBox (Box):
def __init__(self):
super(ContainerBox, self).__init__()
self.children = []
def addChild(self, child):
self.children.append(child)
child.parent = self
def makeChild(self, childClass):
child = childClass()
self.addChild(child)
return child
class CompositeBox (ContainerBox):
def __init__(self, addNewLines=True):
super(CompositeBox, self).__init__()
self.__addNewLineAfterChild = []
self.__addNewLines = addNewLines
def addChild(self, child):
super(CompositeBox, self).addChild(child)
self.__addNewLineAfterChild.append(self.__addNewLines)
def insertNewLineAfterChild(self, childIndex):
assert childIndex >= 0, childIndex
self.__addNewLineAfterChild[childIndex] = True
def insertNewLineBeforeLastChild(self):
self.__addNewLineAfterChild[-2] = True
def render(self, writer):
if self.children:
assert len(self.__addNewLineAfterChild) == len(self.children)
addNewLine = iter(self.__addNewLineAfterChild)
self.children[0].render(writer)
for child in self.children[1:]:
if addNewLine.next():
writer("\n")
child.render(writer)
class RootBox (CompositeBox):
# Override the property set in a superclass. (XXX Is this the
# cleanest way to do this?)
width = None
def __init__(self, width):
super(RootBox, self).__init__()
self.width = width
def ijoin(iterable, joinString):
"""Yields joinString between items from iterable.
s.join(i) == "".join(ijoin(i, s))
"""
iterator = iter(iterable)
yield iterator.next()
for item in iterator:
yield joinString
yield item
class TextBox (Box):
def __init__(self):
self.__lines = [StringIO()]
def addText(self, text):
self.__lines[-1].write(text)
def addLineBreak(self):
self.__lines.append(StringIO())
def _iterLines(self):
for line in self.__lines:
yield line.getvalue()
def render(self, writer):
for string in ijoin(self._iterLines(), " \n"):
writer(string)
if string[-1] != "\n":
writer("\n")
class iterAllButLast (object):
def __init__(self, iterable):
self._iterator = iter(iterable)
def __iter__(self):
lastItem = self._iterator.next()
for item in self._iterator:
yield lastItem
lastItem = item
self.last = lastItem
class WrappedTextBox (TextBox):
__wordBoundaryRegexp = re.compile(r'(\s+)')
def render(self, writer):
def fill(line, lastLineSuffix=""):
return self.__fill(line, self.width, lastLineSuffix, writer)
lines = iterAllButLast(self._iterLines())
for line in lines:
writer(fill(line, " "))
writer(fill(lines.last))
# XXX REFACTOR I'd say refactor this, but right now I don't see a
# particularly clean way to do it.
#
# There should be a way, though. All this code seems so verbose,
# if not needlessly complex.
def __fill(self, text, width, lastLineSuffix, writer):
log.debug("fill text=%r suffix=%r" % (text, lastLineSuffix))
words = self.__splitTextIntoWordsAndSpaces(text)
firstSpace, firstWord = words.pop(0)
linePosition = self.__writeFirstWordOnLine(firstWord, writer)
for spaceBefore, word in words:
spaceLen = len(spaceBefore)
wordLen = len(word)
if (linePosition + spaceLen + wordLen) > width:
writer("\n")
self.__writeFirstWordOnLine(word, writer)
linePosition = wordLen
else:
writer(spaceBefore)
writer(word)
linePosition += spaceLen + wordLen
writer(lastLineSuffix)
writer("\n")
# The second grouping prevents **strong** from tripping this
# regular expression.
__beginningOfLineTokens = re.compile(r"^([0-9]+\.|[*+-]([^*]|$)|#)")
def __writeFirstWordOnLine(self, word, writer):
"""Writes the first word using writer, adding escaping if needed.
Markdown assigns special meaning to certain tokens when they
appear at the beginning of a line. We have to esacpe these
special characters if they happen to appear at the beginning
of a line after a paragraph is wrapped. This function will
return the total number of characters written, which might be
bigger than len(word) if an escape character is added.
"""
wordLen = len(word)
tokenMatch = self.__beginningOfLineTokens.search(word)
if tokenMatch:
matchEndPosition = tokenMatch.end(1)
log.debug("word=%r matchEndPosition=%r" % (word, matchEndPosition))
writer(word[0:matchEndPosition - 1])
writer("\\")
writer(word[matchEndPosition - 1:])
return wordLen + 1
else:
log.debug("word=%r no match" % (word,));
writer(word)
return wordLen
def __splitTextIntoWordsAndSpaces(self, text):
"""
Builds and returns a list of tuples in the form (space
before word, word), where the spaces and words are determined
by splitting text on word boundaries. This is used primarily
by the fill() method.
"""
log.debug("splitTextIntoWordsAndSpaces: text=%r" % (text,))
parts = self.__wordBoundaryRegexp.split(text)
log.debug("splitTextIntoWordsAndSpaces: normalizing %r" % (parts,))
self.__normalizeSplitTextParts(parts)
log.debug("splitTextIntoWordsAndSpaces: after normalizing %r"
% (parts,))
words = []
lastWord = ""
for spaceBefore, word in zip(parts[::2], parts[1::2]):
spaceBefore = self.__normalizeWordSpacing(spaceBefore, lastWord)
words.append((spaceBefore, word))
lastWord = word
return words
def __normalizeWordSpacing(self, spaceBefore, precedingWord):
# If the input is "foo.\nbar" you'll end up with "foo. bar"
# even if you separate your sentences with two spaces. I'm
# not inclined to do anything to fix this until someone really
# bitches about it. Also, two spaces are "safer" than one in
# the case of (for example) "Mr.\nSmith".
if spaceBefore[0:2] == " " and precedingWord[-1] in ".?!:":
spaceBefore = " "
else:
spaceBefore = " "
return spaceBefore
def __normalizeSplitTextParts(self, parts):
"""
This method makes sure that the parts list is a list of space,
word, space, word, space, word, ... The first element in the
list will always be the empty string (an empty space).
This method is used by the wrapping code.
"""
if parts[0] == "":
del parts[1]
else:
parts.insert(0, "")
if parts[-1] == "":
del parts[-2:]
assert (len(parts) % 2) == 0, "List normalizing failed: %r" % (parts,)
class IndentedBox (ContainerBox):
def __init__(self, indent, firstLineIndent=None):
super(IndentedBox, self).__init__()
self.__indentLength = len(indent)
self.__subsequentLineIndent = indent
if firstLineIndent is not None:
assert len(firstLineIndent) == self.__indentLength
self.__firstLineIndent = firstLineIndent
else:
self.__firstLineIndent = indent
def render(self, writer):
childRendering = StringIO()
self.__renderChildren(childRendering.write)
self.__rewindFile(childRendering)
self.__renderLinesFromFile(childRendering, writer)
def __renderLinesFromFile(self, childRendering, writer):
indentGenerator = chain([self.__firstLineIndent],
repeat(self.__subsequentLineIndent))
for line in childRendering:
indent = indentGenerator.next()
if self.__isBlankLine(line):
indent = indent.rstrip()
writer(indent)
writer(line)
def __isBlankLine(self, line):
return not line.rstrip("\r\n")
def __rewindFile(self, childRendering):
childRendering.seek(0)
def __renderChildren(self, writer):
for child in self.children:
child.render(writer)
def _getWidth(self):
return super(IndentedBox, self).width - self.__indentLength
width = property(fget=_getWidth)
class RawTextBox (TextBox):
"""A TextBox whose contents shouldn't have Markdown elements escaped."""
pass
# Based on DOM. Should probably refer to this as MDDOM (Markdown
# DOM). I think I used "micro-DOM" somewhere else.
class Node (object):
def __init__(self):
self.parent = None
class ContainerNode (Node):
def __init__(self):
super(ContainerNode, self).__init__()
self.children = []
def makeChild(self, type):
child = type()
self.addChild(child)
return child
def addChild(self, child):
self.children.append(child)
child.parent = self
# An InlineNode is a Node that does not render to a Box, but rather
# modifies the Box inside which it occurs. Currently this is used to
# mark Nodes whose transformation requires a Box that supports
# addText().
class InlineNode (Node):
pass
# A TextContainer is a ContainerNode that may also hold
# TextRelatedNodes. The HTML parser will ignore text that occurs
# outside of a TextContainer.
class TextContainer (ContainerNode):
pass
class InlineTextContainer (InlineNode, TextContainer):
pass
class Text (InlineNode):
def __init__(self, text=""):
super(Node, self).__init__()
self.text = text
class Document (ContainerNode):
pass
class List (ContainerNode):
pass
class OrderedList (List):
def getChildIndex(self, child):
return self.children.index(child)
class UnorderedList (List):
pass
class ListItem (TextContainer):
def getItemNumber(self):
# This method is only valid when this is an item in an
# OrderedList. Obviously.
return self.parent.getChildIndex(self) + 1
class BlockQuote (ContainerNode):
pass
class Paragraph (TextContainer):
pass
class Preformatted (TextContainer):
pass
class HTML (TextContainer):
pass
class Code (InlineTextContainer):
pass
class Emphasized (InlineTextContainer):
pass
class Strong (InlineTextContainer):
pass
class LineBreak (InlineNode):
pass
class Image (InlineNode):
def __init__(self, url, alternateText=None, title=None):
super(Image, self).__init__()
self.url = url
self.alternateText = alternateText
self.title = title
class Heading (TextContainer):
def __init__(self, level):
super(Heading, self).__init__()
self.level = level
class HorizontalRule (Node):
pass
class Anchor (InlineTextContainer):
def __init__(self, url, title=None):
super(Anchor, self).__init__()
self.url = url
self.title = title
class UnknownInlineElement (InlineTextContainer):
def __init__(self, tag, attributes):
super(UnknownInlineElement, self).__init__()
self.tag = tag
self.attributes = attributes
class MarkdownTransformer (object):
__formattingCharactersRegexp = re.compile(r"((?<=\S)([*_])|([*_])(?=\S))")
def transform(self, document):
rootBox = RootBox(width=WRAP_AT_COLUMN)
self.__dispatchChildren(document, rootBox)
return rootBox
def __dispatch(self, node, parentBox):
log.debug("Dispatching node=%r parentBox=%r" % (node, parentBox))
if isinstance(node, List):
nodeTypeName = "List"
else:
nodeTypeName = type(node).__name__
getattr(self, "_transform" + nodeTypeName)(node, parentBox)
# self.__handlers[type(node)](self, node, parentBox)
def __dispatchChildren(self, node, parentBox):
self.__dispatchList(node.children, parentBox)
def __dispatchList(self, nodeList, parentBox):
for node in nodeList:
self.__dispatch(node, parentBox)
def _transformParagraph(self, node, parentBox):
box = parentBox.makeChild(WrappedTextBox)
self.__dispatchChildren(node, box)
def _transformBlockQuote(self, node, parentBox):
indentedBox = IndentedBox(indent="> ")
parentBox.addChild(indentedBox)
dividedBox = indentedBox.makeChild(CompositeBox)
self.__dispatchChildren(node, dividedBox)
def _transformPreformatted(self, node, parentBox):
indentedBox = IndentedBox(indent=MARKDOWN_INDENT)
parentBox.addChild(indentedBox)
textBox = indentedBox.makeChild(TextBox)
self.__dispatchChildren(node, textBox)
def _transformText(self, node, parentBox):
if isinstance(node.parent, (HTML, Preformatted, Code)) \
or isinstance(parentBox, RawTextBox):
text = node.text
else:
text = self.__escapeFormattingCharacters(node.text)
parentBox.addText(text)
def __escapeFormattingCharacters(self, data):
escapedData = data.replace("\\", "\\\\")
escapedData = self.__formattingCharactersRegexp.sub(r"\\\1",
escapedData)
return escapedData
def _transformList(self, node, parentBox):
box = CompositeBox(addNewLines=False)
parentBox.addChild(box)
self.__dispatchChildren(node, box)
self.__addExplicitParagraphsInList(node, box)
# XXX REFACTOR if you dare. The list code (here and ListItem
# processing) is nigh incomprehensible. Of course, I can't even
# figure out how to simplify this function since the way it
# figures out where to put explicit paragraphs is so arcane (and
# the rules for how to generate <p></p> are, shall we say,
# "tedious").
def __addExplicitParagraphsInList(self, node, box):
paragraphAnalysis = []
for listItem in node.children:
isSingleParagraph = False
if isinstance(listItem.children[0], Paragraph):
isSingleParagraph = True
for child in listItem.children[1:]:
if isinstance(child, List):
break
elif not isinstance(child, Text):
isSingleParagraph = False
break
paragraphAnalysis.append(isSingleParagraph)
log.debug("paragraphAnalysis=%r" % (paragraphAnalysis,))
consecutiveSingleParas = 0
for childIndex, isSingleParagraph in enumerate(paragraphAnalysis):
if isSingleParagraph:
consecutiveSingleParas += 1
if consecutiveSingleParas >= 2:
box.insertNewLineAfterChild(childIndex - 1)
else:
if consecutiveSingleParas == 1:
if any([ isinstance(n, List) for n
in node.children[childIndex - 1].children ]):
# A List node's children can only be
# ListItems, and a ListItem always generates
# an outer CompositeBox, so box.children are
# all CompositeBoxes.
box.children[childIndex - 1].insertNewLineAfterChild(0)
else:
box.insertNewLineBeforeLastChild()
consecutiveSingleParas = 0
# XXX Near exact copy of above code.
if consecutiveSingleParas == 1:
if any([ isinstance(n, List) for n
in node.children[childIndex].children ]):
box.children[childIndex].insertNewLineAfterChild(0)
else:
box.insertNewLineBeforeLastChild()
# XXX REFACTOR
def _transformListItem(self, node, parentBox):
BOX_AT_BULLET_LEVEL = 1
BOX_AT_LIST_ITEM_LEVEL = 2
outerBox = CompositeBox(addNewLines=False)
parentBox.addChild(outerBox)
# XXX This code to determine indents will have a tendency to
# not work right if you want to make MARKDOWN_INDENT = "\t"
# (for example).
bulletIndent = " "
if isinstance(node.parent, OrderedList):
number = "%d. " % (node.getItemNumber(),)
number = number + " " * (4 - len(number))
# XXX Should we use len(number) instead of 4 here? Are
# more than four spaces on continued lines fine with
# Markdown?
indentedBox = IndentedBox(firstLineIndent=number,
indent=bulletIndent)
else:
indentedBox = IndentedBox(firstLineIndent="* ",
indent=bulletIndent)
outerBox.addChild(indentedBox)
innerBox = indentedBox.makeChild(CompositeBox)
children = node.children[:]
# The first child has to be in the indent box that has the
# list bullet.
if isinstance(children[0], InlineNode):
# A ListItem that starts with text can only have text or
# nested lists under it. I think.
log.debug("List item dispatching text children")
textBox = innerBox.makeChild(WrappedTextBox)
while children and isinstance(children[0], InlineNode):
self.__dispatch(children.pop(0), textBox)
elif isinstance(children[0], List):
# Immediate sublist.
listIndentBox = IndentedBox(indent=MARKDOWN_INDENT)
innerBox.addChild(listIndentBox)
self.__dispatch(children.pop(0), listIndentBox)
else:
self.__dispatch(children.pop(0), innerBox)
innerBoxType = BOX_AT_BULLET_LEVEL
for child in children:
if isinstance(child, Text):
# Ignore whitespace that occurs between elements.
continue
elif isinstance(child, (Preformatted, List)):
if innerBoxType != BOX_AT_LIST_ITEM_LEVEL:
innerBox = IndentedBox(indent=MARKDOWN_INDENT)
outerBox.addChild(innerBox)
if isinstance(child, Preformatted):
outerBox.insertNewLineBeforeLastChild()
innerBoxType = BOX_AT_LIST_ITEM_LEVEL
else:
if innerBoxType != BOX_AT_BULLET_LEVEL:
indentedBox = IndentedBox(indent=bulletIndent)
outerBox.addChild(indentedBox)
outerBox.insertNewLineBeforeLastChild()
innerBox = indentedBox.makeChild(CompositeBox)
innerBoxType = BOX_AT_BULLET_LEVEL
self.__dispatch(child, innerBox)
# XXX Might want to factor out this pattern.
def _transformHTML(self, node, parentBox):
box = parentBox.makeChild(TextBox)
self.__dispatchChildren(node, box)
__backtickRegexp = re.compile("`+")
def _transformCode(self, node, parentBox):
contents = self.__renderChildren(node)
codeDelimiter = self.__makeCodeDelimiter(contents)
parentBox.addText(codeDelimiter)
if contents[0] == "`":
parentBox.addText(" ")
parentBox.addText(contents)
if contents[-1] == "`":
parentBox.addText(" ")
parentBox.addText(codeDelimiter)
def __makeCodeDelimiter(self, content):
"""Returns the correct number of backticks to set off string as code.
Markdown requires you to use at least one more backtick to
introduce/conclude a code span than there are backticks within
the code span. For example, if contents="foo ``date`` bar",
Markdown would require ``` to be used to begin/end the code
span for that string.
"""
matches = self.__backtickRegexp.findall(content)
if matches:
codeDelimiterLength = max([ len(m) for m in matches ]) + 1
else:
codeDelimiterLength = 1
return "`" * codeDelimiterLength
def _transformEmphasized(self, node, parentBox):
parentBox.addText("_")
self.__dispatchChildren(node, parentBox)
parentBox.addText("_")
def _transformLineBreak(self, node, parentBox):
parentBox.addLineBreak()
def _transformImage(self, node, parentBox):
parentBox.addText("
parentBox.addText(node.url)
if node.title:
parentBox.addText(' "')
parentBox.addText(node.title)
parentBox.addText('"')
parentBox.addText(")")
def _transformHeading(self, node, parentBox):
box = parentBox.makeChild(TextBox)
box.addText("#" * node.level + " ")
self.__dispatchChildren(node, box)
box.addText(" " + node.level * "#")
def _transformHorizontalRule(self, node, parentBox):
box = parentBox.makeChild(TextBox)
box.addText("---")
def _transformAnchor(self, node, parentBox):
# Sometimes this renders the contents twice: once as "raw
# text" (no escaping of formatting characters) so we can match
# a URL that might have Markdown formatting characters in it
# (f.e. http://example.com/foo_bar_baz), and the second time
# with Markdown escaping if the contents aren't the same as
# the href.
linkContents = self.__renderChildren(node, boxType=RawTextBox)
url = node.url
isMailto = url.startswith("mailto:")
if linkContents == url or (isMailto and linkContents == url[7:]):
parentBox.addText("<")
parentBox.addText(linkContents)
parentBox.addText(">")
else:
parentBox.addText("[")
parentBox.addText(self.__renderChildren(node))
parentBox.addText("](")
parentBox.addText(url)
if node.title:
parentBox.addText(' "')
parentBox.addText(node.title)
parentBox.addText('"')
parentBox.addText(")")
def __renderChildren(self, node, boxType=TextBox):
textBox = boxType()
self.__dispatchChildren(node, textBox)
contents = StringIO()
textBox.render(contents.write)
return contents.getvalue().strip()
def _transformStrong(self, node, parentBox):
parentBox.addText("**")
self.__dispatchChildren(node, parentBox)
parentBox.addText("**")
def _transformUnknownInlineElement(self, node, parentBox):
write = parentBox.addText
write("<")
write(node.tag)
for name, value in node.attributes:
if '"' in value:
quotingChar = "'"
else:
quotingChar = '"'
write(" ")
write(name)
write('=')
write(quotingChar)
write(value)
write(quotingChar)
if node.children:
write(">")
self.__dispatchChildren(node, parentBox)
write("</")
write(node.tag)
write(">")
else:
write(" />")
# XXX TEST Should test this?
class LineNumberedBuffer (StringIO):
__eolRegexp = re.compile(r"(\r?\n)")
def __init__(self):
StringIO.__init__(self)
self.__linePositions = [0]
def write(self, string):
parts = self.__eolRegexp.split(string)
log.debug("LineNumberedBuffer write split parts=%r" % (parts,))
for part in parts:
StringIO.write(self, part)
if "\n" in part:
log.debug("new line at %d" % (self.tell(),))
self.__linePositions.append(self.tell())
log.debug("LineNumberedBuffer.write final pos=%d" % (self.tell(),))
def seekLinePosition(self, lineNumber, offset):
"""Seek to an offset from the start of line lineNumber.
The first line is 1, the first character on a line is 0. This
is in line with HTMLParser.getpos().
"""
position = self.__linePositions[lineNumber - 1] + offset
log.debug("seekLinePosition (%d,%d)=%d" % (lineNumber, offset,
position))
self.seek(position, 0)
log.debug("seekLinePosition tell=%d" % (self.tell(),))
assert self.tell() == position, "seekLinePosition failed"
# XXX Turn this into MDDOMParser, outputs MDDOM? Then you take the
# Document and ship it off to MarkdownTransformer. Should at least
# give this class a better name.
class MarkdownTranslator (HTMLParser):
__translatedEntities = {"amp": "&",
"lt": "<",
"gt": ">",
"quot": '"'}
__unsupportedBlockElements = ("dl", "div", "noscript", "form", "table",
"fieldset", "address")
def reset(self):
HTMLParser.reset(self)
self.__shouldOutputStack = [False]
self.__unsupportedElementDepth = 0
self.__unsupportedBlockStart = None
self.__input = LineNumberedBuffer()
self.__currentNode = Document()
def feed(self, text):
self.__input.write(text)
HTMLParser.feed(self, text)
def handle_starttag(self, tag, attrs):
if self.__unsupportedElementDepth:
self.__unsupportedElementDepth += 1
elif tag == "code" \
and isinstance(self.__currentNode,
Preformatted) \
and len(self.__currentNode.children) == 0:
# Special case: ignore <code> immediately following <pre>.
# Markdown emits <pre><code>...</code></pre> for a
# preformatted text block.
#
# XXX In the interest of moving to just a DOM HTML parser,
# I think I support moving this logic to
# MarkdownTransformer.
pass
else:
# XXX REFACTOR
element = None
handler = self.__recognizedTags.get(tag)
if handler:
if not isinstance(handler, type):
element = handler(self, tag, attrs)
isBlock = handler.isBlock
elif attrs:
isBlock = not issubclass(handler, InlineNode)
else:
element = self.__currentNode.makeChild(handler)
else:
isBlock = tag in self.__unsupportedBlockElements
if not element and not isBlock:
element = UnknownInlineElement(tag, attrs)
self.__currentNode.addChild(element)
if element:
self.__currentNode = element
self.__shouldOutputStack.append(isinstance(element,
TextContainer))
else:
self.__enterUnsupportedBlockElement()
def handle_endtag(self, tag):
log.debug("Leaving tag=%r" % (tag,))
if self.__unsupportedElementDepth:
log.debug("Leaving unsupported element")
self.__leaveUnsupportedElement()
elif tag == "code" and isinstance(self.__currentNode,
Preformatted):
# Special case for </code></pre>. See similar exception
# in handle_starttag() for explanation.
pass
else:
log.debug("Leaving element")
self.__leaveElement()
def __enterUnsupportedBlockElement(self):
self.__unsupportedElementDepth = 1
self.__unsupportedBlockStart = self.getpos()
# XXX REFACTOR
def __leaveUnsupportedElement(self):
self.__unsupportedElementDepth -= 1
log.debug("unsupportedBlockDepth=%r"
% (self.__unsupportedElementDepth,))
if not self.__unsupportedElementDepth:
log.debug("Finished with unsupported block element");
log.debug("positions begin=%r end=%r"
% (self.__unsupportedBlockStart, self.getpos()))
html = self.__getUnsupportedBlockElementHTML()
htmlNode = self.__currentNode.makeChild(HTML)
htmlNode.addChild(Text(html))
self.__positionInputBufferAtEnd()
# XXX Maybe refactor -- or rename to something shorter (applies to
# all methods following this naming convention).
def __getUnsupportedBlockElementHTML(self):
"""Side effect: repositions self.__input."""
endPosition = self.__getEndOfTagPosition(self.getpos())
self.__input.seekLinePosition(*self.__unsupportedBlockStart)
startPosition = self.__input.tell()
htmlLength = endPosition - startPosition
log.debug("endPosition=%d startPosition=%d len=%d"
% (endPosition, startPosition, htmlLength))
html = StringIO()
html.write(self.__input.read(htmlLength))
html.write("\n")
return html.getvalue()
def __getEndOfTagPosition(self, startAt):
"""Side effect: repositions self.__input."""
self.__input.seekLinePosition(*startAt)
self.__searchInputForTagClose()
return self.__input.tell()
def __searchInputForTagClose(self):
# XXX expensive debugging statement
log.debug("searchInputForTagClose pos=%d input=%r"
% (self.__input.tell(),
self.__input.getvalue()))
while True:
nextCharacter = self.__input.read(1)
if not nextCharacter:
assert False, "premature tag end in input" #pragma: no cover
elif nextCharacter == ">":
break
def __positionInputBufferAtEnd(self):
self.__input.seek(0, 2)
def __leaveElement(self):
assert len(self.__shouldOutputStack) > 1
self.__shouldOutputStack.pop()
self.__currentNode = self.__currentNode.parent
# XXX REFACTOR
def _enterImg(self, tag, attributes):
if True not in map(lambda attr: attr[0] not in ("src", "alt", "title"),
attributes):
attributes = dict(attributes)
parameters = {"url": attributes["src"]}
if "alt" in attributes:
parameters["alternateText"] = attributes["alt"]
if "title" in attributes:
parameters["title"] = attributes["title"]
image = Image(**parameters)
self.__currentNode.addChild(image)
return image
else:
return None
_enterImg.isBlock = False
__numericEntityRegexp = re.compile("&#(x[0-9A-F]{2}|[0-9]{2,3});")
def __substituteNumericEntity(self, match):
return self.__translateNumericEntity(match.group(1))
def __translateNumericEntity(self, ref):
if ref[0] == "x":
value = int(ref[1:], 16)
else:
value = int(ref)
if self.__shouldDecodeNumericEntity(value):
return chr(value)
else:
return "&#%s;" % (ref,)
def __shouldDecodeNumericEntity(self, characterCode):
return 32 <= characterCode <= 126
def _enterA(self, tag, attributes):
if all([ attr[0] in ("href", "title") for attr in attributes ]):
attributes = dict(attributes)
# XXX REFACTOR This indentation/wrapping is ugly and looks
# unnecessary. Should think about reducing name lengths.
href = self.__numericEntityRegexp.sub(
self.__substituteNumericEntity, attributes["href"])
anchor = Anchor(href, title=attributes.get("title", None))
self.__currentNode.addChild(anchor)
return anchor
else:
return None
_enterA.isBlock = False
# XXX TEST <h*> with attributes.
def _enterHeading(self, tag, attributes):
level = int(tag[1:])
heading = Heading(level)
self.__currentNode.addChild(heading)
return heading
_enterHeading.isBlock = True
def __shouldOutput(self):
return self.__shouldOutputStack[-1]
def handle_data(self, data):
if self.__shouldOutput():
log.debug("output %r" % (data,))
self.__currentNode.addChild(Text(data))
def handle_entityref(self, name):
log.debug("entity=%r" % (name,))
if not self.__unsupportedElementDepth:
if name in self.__translatedEntities:
self.handle_data(self.__translatedEntities[name])
else:
self.handle_data("&%s;" % (name,))
def handle_charref(self, ref):
if not self.__unsupportedElementDepth:
self.handle_data(self.__translateNumericEntity(ref))
# XXX some day we should probably change this interface to write
# to a file, or to a callable
def getOutput(self):
assert isinstance(self.__currentNode, Document), `self.__currentNode`
log.debug(self.__renderTreeForDebug(self.__currentNode))
box = MarkdownTransformer().transform(self.__currentNode)
log.debug(self.__renderTreeForDebug(box))
result = StringIO()
box.render(result.write)
return result.getvalue()
# XXX OPTIMIZE Could short-circuit this code when debug is off, as
# an alternative to not calling it (log.debug("%s" %
# (__renderTreeForDebug(),))).
def __renderTreeForDebug(self, node):
result = StringIO()
result.write("(%s" % (node.__class__.__name__,))
if hasattr(node, "children"):
for child in node.children:
result.write(" ")
result.write(self.__renderTreeForDebug(child))
result.write(")")
return result.getvalue()
__recognizedTags = {"p": Paragraph,
"blockquote": BlockQuote,
"ol": OrderedList,
"ul": UnorderedList,
"li": ListItem,
"code": Code,
"em": Emphasized,
"pre": Preformatted,
"br": LineBreak,
"img": _enterImg,
"hr": HorizontalRule,
"a": _enterA,
"strong": Strong}
for level in range(1, 10):
__recognizedTags["h%d" % (level,)] = _enterHeading
def html2markdown(html):
return html2markdown_file(StringIO(html))
def html2markdown_file(inputFile):
translator = MarkdownTranslator()
for line in inputFile:
translator.feed(line)
translator.close()
return 0, translator.getOutput()
if __name__ == "__main__": #pragma: no cover
logging.basicConfig()
if len(sys.argv) > 1:
inputFile = open(sys.argv[1], "r")
else:
inputFile = sys.stdin
status, output = html2markdown_file(inputFile)
if status == 0:
sys.stdout.write(output)
sys.exit(status)
|
The nutraceutical and useful nutrition box is speedily starting to be in varied sectors, together with educational, advertisement and govt. This has introduced a corresponding shift in examine concentration and in public know-how. realizing the relevance of the medical ideas in choosing the protection and effectiveness of useful meals and nutraceuticals is more and more vital.
Dietetics: perform and destiny developments, 3rd version, comprises new and up to date info reminiscent of stories on present actions, a evaluate of rising matters in dietetic perform, and extra. This 3rd version maintains to supply an summary of the profession possibilities for dietitians, explaining what they do, highlighting the explicit components of dietetic perform, and directory the necessities to develop into a dietitian.
Detox your physique for long-lasting beauty and health with Adina Niemerow’s great Cleanse. Newly revised and up to date with the simplest and most recent detoxing counsel, this easy-to-use consultant is a fit fusion of detoxifying recipes, therapeutic routines, and clean job principles, supplying 11 varied cleanses designed for physique, good looks, and spirit—from a 3-day face elevate to an energizing “winter get up” to a new “super slender down” cleanse.
Each year, healthcare charges raise whereas total health and wellbeing decreases. humans proceed to consume poorly, to realize weight, and to rely on medicinal drugs and operations to keep up their health--all whereas trying the most recent fad diets promising wonderful effects for his or her outward visual appeal. it is time for a fact money: there is not any one-size-fits-all nutrition.
Burlington, MA: Academic Press. Gerdes, L. , B. Jeune, K. A. Ranberg, H. Nybo, and J. W. Vaupel. 2000. Estimation of apolipoprotein E genotype-specific relative mortality risks from the distribution of genotypes in centenarians and middle-aged men: apolipoprotein E gene is a “frailty gene,” not a “longevity gene”. Genet. Epidemiol. 19:202–210. , J. O. Rahal, W. G. Beamer, N. G. Copeland, N. A. Jenkins, and K. E. Mayo. 1993. GHRH receptor of little mice contains a missense mutation in the extracellular domain that disrupts receptor function.
Johnson. 1988. A mutation in the age-1 gene in Caenorhabditis elegans lengthens life and reduces hermaphrodite fertility. Genetics 118:75–86. Fries, J. F. 1980. Aging, natural death, and the compression of morbidity. New Engl. J. Med. 303:130–135. , Q. M. Chen, S. Zdanov, J. P. Magalhães, J. Remacle, and O. Toussaint. 2001. Subcytotoxic H2O2 stress triggers a release of transforming growth factor-beta 1, which induces biomarkers of cellular senescence of human diploid fibroblasts. J. Biol. Chem.
Papaconstantinou, R. A. Miller, and D. E. Harrison. 2001. Lifespan extension and delayed immune and collagen aging in mutant mice with defects in growth hormone production. Proc. Natl Acad. Sci. USA 98:6736–6741. , J. Papaconstantinou, and D. E. Harrison. 2002. The Snell dwarf mutation Pit1(dw) can increase life span in mice. Mech. Ageing Dev. 123:121–130. , L. Partridge, and V. D. Longo. 2010. Extending healthy life span – from yeast to humans. Science 328:321–326. Förstermann, U. and W. C. Sessa.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJoblib(PythonPackage):
"""Python function as pipeline jobs"""
homepage = "http://packages.python.org/joblib/"
url = "https://pypi.io/packages/source/j/joblib/joblib-0.14.0.tar.gz"
import_modules = [
'joblib', 'joblib.externals', 'joblib.externals.cloudpickle',
'joblib.externals.loky', 'joblib.externals.loky.backend'
]
version('0.14.0', sha256='6fcc57aacb4e89451fd449e9412687c51817c3f48662c3d8f38ba3f8a0a193ff')
version('0.13.2', sha256='315d6b19643ec4afd4c41c671f9f2d65ea9d787da093487a81ead7b0bac94524')
version('0.11', sha256='7b8fd56df36d9731a83729395ccb85a3b401f62a96255deb1a77220c00ed4085')
version('0.10.3', sha256='29b2965a9efbc90a5fe66a389ae35ac5b5b0c1feabfc7cab7fd5d19f429a071d')
version('0.10.2', sha256='3123553bdad83b143428033537c9e1939caf4a4d8813dade6a2246948c94494b')
version('0.10.0', sha256='49b3a0ba956eaa2f077e1ebd230b3c8d7b98afc67520207ada20a4d8b8efd071')
depends_on('py-setuptools', when='@0.14:', type='build')
|
Why Invest in Egypt Property?
The benefit of buying specifically for investment purposes is the removal of emotion from your purchase while property is used purely as an investment vehicle. Egypt offers many possibilities in the form of re-assignable off-plan contract options to sell at a substantial profit prior to completion, or "buy-to-let" situations to generate reliable rental income and eventually substantial capital appreciation.
- Foreign investors are regarded by the government as a big investment opportunity and laws have recently streamlined procedures, making the purchase procedure easy.
- Increased inward investment, creating a rich investment climate.
The secret to a successful purchase of property abroad lies in finding the right property in the best location and snapping it up at the lowest price possible, before it becomes common knowledge. It is clear that wisely selected overseas property can offer some very secure and lucrative opportunities.
Whether your dream property abroad is a holiday apartment, town house, luxury villa or a plot of land, we are dedicated to helping you on your way, with independent advice and tailor-made investment plans for the purchase of your property abroad.
Why Invest in Morocco Property?
There are good reasons for investing in property rather than the Stock Market. Here you can read about the advantages of investing in a stable country like Morocco.
No investment today offers the stability and simplicity coupled with the excellent return offered by investing in property. The stock market can offer high returns but it is a very volatile and dangerous place. This is especially true for the non-professional as there are many external factors that can effect your financial investment. In recent years major stock markets have generally been under-performing and property investment now stands heads and shoulders above other forms of investment.
Property is now the wise investor’s weapon of choice. No other investment allows you to purchase with other peoples’ money (the bank) and then pay this back with other peoples’ money (the rental income from tenants). If you own property you can release equity against this, although there is no law that states that your property will increase in value year on year, it is accepted that a well maintained property in a reasonable area will appreciate in value.
- 50% of individuals mentioned on “The Times Rich List” made their money through investing in property.
- A property worth just €4,000 30 years ago would be today worth around €225,000.
- Equities or Stocks can be volatile, as with the .com crash. Property however is historically stable.
Buying specifically for investment allows you to remove the emotion you’re your purchase and look at the property as an investment vehicle. This might mean utilizing a re-assignable contract option and selling at a substantial profit prior to completion or taking the buy-to-let situation and generating a reliable rental income as well as substantial capital appreciation.
Why Invest In property in Morocco?
Morocco is a currently a unique location for property investors and offers the opportunity to purchase property early in an emerging market at prices very favourable to most other destinations.
What is unique about the property market in Morocco is the sense of security for investors: with tourism already improving at a fast rate and buy-to-let investors reporting 85% occupancy rates during the high season, Morocco offers a safe arena in which to purchase property.
The Moroccan King Mohammed VI and the UAE have allocated huge investment to drastically increasing tourism further, with a goal of 10 million per year by 2010. This investment along with the creation of several tax advantages also helps investors to feel comfortable in investing in property in Morocco.
- Huge project (Vision 2010) to increase tourism backed by King Mohammed and the UAE. The aims are to improve infrastructure and increase tourism by 10m visitors per year.
- Increased tourism generated by the Vision 2010 project will create huge requirement for rental accommodation.
- New roads, marinas, trains, 5 star resorts, shopping malls, beach clubs to be developed through Vision 2010 project.
- Morocco is a free market economy and allows free movement of money.
- Easily accessible (Tangiers) via helicopter, ferry, fast train or regular budget flights.
- “Open Skies” policy activated on 1st Jan 2006 will allow low cost airlines to service Morocco and create competition, which will lower fares.
Property purchase offers far greater returns today than most stock market investments and never before has international property been as popular as a highly lucrative opportunity to create wealth.
Worldwide property investors are now turning their attention to Brazil as it fast becomes a leader in the field of emerging markets. Returns on investment are considered to be excellent and investors are increasingly aware of the high growth potential that Brazil offers as a stable though fresh, new investment market.
- Property capital appreciation of 20% per annum in some locations.
- Favourable currency exchange rates, making property transactions cheap for foreign investors.
- President Lula’s progressive policies, bringing many improvements to Brazil, including a decrease in inflation to an all-time low at 5.7%.
- Active encouragement and incentives for foreign investment - you can own 100% of land and property.
- Cost of living only 20% of that in the UK/Europe and property maintenance costs extremely low.
- Some of the lowest property prices in the world.
- Increase of thriving manufacturing industries relocated to Brazil and boosting the economy.
- Expected self sufficiency in oil reserves within the next year.
- Some economists believe Brazil is amongst the leaders of the future, along with Russia, India and China.
- Year-round sunshine, with average summer temperatures of 21°C.
- Great natural beauty with fantastic scenery and 7,000km of beaches.
- Friendly nature of the Brazilian people.
- Vibrant cities with carnivals and music.
- Low international risk ofwar, terrorism or SARS in Brazil.
- Easy access via direct flights from many international airports.
Property purchase specifically as an investment vehicle is a popular option amongst those aiming to capitalize on the excellent growth potential of property in Brazil. This type of investment can consist of an off-plan purchase for sale prior to project completion. Alternatively, a buy-to-let situation can provide reliable rental income from the property, along with substantial capital appreciation.
According to independent investment experts, including many articles in international investment press, Brazil is now widely considered to be a highly profitable market. The economic climate and tourist infrastructure are currently undergoing major upheaval, while the country’s economic strategy has caused much growth in the past 3 years. While Brazil remains on a steady growth curve, investors are urged to catch this promising market from the very start, while prices remain low and high returns on investment are inevitable in current hotspot locations.
Brazil has excellent direct flight communications with the UK and the rest of Europe, making it a convenient holiday destination. This accessibility increases potential investment yields and reinforces the promising future of Brazil. A low cost of living at only 20% of that in the UK, great quality of life, a fantastic climate, Tropical beaches and a rich and vibrant culture are all valuable contributing factors to Brazil's success.
The time for buying bargain property in Italy is definitely not over. The Italian property market remains an exciting market to invest in.
With a highly active tourist market, Italy makes an attractive investment to the property buyer. Due to its central position, Italy is easily accessible from most European capitals.
Not only are there large areas of “undiscovered Italy”, but also many knowledgeable investors are predicting that if you buy property in Italy now, you will be buying ahead of a surge of investors that will be heading to Italy in the near future.
Low cost airlines have opened routes to many of Italy’s regional airports; this is having a knock on effect on property prices in these areas, especially as there are many destinations in the hitherto less popular places.
Many of the new areas that are finding their way onto the property market are on the islands of Sicily and Sardinia where property prices are about 25% cheaper than on the mainland. The regions of Puglia, Le Marche and Abruzzo are also up and coming investment regions. In these areas bargains ranging from as little as £20,000.00 can still be found.
The government has introduced new finance laws relating to the buying and selling of residential property between private individuals. Now the buyer declares the actual purchase price in the deed of sale but only pays tax on the difference of this re-valued rateable value. This helps to make the market more transparent and helps buyers not to become unwittingly involved in money laundering transactions.
For those who are interested in the buy-to-let market, the high tourist numbers coming to Italy are encouraging. Increasingly tourists are becoming interested not only in visiting the large cities, but in rural tourism as well. This bodes well for those who invest in converting old Italian houses into fabulous modern residences. There are government grants available for those undertaking renovation projects in rural areas.
The low cost airline revolution has also changed the way holidays are booked and holidaymakers tend to book over the internet. This has increased the demand for self-catering properties in these areas.
Foreign tourists are not the only market in Italy. Italians take great pleasure in enjoying their own country and make a point of travelling around it, therefore creating a strong internal market.
Investing in the north and around the lakes can double your rental prospects as you can benefit not only from the summer season but also from winter ski holidays. However remember property prices become more favourable the further south you go.
Italy is reforming its tax legislation. There is no wealth or capital gains tax. If you take out residency it is possible to cut your property costs considerably.
- Italy is a primary tourist destination.
- There is strong rental potential from both the tourist market and a strong internal market.
- Low cost airlines have put Italy at the centre of their growth.
- The property market is rising steadily up to 20% in “undiscovered” areas.
- There are still “bargains” to be had in some areas.
- There is no wealth or capital gains tax to be paid in Italy.
- Rural tourism is becoming a strong tourist market and there are grants for property renovation in rural areas.
The Property Overseas Group have identified and showcase specific projects we believe offer exceptional investment opportunity.
Property is cheap in absolute and relative terms. For example at this moment, an average villa will cost around $1,000 per square metre in comparison to London Docklands where it would cost $5,000 per square metre. There are very few modern cities in the world where high-standard property is priced so low.
Foreign OwnershipDubai is in the process of creating an international property market from scratch, with foreign ownership of freehold only introduced last year. Thus buyers are given an exceptionally good deal to encourage them to be pioneers.
Shortage of supplyThe Dubai Government is working hard to prevent a shortage of supply and is giving land to developers as an incentive. What looks like massive supply today in Dubai may be nothing compared with demand in a few years time. Dubai is after all growing its GDP by 7-8% a year and shows no sign of slowing down, quite the contrary.
Rising Building CostsA fundamental influence on property prices are rising building costs. The low US dollar is pushing up the cost of materials from Europe which is pushing energy prices to its peak.
Tax Free IncomeDubai is a city where a lot of people earn high tax-free salaries and are in a position to support higher house prices. This is a city with a 20-year track record of strong economic growth and will continue to attract foreign and regional inward investment.
International Business CentreLong recognised as the leading regional trading hub of the Middle East, Dubai has now become an international business and re-export centre.
The country has developed rapidly over the past 10 years and has transformed itself from an oil dependent regional entrepot into a highly diversified international business centre of global significance, which offers opportunities for UK business people in all sectors. At present, over 500 UK companies have been set up in Dubai.
TourismOver 3.4 million tourists visited Dubai in 2001 and this figure is expected to grow to in excess of 6.0 million by 2010. Dubai is going from strength to strength.
Whether your dream property abroad is a holiday apartment, town house, luxury villa or a plot of land, we are dedicated to helping you on your way, with independent advice and tailor-made investment plans for the purchase of your property abroad.The Property Overseas Group have identified and showcase specific projects we believe offer exceptional investment opportunity.
- Turkey has a huge population of 70+ million. This creates a strong internal property market meaning investors are not reliant on international investors for resale.
|
import os
import requests
URL_TEMPLATE = "http://yjsjy.hust.edu.cn/Uploadfiles/StudentPhoto/%s.jpg"
SAVE_TO_DIR_ROOT = "D:/HUST"
def mkdirs_if_not_exist(dir_name):
"""
create new folder if not exist
:param dir_name:
:return:
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def crawl_avatar(avatar_url):
response = requests.get(avatar_url, timeout=20)
if response.status_code != 404:
avatar_filename = avatar_url.split('/')[-1]
year = avatar_filename[0:4]
college = avatar_filename[4:7]
mkdirs_if_not_exist(os.path.join(SAVE_TO_DIR_ROOT, year, college))
with open(os.path.join(SAVE_TO_DIR_ROOT, year, college, avatar_filename), mode='wb') as f:
f.write(response.content)
f.flush()
f.close()
print('{0} has been downloaded...'.format(avatar_filename))
if __name__ == '__main__':
for year in [2008, 2009, 2010, 2011, 2012]:
for college in [_ for _ in range(301, 320)]:
for i in range(200):
if i < 10:
idx = str(year) + str(college) + "01000" + str(i)
elif 10 <= i < 100:
idx = str(year) + str(college) + "0100" + str(i)
else:
idx = str(year) + str(college) + "010" + str(i)
try:
crawl_avatar(URL_TEMPLATE % str(idx))
except:
pass
|
This guide was first published on Sep 17, 2013. It was last updated on Sep 17, 2013. This page (Figure out your chipset) was last updated on Apr 22, 2019.
|
#!/usr/bin/env python3
# pedalboard server
import socket
from threading import Thread
from gpiozero import Button
from signal import pause
from time import sleep, time
from radio import Radio
TCP_IP = '0.0.0.0'
TCP_PORT = 31415
ENC = 'UTF-8'
NEW_PRESS_DELAY = 0.3 # in seconds
CONNECTIONS_LIMIT = 5
buttons_map = [
(1, 2),
(2, 3),
(3, 4),
(4, 17),
(5, 27),
(6, 22),
(7, 10),
(8, 9),
(9, 11)
]
class BtnsThread(Thread):
is_dead = True
buttons = None
def __init__(self, radio):
self.is_dead = False
self.radio = radio
self.last_press_time = 0
self.is_released = True
super().__init__()
def __del__(self):
if self.is_dead: return
print('Stopping listening for buttons…')
if self.buttons is not None:
for btn in self.buttons:
btn[1].when_pressed = None
btn[1].when_released = None
del self.buttons
del self.radio
del self.last_press_time
del self.is_released
del self.is_dead
def pressed(self, n):
def f():
if time() - (self.last_press_time + NEW_PRESS_DELAY) <= 0: return
print('Pressed button #%d' % n)
self.last_press_time = time()
self.is_released = False
self.radio.trigger('button pressed', n=n)
return f
def released(self, n):
def f():
if self.is_released: return
print('Released button #%d' % n)
self.is_released = True
self.radio.trigger('button released', n=n)
return f
def run(self):
self.buttons = [(x[0], Button(x[1])) for x in buttons_map]
for btn in self.buttons:
btn[1].when_pressed = self.pressed(btn[0])
btn[1].when_released = self.released(btn[0])
print('Started buttons listening')
class SocketThread(Thread):
is_dead = True
def __init__(self, radio, conn, addr):
self.is_dead = False
self.radio = radio
self.conn = conn
self.addr = addr
self.radio.trigger('add connection', connection=self)
self.radio.on('close connections', self.__del__)
super().__init__()
def __del__(self):
if self.is_dead: return
self.radio.off('close connections', self.__del__)
self.radio.off('button pressed', self.send_pressed, soft=True)
self.radio.off('button released', self.send_released, soft=True)
self.conn.close()
self.radio.trigger('remove connection', connection=self)
print('Connection lost for:', self.addr)
del self.radio
del self.conn
del self.addr
del self.is_dead
def send_pressed(self, n):
try:
self.conn.send(bytes('button pressed|%d' % n, ENC))
print('Sent about button pressed to', self.addr)
except BrokenPipeError:
self.__del__()
def send_released(self, n):
try:
self.conn.send(bytes('button released|%d' % n, ENC))
print('Sent about button released to', self.addr)
except BrokenPipeError:
self.__del__()
def run(self):
print('Address connected:', self.addr)
self.radio.on('button pressed', self.send_pressed)
self.radio.on('button released', self.send_released)
class ConnectionsHandler:
is_dead = True
def __init__(self, radio):
self.is_dead = False
self.connections = []
self.radio = radio
self.radio.reply('opened connections count', self.get_connections_count)
self.radio.on('add connection', self.register_connection)
self.radio.on('remove connection', self.unregister_connection)
print('Started connections handling')
def __del__(self):
if self.is_dead: return
self.radio.stopReplying(
'opened connections count',
self.get_connections_count
)
self.radio.off('add connection', self.register_connection)
self.radio.off('remove connection', self.unregister_connection)
for conn in self.connections:
conn.__del__()
del conn
print('Stopped connections handling')
del self.connections
del self.radio
del self.is_dead
def register_connection(self, connection):
for conn in self.connections:
if conn == connection:
raise Exception('Connection already registered')
self.connections.append(connection)
def unregister_connection(self, connection):
new_connections = []
for conn in self.connections:
if conn != connection:
new_connections.append(conn)
if len(new_connections) == len(self.connections):
raise Exception('Connection not found to unregister')
elif len(new_connections) != len(self.connections) - 1:
raise Exception('More than one connection to unregister')
else:
self.connections = new_connections
def get_connections_count(self):
return len(self.connections)
radio = Radio()
btns = BtnsThread(radio)
btns.start()
conn_handler = ConnectionsHandler(radio)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(CONNECTIONS_LIMIT)
try:
print('Starting listening for socket connections…')
while True:
conn, addr = s.accept()
SocketThread(radio, conn, addr).start()
except (KeyboardInterrupt, SystemExit):
print('Exiting… Closing all connections…')
radio.trigger('close connections')
while True:
conns_count = radio.request('opened connections count')
if conns_count == 0: break
sleep(0.1)
conn_handler.__del__()
del conn_handler
btns.__del__()
del btns
radio.__del__()
del radio
s.shutdown(socket.SHUT_RDWR)
print('Done')
|
Click here for New Member Application Form PDF, save to your PC, fill out then email using the button. Or you can print and complete by hand and send by USPS.
You will need to have Adobe Acrobat Reader installed on your PC or mobile device to fill out the form. The Windows PC version is available here, mobile device versions can be found in your respective app store.
Members are the life of the Crescent Boat Club and reason for its being.
Individuals and families join the Crescent Boat Club to associate with others who share our boating pastime as boating is much more than just floating around. The club provides exceptional ammenities, member priviledges, social interactions, and superior cost advantages not found in any marina setting.
As a member, you become "part owner" of the club, its property and facilities. Although we are governed by an elected board of directors and organizational bylaws, all members are equal in standing. The only exception to equal standing is that membership seniority is considered in awarding dock assignments.
Since we are a working club, many of our activities are great opportunities for you to get your required hours, along with meeting new people and sharing a few laughs. Single memberships require 25 hours and family memberships require 50 hours, per year. Don’t let this scare you, as you will be very surprised how quickly they accumulate. Hours can be accrued by working club functions, gardening, helping with dock-in and dock-out, cleaning, cooking, writing, committee membership, etc. Whatever your specialty we will utilize it. That's what makes our club great!
The Crescent Boat Club is a member of the Mohawk-Hudson Council of Yacht Clubs. This relationship provides members with access to the other clubs in the Council. It allows for reciprocating (free) dockage, should you decide to travel to or beyond one of these clubs. It also means more social activities as the other clubs invite us to their social functions. Groups from CBC often travel together by boat to the various clubs and destinations, for example Lake Champlain, the Long Island Sound, Sylvan Beach etc. It is a great way to travel, learn the local waters and build relationships with other members. If you are looking to expand your boating adventures this is the place to do it!
There are basic differences between boat clubs and marinas. Aside from the obvious social activity, the boat club provides a sense of unity among its members. Most of us are members of the Boat Owners Association of The United States (BoatUS), a lobbying group that supports boating issues that offers discounted boating supplies, marine insurance and boat loans. As a member of the Crescent Boat Club, BoatUS offers a discount on their membership fee.
Thank you for your interest in joining the Crescent Boat Club! We recommend you familiarize yourself with the content on these pages to help decide if the Crescent Boat Club is for you. If you would like to stop by to take a look around and get some info, please call the clubhouse phone (518) 371-9864 to be sure someone will be there to meet you and show you around. Or, you can contact the membership committee chair (Gordy Engel) on (518) 392-9766 to chat and arrange a visit.
Please complete the new member application (link at top of page) and forward it to the membership committee chair. The membership committee chair will contact you to answer your questions, provide insight, arrange a tour of the facilities, and/or schedule a meeting with the 3 person membership committee who will interview you as well as answer new questions. We want you to feel welcomed and informed.
With the comittee's favorable recommendation, the board of directors will vote to approve you as a probationary member for the term of one year. You will enjoy full membership privileges and be assigned a big brother and/or big sister to help you and your family get acquainted, answer questions and introduce you to other members.
We want people to feel at home at the Crescent Boat Club. In your first year as a probationary member and to become familiar with things, you're expected to attend at least half of monthly membership meetings and half of the year's social events in addition to meeting your hours requirement. Its easy to do. At the end of one year you will be voted on to become a full member.
|
from datetime import date
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseForbidden)
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.templatetags.static import static
from random import shuffle
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.platypus import (
SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer,
BaseDocTemplate, Frame, PageTemplate, Image)
from reportlab.platypus.flowables import PageBreak
from database.views import is_teacher, is_admin, is_student
from database.models import *
from feedback.models import *
from feedback.categories import *
from anonymous_marking.models import *
from mysds.unisettings import *
# The different marking categories are in feedback/categories.py
# Helper Functions
def logo():
"""Returns the university logo, unless it is not available"""
styles = getSampleStyleSheet()
url = "https://cccu.tobiaskliem.de/static/images/cccu.jpg"
try:
image = Image(url, 2.45*inch, 1*inch)
except IOError:
image = Paragraph(
"Canterbury Christ Church University", styles['Heading1'])
return image
def bold(string):
"""Adds <b> tags around a string"""
bold_string = '<b>' + string + '</b>'
return bold_string
def heading(string, headingstyle='Heading2'):
"""Returns a proper paragraph for the header line"""
styles = getSampleStyleSheet()
tmp = '<para alignment = "center">' + string + '</para>'
result = Paragraph(tmp, styles[headingstyle])
return result
def formatted_date(raw_date):
"""Returns a proper date string
This returns a string of the date in British Format.
If the date field was left blank, an empty string is returned.
"""
if raw_date is None:
result = ''
else:
result = (
str(raw_date.day) + '/' + str(raw_date.month) + '/' +
str(raw_date.year))
return result
def two_markers(marker1, marker2):
"""Returns a string containing two markers, sorted alphabetically"""
marker_1_sort = marker1.last_name + "/" + marker1.first_name
marker_2_sort = marker2.last_name + "/" + marker2.first_name
markers = [marker_1_sort, marker_2_sort]
markers.sort()
marker_1_list = markers[0].split("/")
marker_2_list = markers[1].split("/")
marker_1_return = marker_1_list[1] + ' ' + marker_1_list[0]
marker_2_return = marker_2_list[1] + ' ' + marker_2_list[0]
result = marker_1_return + ' / ' + marker_2_return
return result
def paragraph(string):
"""Returns a paragraph with normal style"""
styles = getSampleStyleSheet()
return Paragraph(string, styles['Normal'])
def bold_paragraph(string):
"""Returns a paragraph with bold formatting"""
styles = getSampleStyleSheet()
tmp = bold(string)
return Paragraph(tmp, styles['Normal'])
def get_title(module, assessment):
assessment_title_string = module.get_assessment_title(assessment)
assessment_title_string = assessment_title_string.replace("/", "or")
return assessment_title_string
# Different marksheets
def essay_sheet(student, module, assessment):
"""Marksheet for Essays
This is the standard marksheet for CCCU Law, including a marking grid
with four different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Essay')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def legal_problem_sheet(student, module, assessment):
"""Marksheet for Legal Problem Questions
This is the standard marksheet for CCCU Law, including a marking grid
with four different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Legal Problem')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_1'])
category_2 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_2'])
category_3 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_3'])
category_4 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def presentation_sheet(student, module, assessment):
"""Marksheet for Oral Presentations
This is the standard marksheet for individual presentations at
CCCU Law, including a marking grid with X different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Oral Presentation')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Presentation Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['PRESENTATION']['i_1'])
category_2 = paragraph(CATEGORIES['PRESENTATION']['i_2'])
category_3 = paragraph(CATEGORIES['PRESENTATION']['i_3'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date],
[assessment_title, '', '', ''],
[criteria, category_1, category_2, category_3]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (0, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def essay_legal_problem_sheet(student, module, assessment):
"""Marksheet for a cross between Essay and legal problem
This consists of the essay marksheet combined with the legal problem grid
and two different comment sections
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading(
'Law Undergraduate Assessment Sheet: Essay / Legal Problem')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
category_5 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_1'])
category_6 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_2'])
category_7 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_3'])
category_8 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', '']]
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(Spacer(1, 5))
subtitle = Paragraph('Feedback for Part (a): Essay', styles['Heading3'])
elements.append(subtitle)
elements.append(Spacer(1, 5))
data = [[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 1), (0, -1), colors.lightgrey),
('ALIGN', (1, 1), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(Spacer(1, 5))
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
part_1_mark_data = [[
Paragraph('Mark for part(a)', styles['Heading4']),
Paragraph(str(marksheet.part_1_mark), styles['Heading4'])]]
part_1_mark_table = Table(part_1_mark_data)
part_1_mark_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(part_1_mark_table)
elements.append(PageBreak())
heading_2 = Paragraph(
'Feedback for Part (b): Legal Problem', styles['Heading3'])
elements.append(heading_2)
elements.append(Spacer(1, 4))
data_2 = [[criteria, category_5, category_6, category_7, category_8]]
row = ['80 +']
if marksheet.category_mark_5 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 80:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['70 - 79']
if marksheet.category_mark_5 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 79:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['60 - 69']
if marksheet.category_mark_5 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 69:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['50 - 59']
if marksheet.category_mark_5 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 59:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['40 - 49']
if marksheet.category_mark_5 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 49:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['Under 40']
if marksheet.category_mark_5 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 39:
row.append('X')
else:
row.append(' ')
data_2.append(row)
t_2 = Table(data_2)
t_2.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 1), (0, -1), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t_2)
elements.append(Spacer(1, 5))
comments_2 = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist_2 = marksheet.comments_2.split('\n')
for line in feedbacklist_2:
if line != "":
p = paragraph(line)
comments_2.append(p)
comments_2.append(Spacer(1, 4))
for comment in comments_2:
elements.append(comment)
part_2_mark_data = [[
Paragraph('Mark for part(b)', styles['Heading4']),
Paragraph(str(marksheet.part_2_mark), styles['Heading4'])
]]
part_2_mark_table = Table(part_2_mark_data)
part_2_mark_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(part_2_mark_table)
elements.append(Spacer(1, 10))
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Final Mark for (a) and (b)'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def online_test_court_report_sheet(student, module, assessment):
"""Marksheet for Online Test / Court Report
This is a custom marksheet that allows to combine a mark for an online
test with a court report. Essentially, it is the essay marksheet with
a few extra points.
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading(
'Law Undergraduate Assessment Sheet: Online Test / Court Report')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]
]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)
])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Combined Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']
]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
court = 'Mark for Court Report: ' + str(marksheet.part_1_mark)
online = 'Mark for On Line Test: ' + str(marksheet.part_2_mark)
last_data = [
['', '', paragraph(court)],
['', '', paragraph(online)],
[marked_by_table, '', '', mark_table]]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 1)),
('SPAN', (2, 0), (3, 0)),
('SPAN', (2, 1), (3, 1)),
('SPAN', (0, -1), (2, -1))
])
)
elements.append(last_table)
return elements
def negotiation_written_sheet(student, module, assessment):
"""Marksheet for the assessment 'Negotiation / Written Submission'
This is an assessment that includes a group component and is therefore
a little more complex.
"""
elements = []
styles = getSampleStyleSheet()
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment
)
group_no = performance.group_assessment_group
group_feedback = GroupMarksheet.objects.get(
module=module, assessment=assessment, group_no=group_no
)
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 3))
title = heading(
'Law Undergraduate Assessment Sheet: Negotiation Study', 'Heading3'
)
elements.append(title)
elements.append(Spacer(1, 3))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph('ELIM')]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(group_feedback.submission_date)
submission_date = [
paragraph('Presentation Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
tmp = str(performance.seminar_group) + '/' + str(group_no)
group_number = [
paragraph('Seminar/LAU Group'),
Spacer(1, 3),
bold_paragraph(tmp)]
individual_category_1 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_1'])
individual_category_2 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_2'])
individual_category_3 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_3'])
individual_category_4 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_4'])
group_category_1 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_1'])
group_category_2 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_2'])
group_category_3 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_3'])
group_category_4 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_4'])
deduction_explanation = (
paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['i_4_helptext']))
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
table_header_1 = bold_paragraph('Part 1: Assessed Negotiation')
table_header_2 = bold_paragraph('Marks Available')
table_header_3 = bold_paragraph('Marks Awarded')
part_1_subheader = bold_paragraph('1. Individual Work')
part_2_subheader = bold_paragraph('2. Group Work')
sub_total_1_string = bold_paragraph('Sub-Total Part 1')
sub_total_1 = 0
if marksheet.category_mark_1_free is not None:
sub_total_1 += marksheet.category_mark_1_free
if group_feedback.category_mark_1_free is not None:
sub_total_1 += group_feedback.category_mark_1_free
if group_feedback.category_mark_2_free is not None:
sub_total_1 += group_feedback.category_mark_2_free
table_header_4 = bold_paragraph(
'Part 2: Individual and Written Submission'
)
sub_total_2_string = paragraph('Sub-Total Part 2')
sub_total_2 = 0
if marksheet.category_mark_2_free is not None:
sub_total_2 += marksheet.category_mark_2_free
if marksheet.category_mark_3_free is not None:
sub_total_2 += marksheet.category_mark_3_free
if group_feedback.category_mark_3_free is not None:
sub_total_2 += group_feedback.category_mark_3_free
if group_feedback.category_mark_4_free is not None:
sub_total_2 += group_feedback.category_mark_4_free
deductions_h_1 = bold_paragraph('Deductions possible')
deductions_h_2 = bold_paragraph('Deductions incurred')
i_mark_1 = str(marksheet.category_mark_1_free)
i_mark_2 = str(marksheet.category_mark_2_free)
i_mark_3 = str(marksheet.category_mark_3_free)
i_mark_4 = str(marksheet.category_mark_4_free)
g_mark_1 = str(group_feedback.category_mark_1_free)
g_mark_2 = str(group_feedback.category_mark_2_free)
g_mark_3 = str(group_feedback.category_mark_3_free)
g_mark_4 = str(group_feedback.category_mark_4_free)
data = [[last_name, first_name, group_number, ''],
[module_title, module_code, submission_date, ''],
['', '', '', ''],
['', '', table_header_2, table_header_3],
[table_header_1, '', '', ''],
[part_1_subheader, '', '', ''],
[individual_category_1, '', '40', i_mark_1],
[part_2_subheader, '', '', ''],
[group_category_1, '', '10', g_mark_1],
[group_category_2, '', '10', g_mark_2],
[sub_total_1_string, '', '60', sub_total_1],
[table_header_4, '', '', ''],
[part_1_subheader, '', '', ''],
[individual_category_2, '', '10', i_mark_2],
[individual_category_3, '', '10', i_mark_3],
[part_2_subheader, '', '', ''],
[group_category_3, '', '10', g_mark_3],
[group_category_4, '', '10', g_mark_4],
[sub_total_2_string, '', '40', sub_total_2],
[individual_category_4, '', deductions_h_1, deductions_h_2],
[deduction_explanation, '', '12', i_mark_4]
]
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (-2, 0), (-1, 0)),
('SPAN', (-2, 1), (-1, 1)),
('SPAN', (0, 2), (-1, 2)),
('BOX', (0, 0), (-1, 1), 0.25, colors.black),
('SPAN', (0, 3), (1, 3)),
('SPAN', (0, 4), (1, 4)),
('SPAN', (0, 5), (1, 5)),
('SPAN', (0, 6), (1, 6)),
('SPAN', (0, 7), (1, 7)),
('SPAN', (0, 8), (1, 8)),
('SPAN', (0, 9), (1, 9)),
('BACKGROUND', (0, 10), (-1, 10), colors.lightgrey),
('SPAN', (0, 10), (1, 10)),
('SPAN', (0, 11), (1, 11)),
('SPAN', (0, 12), (1, 12)),
('SPAN', (0, 13), (1, 13)),
('SPAN', (0, 14), (1, 14)),
('SPAN', (0, 15), (1, 15)),
('SPAN', (0, 16), (1, 16)),
('SPAN', (0, 17), (1, 17)),
('SPAN', (0, 18), (1, 18)),
('SPAN', (0, 19), (1, 19)),
('SPAN', (0, 20), (1, 20)),
('BACKGROUND', (0, 18), (-1, 18), colors.lightgrey),
('BOX', (0, 3), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(PageBreak())
# Individual Comments
individual_comments = [
bold_paragraph('Comment on <u>Individual</u> Work for part 1 and 2'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
individual_comments.append(p)
individual_comments.append(Spacer(1, 4))
# Group Comments
group_comments = [
bold_paragraph('Comment on <u>Group</u> Work for part 1 and 2'),
Spacer(1, 4)]
feedbacklist = group_feedback.group_comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
group_comments.append(p)
group_comments.append(Spacer(1, 4))
# Final table
last_data = [
[individual_comments, '', '', ''],
[group_comments, '', '', ''],
[marked_by_table, '', mark_table, '']
]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (-1, 0)),
('SPAN', (0, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, -1), (-1, -1), colors.lightgrey)])
)
elements.append(last_table)
return elements
# Functions called from website
@login_required
def export_feedback_sheet(request, code, year, assessment, student_id):
"""Will export either one or multiple feedback sheets.
This needs to be given the student id or the string 'all' if
you want all marksheets for the assessment. It will only work if
the person requesting is a teacher, an admin or the student the
marksheet is about.
"""
module = Module.objects.get(code=code, year=year)
assessment_title = get_title(module, assessment)
assessment_type = module.get_marksheet_type(assessment)
if student_id == 'all':
if is_teacher(request.user) or is_admin(request.user):
response = HttpResponse(mimetype='application/pdf')
first_part = module.title.replace(' ', '_')
second_part = assessment_title.replace(' ', '_')
filename_string = (
'attachment; filename=' + first_part +
'_' + second_part + '_-_all_marksheets.pdf')
all_students = module.student_set.all()
documentlist = []
students = [] # Only the students where feedback has been entered
for student in all_students:
try:
performance = Marksheet.objects.get(
student=student, module=module,
assessment=assessment)
students.append(student)
except Marksheet.DoesNotExist:
pass
for student in students:
if assessment_type == 'ESSAY':
elements = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
elements = legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'PRESENTATION':
elements = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
elements = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
elements = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
elements = negotiation_written_sheet(
student, module, assessment)
for element in elements:
documentlist.append(element)
documentlist.append(PageBreak())
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
document.setAuthor = 'Canterbury Christ Church University'
document.build(documentlist)
return response
else:
return HttpResponseForbidden()
else:
student = Student.objects.get(student_id=student_id)
own_marksheet = False # Just for the filename
allowed = False
if is_teacher(request.user) or is_admin(request.user):
allowed = True
elif is_student(request.user):
if student.belongs_to == request.user:
own_marksheet = True
allowed = True
if allowed:
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
assessment_title_string = get_title(module, assessment)
if own_marksheet:
first_part = module.title.replace(' ', '_')
second_part = assessment_title_string.replace(' ', '_')
filename_string = (
'attachment; filename=' + first_part + '_' +
second_part + '_Marksheet.pdf'
)
else:
ln = student.last_name.replace(' ', '_')
fn = student.first_name.replace(' ', '_')
filename_string = (
'attachment; filename=' + ln + '_' + fn + '.pdf'
)
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
document.setAuthor = 'Canterbury Christ Church University'
if assessment_type == 'ESSAY':
elements = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
elements = legal_problem_sheet(
student, module, assessment
)
elif assessment_type == 'PRESENTATION':
elements = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
elements = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
elements = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
elements = negotiation_written_sheet(
student, module, assessment)
document.build(elements)
return response
else:
return HttpResponseForbidden()
@login_required
@user_passes_test(is_teacher)
def export_attendance_sheet(request, code, year):
"""Returns attendance sheets for a module."""
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = (
'attachment; filename=attendance_sheet.pdf')
document = SimpleDocTemplate(response)
elements = []
module = Module.objects.get(code=code, year=year)
styles = getSampleStyleSheet()
next_year = str(module.year + 1)
heading = (
module.title + " (" + module.code + ") " + str(module.year) +
"/" + next_year)
performances = Performance.objects.filter(module=module)
no_of_seminar_groups = 0
for performance in performances:
if performance.seminar_group > no_of_seminar_groups:
no_of_seminar_groups = performance.seminar_group
counter = 0
while counter < no_of_seminar_groups:
counter += 1
subheading = "Seminar Group " + str(counter)
elements.append(Paragraph(heading, styles['Heading1']))
elements.append(Paragraph(subheading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Name']
column = 0
last_week = module.last_session + 1
no_teaching = module.no_teaching_in.split(",")
for week in range(module.first_session, last_week):
strweek = str(week)
if strweek not in no_teaching:
header.append(strweek)
data.append(header)
performances = Performance.objects.filter(
module=module, seminar_group=counter)
for performance in performances:
row = [performance.student]
for week in performance.attendance:
if week == '1':
row.append(u'\u2713')
elif week == 'e':
row.append('e')
else:
row.append(' ')
data.append(row)
table = Table(data)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 0), (0, -1), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(PageBreak())
document.build(elements)
return response
@login_required
@user_passes_test(is_admin)
def export_all_anonymous_exam_marks(request, year):
"""Gives an overview of all anonymous marks in the year"""
modules = Module.objects.filter(year=year)
modules_to_use = []
for module in modules:
if module.exam_value:
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
if mark.exam:
modules_to_use.append(module)
break
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = (
'attachment; filename=anonymous_exam_marks.pdf')
doc = BaseDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
frame1 = Frame(
doc.leftMargin, doc.bottomMargin, doc.width/2-6,
doc.height, id='col1')
frame2 = Frame(
doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
for module in modules_to_use:
heading = (
"Anonymous Marks for " + module.title + " (" +
str(module.year) + "/" + str(module.year + 1) + ")")
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Exam ID', 'Exam Mark']
data.append(header)
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
row = [mark.exam_id, mark.exam]
data.append(row)
table = Table(data, repeatRows=1)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(paragraph(datenow))
elements.append(PageBreak())
doc.addPageTemplates([PageTemplate(id='TwoCol', frames=[frame1, frame2])])
doc.build(elements)
return response
@login_required
@user_passes_test(is_teacher)
def export_anonymous_marks(request, code, year, assessment):
"""Gives an overview of anonymous marks for an assessment"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
module_string = module.title.replace(" ", "_")
filename_string = 'attachment; filename='
filename_string += module_string
filename_string += '.pdf'
response['Content-Disposition'] = filename_string
doc = BaseDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
frame1 = Frame(
doc.leftMargin, doc.bottomMargin, doc.width/2-6,
doc.height, id='col1')
frame2 = Frame(
doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
heading = (
"Anonymous Marks for " + module.title + " (" +
str(module.year) + "/" + str(module.year + 1) + ") - ")
if assessment == 'exam':
heading += "Exam"
else:
assessment = int(assessment)
heading += module.get_assessment_title(assessment)
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Exam ID', 'Mark']
data.append(header)
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
row = [mark.exam_id, mark.get_assessment_result(assessment)]
data.append(row)
table = Table(data, repeatRows=1)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(Spacer(1, 20))
elements.append(paragraph(datenow))
doc.addPageTemplates([PageTemplate(id='TwoCol', frames=[frame1, frame2])])
doc.build(elements)
return response
@login_required
@user_passes_test(is_teacher)
def export_marks(request, code, year):
"""Gives a useful sheet of all marks for the module.
Students will be highlighted if they failed the module, or if a QLD
student failed a component in a Foundational module
"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
filename = module.title.replace(" ", "_")
filename += "_Marks_" + str(module.year) + ".pdf"
responsestring = 'attachment; filename=' + filename
response['Content-Disposition'] = responsestring
doc = SimpleDocTemplate(response)
doc.pagesize = landscape(A4)
elements = []
styles = getSampleStyleSheet()
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
modulestring = (
module.title + ' (' + module.code + ') ' + str(module.year) + '/' +
str(module.year + 1)
)
heading = "Marks for " + modulestring
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['ID', 'Student', ' Programme', 'QLD']
assessment_range = []
if module.assessment_1_value:
title = (
module.assessment_1_title.strip() +
' (' +
str(module.assessment_1_value) +
'%)'
)
assessment_range.append('1')
header.append(paragraph(title))
if module.assessment_2_value:
title = (
module.assessment_2_title.strip() +
' (' +
str(module.assessment_2_value) +
'%)'
)
assessment_range.append('2')
header.append(paragraph(title))
if module.assessment_3_value:
title = (
module.assessment_3_title.strip() +
' (' +
str(module.assessment_3_value) +
'%)'
)
assessment_range.append('3')
header.append(paragraph(title))
if module.assessment_4_value:
title = (
module.assessment_4_title.strip() +
' (' +
str(module.assessment_4_value) +
'%)'
)
assessment_range.append('4')
header.append(paragraph(title))
if module.assessment_5_value:
title = (
module.assessment_5_title.strip() +
' (' +
str(module.assessment_5_value) +
'%)'
)
assessment_range.append('5')
header.append(paragraph(title))
if module.assessment_6_value:
title = (
module.assessment_6_title.strip() +
' (' +
str(module.assessment_6_value) +
'%)'
)
assessment_range.append('6')
header.append(paragraph(title))
if module.exam_value:
title = (
'Exam (' +
str(module.exam_value) +
'%)'
)
assessment_range.append('exam')
header.append(paragraph(title))
header.append('Total')
header.append('Notes')
data.append(header)
performances = Performance.objects.filter(module=module)
counter = 0
highlight = []
# This needs to be replaced once model changes
ls = Course.objects.get(
title='BSc (Hons) Legal Studies / Sport And Exercise Science')
llb = Course.objects.get(
title='LLB (Hons) Bachelor Of Law')
business = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Business Studies')
ac = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Criminology')
fi = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Forensic Investigation')
ir = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With International Relations')
soc = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Sociology')
# <<<
for performance in performances:
counter += 1
student = (
performance.student.last_name + ", " +
performance.student.short_first_name()
)
row = [performance.student.student_id, paragraph(student)]
# This needs to be replaced once model changes
if performance.student.course == llb:
course = 'LLB'
elif performance.student.course == business:
course = 'LLB/Business'
elif performance.student.course == ac:
course = 'LLB/AC'
elif performance.student.course == fi:
course = 'LLB/FI'
elif performance.student.course == ir:
course = 'LLB/IR'
elif performance.student.course == soc:
course = 'LLB/Sociology'
elif performance.student.course == ls:
course = 'LS/Sport'
else:
course = ''
row.append(course)
# <<<
if performance.student.qld:
row.append(u'\u2713')
else:
row.append(' ')
notes = ''
if performance.average < PASSMARK:
highlight_yellow = True
else:
highlight_yellow = False
highlight_red = False
for assessment in assessment_range:
concession = performance.get_concession(assessment)
assessment_title = module.get_assessment_title(assessment)
assessment_title = assessment_title.strip()
granted_or_pending = False
if concession == 'G':
granted_or_pending = True
if assessment == 'exam':
if len(notes) == 0:
notes = 'Sit exam'
else:
notes += ', sit exam'
else:
if len(notes) == 0:
notes = 'Submit ' + assessment_title
else:
notes += ', submit ' + assessment_title
if concession == 'P':
granted_or_pending = True
if assessment == 'exam':
if len(notes) == 0:
notes = 'Concession for exam pending'
else:
notes += ', concession for exam pending'
else:
if len(notes) == 0:
notes = (
'Concession for ' +
assessment_title +
' pending')
else:
notes += (
', concession for ' +
assessment_title +
' pending')
if performance.get_assessment_result(assessment):
row.append(performance.get_assessment_result(assessment))
if module.is_foundational and performance.student.qld:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Resubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if not highlight_yellow:
highlight_red = True
elif performance.average < PASSMARK:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Reubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
else:
row.append('-')
if module.is_foundational and performance.student.qld:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Resubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if not highlight_yellow:
highlight_red = True
elif performance.average < PASSMARK:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Reubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if performance.average:
row.append(performance.average)
else:
row.append('-')
highlight_yellow = True
notes_paragraph = paragraph(notes)
row.append(notes_paragraph)
data.append(row)
if highlight_yellow:
highlight.append((counter, 'y'))
if highlight_red:
highlight.append((counter, 'r'))
table = Table(data, repeatRows=1)
tablestyle = [
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.grey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)
]
for item in highlight:
if item[1] == 'r':
tablestyle.append(
('BACKGROUND', (0, item[0]), (-1, item[0]), colors.red)
)
if item[1] == 'y':
tablestyle.append(
('BACKGROUND', (0, item[0]), (-1, item[0]), colors.yellow)
)
table.setStyle(TableStyle(tablestyle))
elements.append(table)
elements.append(Spacer(1, 20))
elements.append(paragraph(datenow))
elements.append(PageBreak())
doc.build(elements)
return response
def sample_pack(request, code, year):
"""Prepares a nice sample pack for the external examiner"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
tmp = module.title.replace(" ", "_")
filename_string = 'attachment; filename=' + tmp + '_examiners_pack.pdf'
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
performances = list(Performance.objects.filter(module=module))
samplesize = sample_size(len(performances))
per_range = round(samplesize / 5) # Fail, 40s, 50s, 60s, 70 +
sample = {}
for assessment in module.get_assessment_range():
shuffle(performances) # Make sure the marks are from all over
add = []
first = []
two_one = []
two_two = []
third = []
fail = []
leftover = [] # Needed if there are less than per_range in one
complete = False
for performance in performances:
mark = performance.get_assessment_result(assessment)
if mark:
if mark > 69:
if len(first) < per_range:
first.append(performance)
else:
leftover.append(performance)
elif mark > 59:
if len(two_one) < per_range:
two_one.append(performance)
else:
leftover.append(performance)
elif mark > 49:
if len(two_two) < per_range:
two_two.append(performance)
else:
leftover.append(performance)
elif mark > 39:
if len(third) < per_range:
third.append(performance)
else:
leftover.append(performance)
else:
if len(fail) < per_range:
fail.append(performance)
else:
leftover.append(performance)
this_sample = first + two_one + two_two + third + fail
while len(this_sample) < samplesize:
this_sample.append(leftover.pop())
this_sample.sort(
key=lambda x: x.get_assessment_result(assessment),
reverse=True)
sample[assessment] = this_sample
title = heading('Checklist, not part of the pack')
elements.append(title)
assessment_string = (
'Assessments (at the end, together with the marksheets included in ' +
'this bundle)')
data = [
[
bold_paragraph('Make sure to add the following to this pack'),
'', '', ''],
['The module handbook (after the title page)', '', '', ''],
[bold_paragraph(assessment_string), '', '', '']
]
headline = [0, 2]
only_one = [1]
counter = 2
for assessment in module.get_assessment_range():
if module.get_assessment_title(assessment) == 'Exam':
blind = True
else:
blind = False
newline = True
counter += 1
title = bold_paragraph(module.get_assessment_title(assessment))
headline.append(counter)
data.append([title, '', '', ''])
counter += 1
title = paragraph(
'Instructions for ' + module.get_assessment_title(assessment))
data.append([title, '', '', ''])
only_one.append(counter)
this_sample = sample[assessment]
for performance in this_sample:
if newline:
print "True"
counter += 1
if blind:
first_column = performance.student.exam_id
print first_column
else:
first_column = performance.student.__unicode__()
newline = False
else:
if blind:
data.append(
[
first_column,
'',
performance.student.exam_id,
''
])
else:
data.append(
[
first_column,
'',
performance.student.__unicode__(),
''
])
newline = True
t = Table(data, colWidths=(200, 20, 200, 20))
style = [
('BOX', (0, 1), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 1), (-1, -1), 0.25, colors.black),
]
for line in headline:
style.append(('SPAN', (0, line), (-1, line)))
for line in only_one:
style.append(('SPAN', (0, line), (-2, line)))
# for line in checkboxline:
# style.append(('BOX', (-1, line), (-1, line)))
t.setStyle(TableStyle(style))
elements.append(t)
# Title page
elements.append(PageBreak())
elements.append(Spacer(1, 100))
elements.append(logo())
elements.append(Spacer(1, 80))
title = heading(module.__unicode__(), 'Heading1')
elements.append(title)
elements.append(Spacer(1, 40))
if len(module.eligible) == 1:
tmp = 'Year ' + module.eligible
elif len(module.eligible) == 2:
tmp = 'Years ' + module.eligible[0] + ' and ' + module.eligible[1]
else:
tmp = (
'Years ' +
module.eligible[0] +
', ' +
module.eligible[1] +
' and ' +
module.eligible[2]
)
level = heading(tmp)
elements.append(level)
elements.append(Spacer(1, 40))
subtitle = heading('Exam Board Sample Pack')
elements.append(subtitle)
elements.append(PageBreak())
# Statistics page
title = heading('Module Marks')
elements.append(title)
elements.append(Spacer(1, 20))
no_of_first = 0
no_of_two_one = 0
no_of_two_two = 0
no_of_third = 0
no_of_fail = 0
for performance in performances:
result = performance.average
if result:
if result > 69:
no_of_first += 1
elif result > 59:
no_of_two_one += 1
elif result > 49:
no_of_two_two += 1
elif result > 39:
no_of_third += 1
else:
no_of_fail += 1
first_f = float(no_of_first)
two_one_f = float(no_of_two_one)
two_two_f = float(no_of_two_two)
third_f = float(no_of_third)
fail_f = float(no_of_fail)
first_percent = round(((first_f / len(performances)) * 100), 1)
two_one_percent = round(((two_one_f / len(performances)) * 100), 1)
two_two_percent = round(((two_two_f / len(performances)) * 100), 1)
third_percent = round(((third_f / len(performances)) * 100), 1)
fail_percent = round(((fail_f / len(performances)) * 100), 1)
data = []
data.append(['Range', 'Amount', 'Percentage'])
data.append(['70 +', no_of_first, first_percent])
data.append(['60-69', no_of_two_one, two_one_percent])
data.append(['50-59', no_of_two_two, two_two_percent])
data.append(['40-49', no_of_third, third_percent])
data.append(['Fail', no_of_fail, fail_percent])
t = Table(data)
style = [
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
]
t.setStyle(TableStyle(style))
elements.append(t)
elements.append(PageBreak())
for assessment in module.get_assessment_range():
this_sample = sample[assessment]
assessment_type = module.get_marksheet_type(assessment)
if assessment_type:
for performance in this_sample:
student = performance.student
if assessment_type == 'ESSAY':
marksheet = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
marksheet = legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'PRESENTATION':
marksheet = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
marksheet = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
marksheet = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
marksheet = negotiation_written_sheet(
student, module, assessment)
else:
marksheet = False
if marksheet:
for element in marksheet:
elements.append(element)
elements.append(PageBreak())
document.build(elements)
return response
|
plants can be an essential part of urban life.
Paul Holt, Creative Director of N1 and W6 Garden Centres, has been a passionate advocate for the benefits of indoor plants for many years. Thanks to his expertise, N1 has become a well-known pit stop for anyone looking for high quality, stylish and unique houseplants.
In the indoor sections of N1 Garden Centre you will also find on-trend home accessories, pots and furniture. All items are handpicked by Paul, and selected from across the globe.
|
import os
from setuptools import setup, find_packages
setup(name='pyngspipe',
version='0.0.1',
description='pyngspipe is a pipeline for processing GEO NGS datasets based on the pyrnatools/pychiptools packages',
author='Patrick Lombard',
author_email='[email protected]',
packages=find_packages(),
scripts=['scripts/pyngs_pipe.py', 'scripts/pyngs_report.py'],
package_data={"pyngspipe":['data/*']},
install_requires=['pysam', 'pybedtools'],
license='GPLv3',
platforms='any',
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'Environment :: Console',
],
long_description="""
pyngspipe is a pipeline for processing GEO NGS datasets based on the pyrnatools/pychiptools packages
Contact
=============
If you have any questions or comments about pyngspipe, please feel free to contact me via
eMail: [email protected]
""",
)
|
Craig wants to buy some software for his computer. The company that makes the software sells it to a software distributor for $30. The distributor adds 15% to the price and then sells it to the store. The store adds 25% on top of the price they pay for it to result in their regular price. But the day that Craig goes to the store, there is a sale of 25% off the regular price. How much does Craig pay for the software?
34.50+(34.50*25) = 43.13 which I rounded to 43.12.
Now take 25% of the stores normal selling price and subtract from 43.12 to obtain Craig's price.
I need help on my math questions, can someone help me??
my yest is on tuesday...i relly need hep. i wil hv questions similar to this...i need help!
Martin sold his computer and software for $900.00, receiving three times as much for the computer then the software. What was the selling price of the computer and software?
You decide to market your own custom computer software. You must invest $3,255 for computer hardware, and spend $2.90 to buy and package each disk. If each program sells for $13.75, how many copies must you sell to break even?
|
import re
from django.conf import settings
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.authentication import (
get_authorization_header,
BaseAuthentication,
)
from leaderboard.fxa.client import FXAClientMixin, FXAException
from leaderboard.contributors.models import Contributor
# A regex which matches against a Bearer token
# http://self-issued.info/docs/draft-ietf-oauth-v2-bearer.html#authz-header
FXA_ACCESS_TOKEN_RE = re.compile('Bearer\s+(?P<token>[a-zA-Z0-9._~+\/\-=]+)')
class OAuthTokenAuthentication(FXAClientMixin, BaseAuthentication):
"""
Simple token based authentication for OAuth v2.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Bearer ". For example:
Authorization: Bearer 401f7ac837da42b97f613d789819ff93537bee6a
http://self-issued.info/docs/draft-ietf-oauth-v2-bearer.html#authz-header
"""
def authenticate(self, request):
auth_header = get_authorization_header(request)
if not auth_header:
msg = 'Missing token header.'
raise AuthenticationFailed(msg)
match = FXA_ACCESS_TOKEN_RE.match(auth_header)
if not match:
msg = 'Invalid token header. Must match: `Bearer <token>`.'
raise AuthenticationFailed(msg)
access_token = match.groupdict()['token']
try:
verify_data = self.fxa_client.verify_token(access_token)
except FXAException, e:
msg = (
'Unable to verify access token '
'with Firefox Accounts: {}'
).format(e)
raise AuthenticationFailed(msg)
client_id = verify_data.get('client_id', None)
if client_id != settings.FXA_CLIENT_ID:
msg = (
'Provided access token is not '
'valid for use with this service.'
)
raise AuthenticationFailed(msg)
fxa_uid = verify_data.get('user', None)
if fxa_uid is None:
msg = 'Unable to retrieve Firefox Accounts user id.'
raise AuthenticationFailed(msg)
try:
contributor = Contributor.objects.get(fxa_uid=fxa_uid)
except Contributor.DoesNotExist:
msg = 'No contributor found.'
raise AuthenticationFailed(msg)
try:
profile_data = self.fxa_client.get_profile_data(access_token)
except FXAException, e:
msg = (
'Unable to retrieve profile '
'data from Firefox Accounts: {}'
).format(e)
raise AuthenticationFailed(msg)
display_name = profile_data.get('displayName', None)
if display_name is not None and display_name != contributor.name:
contributor.name = display_name
contributor.save()
return (
contributor,
{
'access_token': access_token,
'profile_data': profile_data,
},
)
def authenticate_header(self, request):
return 'Token'
|
We create cross-border performance opportunities for emerging choreographers. Our network shares resources and insights. We actively enhance connections between dance artists, programmers and audiences across Europe.
Aerowaves holds an annual open call for emerging choreographers working in geographic Europe who want to become one of the Aerowaves Twenty.
These selected artists represent the most exciting dance makers in Europe and are promoted by Aerowaves for a year. Presenting partners of the Aerowaves Network choose to programme at least three of the Aerowaves Twenty in their venues or festivals.
Aerowaves’ three day festival is held in a different European city in April each year. At least 10 of the Aerowaves Twenty are invited to perform at Spring Forward, alongside others from previous years. About 100 programmers from Europe and further afield attend Spring Forward, in addition to local audiences. Most of the festival will be Live Streamed from 2015.
Springback Magazine is place to read about, watch and hear about dance. A place where you can feel the pulse of contemporary movements across Europe, through opinion, commentary and reflection. A place to explore dance, and experience dance.
Springback Academy offers a unique chance for emerging dance writers to be mentored by professional critics during an international festival. It takes place during Spring Forward each year. Springback Academy experiments with forms of dance writing and positively impacts on the quality of dance criticism online.
Aerowaves provides grants to its partners so that they can create projects that will entice and engage local audiences in the work of the Aerowaves Twenty artists they present. By supporting the network partners directly, Aerowaves encourages greater focus on building audiences for emerging artists. The partners generate resources and practical knowledge to share across the network.
Aerowaves is sustained as a vibrant community of interest by a balance of generosities.
From 2017, Aerowaves is supported for four years by the European Union through a Creative Europe Platforms award.
|
# coding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.encoding import iri_to_uri, force_text
from django.utils.functional import lazy
from django.utils.http import (cookie_date, http_date,
urlquote, urlquote_plus, urlunquote, urlunquote_plus)
from django.utils import six
from django.utils.text import get_text_list, smart_split
from django.utils.translation import override
lazystr = lazy(force_text, six.text_type)
class TextTests(TestCase):
"""
Tests for stuff in django.utils.text and other text munging util functions.
"""
def test_get_text_list(self):
self.assertEqual(get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(get_text_list(['a']), 'a')
self.assertEqual(get_text_list([]), '')
with override('ar'):
self.assertEqual(get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(smart_split(test)), expected)
def test_urlquote(self):
self.assertEqual(urlquote('Paris & Orl\xe9ans'),
'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"),
'Paris%20&%20Orl%C3%A9ans')
self.assertEqual(
urlunquote('Paris%20%26%20Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(
urlunquote('Paris%20&%20Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'),
'Paris+%26+Orl%C3%A9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"),
'Paris+&+Orl%C3%A9ans')
self.assertEqual(
urlunquote_plus('Paris+%26+Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(
urlunquote_plus('Paris+&+Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
def test_cookie_date(self):
t = 1167616461.0
self.assertEqual(cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_iri_to_uri(self):
self.assertEqual(iri_to_uri('red%09ros\xe9#red'),
'red%09ros%C3%A9#red')
self.assertEqual(iri_to_uri('/blog/for/J\xfcrgen M\xfcnster/'),
'/blog/for/J%C3%BCrgen%20M%C3%BCnster/')
self.assertEqual(iri_to_uri('locations/%s' % urlquote_plus('Paris & Orl\xe9ans')),
'locations/Paris+%26+Orl%C3%A9ans')
def test_iri_to_uri_idempotent(self):
self.assertEqual(iri_to_uri(iri_to_uri('red%09ros\xe9#red')),
'red%09ros%C3%A9#red')
|
This project has the long-term goal of assisting developing countries to build their capacity to detect and respond to emerging infectious disease threats. From 2006 to 2013 I served as the Executive Director of the Regional Emerging Diseases Intervention (REDI) Center in Singapore, which had the mission to help Southeast Asian countries develop their capacities to address emerging disease threats such as SARS, Avian influenza and dengue. To carry out mission at REDI Center, we engaged many international collaborators including WHO, US CDC, USAID and the University of Washington SPH, in order to provide the highest quality training for public health personnel from the Southeast Asian Region.
Since retiring from REDI Center in 2013, I have continued my interest and participation in this topic at the UW SPH Department of Global Health by working with faculty who are developing similar international programs to improve global capacity to address emerging infectious diseases threats.
|
#!/usr/bin/env python
"""
fla.gr controller for editing flags
For more information, see: https://github.com/JoshAshby/
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
[email protected]
"""
from seshat.route import autoRoute
from utils.baseHTMLObject import baseHTMLObject
from views.flags.flagViewTmpl import flagViewTmpl
from views.partials.flags.flagViewTmpl import flagViewTmpl as flagViewTmplPartial
import models.couch.flag.flagModel as fm
import models.couch.user.userModel as um
@autoRoute()
class flagsView(baseHTMLObject):
_title = "view flag"
def GET(self):
"""
"""
flagid = self.env["members"][0]
flag = fm.flagORM.getByID(flagid)
if not flag.visibility and flag.userID != self.session.id:
self.session.pushAlert("This is a private flag! Sorry but we \
can't let you see it.", "Hold it.", "error")
self.head = ("303 SEE OTHER", [("location", "/flags")])
return
flag.format()
flag["joineduserID"] = um.userORM.getByID(flag.userID)
view = flagViewTmpl(searchList=[self.tmplSearchList])
if self.env["cfg"].enableModalFlagDeletes:
view.scripts = ["handlebars_1.0.min",
"jquery.json-2.4.min",
"adminModal.flagr",
"editForm.flagr",
"deleteFlagModal.flagr"]
flagsTmpl = flagViewTmplPartial(searchList=[self.tmplSearchList])
flagsTmpl.flag = flag
view.flag = str(flagsTmpl)
return view
|
We emphasize progress and introduce new merchandise into the market each and every year for Class 1500 Globe Valve , Class 150 Globe Valve , Class 600 Globe Valve , We wholeheartedly welcome buyers all over the globe arrive to visit our manufacturing facility and have a win-win cooperation with us!
"It is a good way to enhance our products and solutions and repair. Our mission will be to build creative solutions to consumers with a great experience for Class 1500 Globe Valve , Class 150 Globe Valve , Class 600 Globe Valve , We have been committed to meet all your needs and solve any technical problems you may encounter with your industrial components. Our exceptional products and vast knowledge of technology makes us the preferred choice for our customers.
|
import yodel.analysis
import yodel.filter
import yodel.complex
import yodel.conversion
import matplotlib.pyplot as plt
def frequency_response(response):
size = len(response)
freq_response_real = [0] * size
freq_response_imag = [0] * size
fft = yodel.analysis.FFT(size)
fft.forward(response, freq_response_real, freq_response_imag)
return freq_response_real, freq_response_imag
def amplitude_response(spec_real, spec_imag, db=True):
size = len(spec_real)
amp = [0] * size
for i in range(0, size):
amp[i] = yodel.complex.modulus(spec_real[i], spec_imag[i])
if db:
amp[i] = yodel.conversion.lin2db(amp[i])
return amp
def phase_response(spec_real, spec_imag, degrees=True):
size = len(spec_real)
pha = [0] * size
for i in range(0, size):
pha[i] = yodel.complex.phase(spec_real[i], spec_imag[i])
if degrees:
pha[i] = (pha[i] * 180.0 / math.pi)
return pha
class CustomFilterDesigner:
def __init__(self):
self.samplerate = 48000
self.framesize = 256
self.frsize = int((self.framesize/2)+1)
self.custom_fr = [1] * self.frsize
self.hzscale = [(i*self.samplerate) / (2.0*self.frsize) for i in range(0, self.frsize)]
self.flt = yodel.filter.Custom(self.samplerate, self.framesize)
self.pressed = None
self.update_filter()
self.create_plot()
def update_filter(self):
self.flt.design(self.custom_fr, False)
fr_re, fr_im = frequency_response(self.flt.ir)
self.fft_fr = amplitude_response(fr_re, fr_im, False)
def create_plot(self):
self.fig = plt.figure()
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onpress)
self.cid = self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
self.cid = self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion)
self.ax_custom_fr = self.fig.add_subplot(111)
self.ax_custom_fr.set_title('Custom Filter Design')
self.plot_custom_fr, = self.ax_custom_fr.plot(self.hzscale, self.custom_fr, 'r', label='Desired Frequency Response')
self.plot_fft_fr, = self.ax_custom_fr.plot(self.hzscale, self.fft_fr[0:self.frsize], 'b', label='Actual Frequency Response')
self.ax_custom_fr.legend()
self.ax_custom_fr.grid()
self.rescale_plot()
def rescale_plot(self):
self.ax_custom_fr.set_ylim(-1, 5)
plt.draw()
def onpress(self, event):
if event.inaxes != self.ax_custom_fr:
return
self.pressed = (event.xdata, event.ydata)
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
def onrelease(self, event):
self.pressed = None
def onmotion(self, event):
if self.pressed != None and event.xdata != None and event.ydata != None:
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
cfd = CustomFilterDesigner()
plt.show()
|
I am committed to a confidential, non -judgemental and respectful approach, showing empathy, competence and sincerity whilst supporting you in finding your way through life changes.
Psychodynamic counselling helps people review emotions, thoughts, early-life experiences, and their present-day problems to identify patterns they have developed over time. Recognising recurring patterns helps people see the ways in which they avoid distress or develop defence mechanisms as a way of coping so that they can take steps to change these patterns.
Psychodynamic Counselling works by bringing the unconscious thoughts and feelings to the surface. There is an emphasis on the influence of the past, and on recurring patterns of relationships. A difficult and painful situation in the present may have its roots in the past but the individual may not be aware of the connection.
During our counselling sessions we may consider your current difficulties in the context of past influences and experiences, exploring your childhood, significant relationships, beliefs and responses. You may be able to recognise more choices available to you in order to make changes now and in the future.
I have gained my counselling experience practicing in local community organisations and I also work closely with the addiction support services in Warwickshire.
I also have a professional and personal interest in addiction, including drugs, alcohol and gambling.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/HealthcareService) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class HealthcareService(domainresource.DomainResource):
""" The details of a healthcare service available at a location.
"""
resource_type = "HealthcareService"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.active = None
""" Whether this healthcareservice is in active use.
Type `bool`. """
self.appointmentRequired = None
""" If an appointment is required for access to this service.
Type `bool`. """
self.availabilityExceptions = None
""" Description of availability exceptions.
Type `str`. """
self.availableTime = None
""" Times the Service Site is available.
List of `HealthcareServiceAvailableTime` items (represented as `dict` in JSON). """
self.category = None
""" Broad category of service being performed or delivered.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.characteristic = None
""" Collection of characteristics (attributes).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.comment = None
""" Additional description and/or any specific issues not covered
elsewhere.
Type `str`. """
self.coverageArea = None
""" Location(s) service is inteded for/available to.
List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """
self.eligibility = None
""" Specific eligibility requirements required to use the service.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.eligibilityNote = None
""" Describes the eligibility conditions for the service.
Type `str`. """
self.endpoint = None
""" Technical endpoints providing access to services operated for the
location.
List of `FHIRReference` items referencing `Endpoint` (represented as `dict` in JSON). """
self.extraDetails = None
""" Extra details about the service that can't be placed in the other
fields.
Type `str`. """
self.identifier = None
""" External identifiers for this item.
List of `Identifier` items (represented as `dict` in JSON). """
self.location = None
""" Location(s) where service may be provided.
List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """
self.name = None
""" Description of service as presented to a consumer while searching.
Type `str`. """
self.notAvailable = None
""" Not available during this time due to provided reason.
List of `HealthcareServiceNotAvailable` items (represented as `dict` in JSON). """
self.photo = None
""" Facilitates quick identification of the service.
Type `Attachment` (represented as `dict` in JSON). """
self.programName = None
""" Program Names that categorize the service.
List of `str` items. """
self.providedBy = None
""" Organization that provides this service.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.referralMethod = None
""" Ways that the service accepts referrals.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.serviceProvisionCode = None
""" Conditions under which service is available/offered.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.specialty = None
""" Specialties handled by the HealthcareService.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.telecom = None
""" Contacts related to the healthcare service.
List of `ContactPoint` items (represented as `dict` in JSON). """
self.type = None
""" Type of service that may be delivered or performed.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(HealthcareService, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareService, self).elementProperties()
js.extend([
("active", "active", bool, False, None, False),
("appointmentRequired", "appointmentRequired", bool, False, None, False),
("availabilityExceptions", "availabilityExceptions", str, False, None, False),
("availableTime", "availableTime", HealthcareServiceAvailableTime, True, None, False),
("category", "category", codeableconcept.CodeableConcept, False, None, False),
("characteristic", "characteristic", codeableconcept.CodeableConcept, True, None, False),
("comment", "comment", str, False, None, False),
("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
("eligibility", "eligibility", codeableconcept.CodeableConcept, False, None, False),
("eligibilityNote", "eligibilityNote", str, False, None, False),
("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
("extraDetails", "extraDetails", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("location", "location", fhirreference.FHIRReference, True, None, False),
("name", "name", str, False, None, False),
("notAvailable", "notAvailable", HealthcareServiceNotAvailable, True, None, False),
("photo", "photo", attachment.Attachment, False, None, False),
("programName", "programName", str, True, None, False),
("providedBy", "providedBy", fhirreference.FHIRReference, False, None, False),
("referralMethod", "referralMethod", codeableconcept.CodeableConcept, True, None, False),
("serviceProvisionCode", "serviceProvisionCode", codeableconcept.CodeableConcept, True, None, False),
("specialty", "specialty", codeableconcept.CodeableConcept, True, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
("type", "type", codeableconcept.CodeableConcept, True, None, False),
])
return js
from . import backboneelement
class HealthcareServiceAvailableTime(backboneelement.BackboneElement):
""" Times the Service Site is available.
A collection of times that the Service Site is available.
"""
resource_type = "HealthcareServiceAvailableTime"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allDay = None
""" Always available? e.g. 24 hour service.
Type `bool`. """
self.availableEndTime = None
""" Closing time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.availableStartTime = None
""" Opening time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.daysOfWeek = None
""" mon | tue | wed | thu | fri | sat | sun.
List of `str` items. """
super(HealthcareServiceAvailableTime, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceAvailableTime, self).elementProperties()
js.extend([
("allDay", "allDay", bool, False, None, False),
("availableEndTime", "availableEndTime", fhirdate.FHIRDate, False, None, False),
("availableStartTime", "availableStartTime", fhirdate.FHIRDate, False, None, False),
("daysOfWeek", "daysOfWeek", str, True, None, False),
])
return js
class HealthcareServiceNotAvailable(backboneelement.BackboneElement):
""" Not available during this time due to provided reason.
The HealthcareService is not available during this period of time due to
the provided reason.
"""
resource_type = "HealthcareServiceNotAvailable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Reason presented to the user explaining why time not available.
Type `str`. """
self.during = None
""" Service not availablefrom this date.
Type `Period` (represented as `dict` in JSON). """
super(HealthcareServiceNotAvailable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceNotAvailable, self).elementProperties()
js.extend([
("description", "description", str, False, None, True),
("during", "during", period.Period, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
|
How thick is the PVC?
Freedom uses thick durable Mirasol PVC of .9 or 1.2, which is why we can offer a 5 year warranty on our boats.
What is the durability like on the boats?
Freedom Watercraft uses the highest quality materials. Many of our competors use only one rub strip, we offer 5 which means you can beach the vessel anywhere and never worry about damage.
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.verify.v2.form import FormList
from twilio.rest.verify.v2.service import ServiceList
from twilio.rest.verify.v2.verification_attempt import VerificationAttemptList
class V2(Version):
def __init__(self, domain):
"""
Initialize the V2 version of Verify
:returns: V2 version of Verify
:rtype: twilio.rest.verify.v2.V2.V2
"""
super(V2, self).__init__(domain)
self.version = 'v2'
self._forms = None
self._services = None
self._verification_attempts = None
@property
def forms(self):
"""
:rtype: twilio.rest.verify.v2.form.FormList
"""
if self._forms is None:
self._forms = FormList(self)
return self._forms
@property
def services(self):
"""
:rtype: twilio.rest.verify.v2.service.ServiceList
"""
if self._services is None:
self._services = ServiceList(self)
return self._services
@property
def verification_attempts(self):
"""
:rtype: twilio.rest.verify.v2.verification_attempt.VerificationAttemptList
"""
if self._verification_attempts is None:
self._verification_attempts = VerificationAttemptList(self)
return self._verification_attempts
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2>'
|
Alabama and Clemson meet Monday night time for the fourth straight 12 months within the School Soccer Playoff, and it’s the third time the Nationwide Championship is at stake. Ticket brokers anticipating a giant payoff are taking a shower, with costs down almost 75% over the previous two weeks. Levi’s Stadium in Santa Clara, California, is a haul for the core fan bases of Alabama and Clemson, and journey prices are prohibitive for a lot of.
Because the host group for the title sport, the San Francisco 49ers are additionally anticipated to endure loads of pink ink, to the tune of $eight million to $12 million, in keeping with San Francisco 49ers president Al Guido.
“You will get extremely important of any one-event P&L with out serious about the broader enterprise,” Guido advised the Silicon Valley Enterprise Journal final month in detailing the economics behind the sport for the 49ers.
The Santa Clara Stadium Authority owns the stadium, however the 49ers function the constructing. The group expects to generate roughly $15 million in income from the sport from promoting, concessions and its stock of 6,000 seats to the sport. However the 49ers are on the hook for prices associated to the occasion of $25 million.
The one-off loss is a drop within the bucket in contrast with the $1.Three billion in income the 49ers locked up earlier than the stadium opened in 2014. Followers and corporations decide to tickets, luxurious suites and promoting anticipating entry to huge occasions like Monday’s sport. The 49ers have delivered. Levi’s Stadium has hosted Tremendous Bowl 50, WrestleMania 31, Pac-12 Soccer Championship video games yearly, and live shows that includes artists like Taylor Swift, One Course and Kenny Chesney.
The TV publicity for sponsors like Levi’s, SAP and Intel can also be large, with 28.four million folks on common watching final 12 months’s school soccer title sport matchup. Levi’s is paying $220 million over 20 years for naming rights to the 49ers’ dwelling (click on right here for extra from Guido on his huge occasion technique through a podcast with my Forbes colleague Mike Ozanian).
Levi’s Stadium has been a boon to the franchise worth of the 49ers. The group was valued at $875 million by Forbes earlier than the group secured financing for the brand new constructing in 2010. The worth is up almost 250%, to $3.05 billion, since then. The worth ranks sixth amongst NFL groups, versus 27th earlier than the group’s new stadium plans have been solidified.
The common NFL group is now value $2.57 billion, up 146% throughout that very same span. The 49ers and the Los Angeles Rams had the biggest beneficial properties in worth over the previous decade by a large margin.
|
#!/usr/bin/python
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This is the code behind the Switching Eds blog post:
http://matthewearl.github.io/2015/07/28/switching-eds-with-python/
See the above for an explanation of the code below.
To run the script you'll need to install dlib (http://dlib.net) including its
Python bindings, and OpenCV. You'll also need to obtain the trained model from
sourceforge:
http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
Unzip with `bunzip2` and change `PREDICTOR_PATH` to refer to this file. The
script is run like so:
./faceswap.py <head image> <face image>
If successful, a file `output.jpg` will be produced with the facial features
from `<head image>` replaced with the facial features from `<face image>`.
"""
import cv2
import dlib
import numpy
import sys
PREDICTOR_PATH = "/home/matt/dlib-18.16/shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
class TooManyFaces(Exception):
pass
class NoFaces(Exception):
pass
def get_landmarks(im):
rects = detector(im, 1)
if len(rects) > 1:
raise TooManyFaces
if len(rects) == 0:
raise NoFaces
return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
def annotate_landmarks(im, landmarks):
im = im.copy()
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
cv2.putText(im, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
cv2.circle(im, pos, 3, color=(0, 255, 255))
return im
def draw_convex_hull(im, points, color):
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def get_face_mask(im, landmarks):
im = numpy.zeros(im.shape[:2], dtype=numpy.float64)
for group in OVERLAY_POINTS:
draw_convex_hull(im,
landmarks[group],
color=1)
im = numpy.array([im, im, im]).transpose((1, 2, 0))
im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
return im
def transformation_from_points(points1, points2):
"""
Return an affine transformation [s * R | T] such that:
sum ||s*R*p1,i + T - p2,i||^2
is minimized.
"""
# Solve the procrustes problem by subtracting centroids, scaling by the
# standard deviation, and then using the SVD to calculate the rotation. See
# the following for more details:
# https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
points1 = points1.astype(numpy.float64)
points2 = points2.astype(numpy.float64)
c1 = numpy.mean(points1, axis=0)
c2 = numpy.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = numpy.std(points1)
s2 = numpy.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = numpy.linalg.svd(points1.T * points2)
# The R we seek is in fact the transpose of the one given by U * Vt. This
# is because the above formulation assumes the matrix goes on the right
# (with row vectors) where as our solution requires the matrix to be on the
# left (with column vectors).
R = (U * Vt).T
return numpy.vstack([numpy.hstack(((s2 / s1) * R,
c2.T - (s2 / s1) * R * c1.T)),
numpy.matrix([0., 0., 1.])])
def read_im_and_landmarks(fname):
im = cv2.imread(fname, cv2.IMREAD_COLOR)
im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
im.shape[0] * SCALE_FACTOR))
s = get_landmarks(im)
return im, s
def warp_im(im, M, dshape):
output_im = numpy.zeros(dshape, dtype=im.dtype)
cv2.warpAffine(im,
M[:2],
(dshape[1], dshape[0]),
dst=output_im,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output_im
def correct_colours(im1, im2, landmarks1):
blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
# Avoid divide-by-zero errors.
im2_blur += 128 * (im2_blur <= 1.0)
return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
im2_blur.astype(numpy.float64))
im1, landmarks1 = read_im_and_landmarks(sys.argv[1])
im2, landmarks2 = read_im_and_landmarks(sys.argv[2])
M = transformation_from_points(landmarks1[ALIGN_POINTS],
landmarks2[ALIGN_POINTS])
mask = get_face_mask(im2, landmarks2)
warped_mask = warp_im(mask, M, im1.shape)
combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
axis=0)
warped_im2 = warp_im(im2, M, im1.shape)
warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)
output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
cv2.imwrite('output.jpg', output_im)
|
I’ve been looking at a lot of “Clean Eating” meal plans, grocery guides and recipes… I’m really intrigued. Cutting out all processed foods would be really beneficial, but also very challenging! I think after depriving myself of certain foods for a while, I would be cranky and end up giving in. Even simple things like coffee creamer, granola bars, and bottled salad dressing would be hard to avoid.
Without committing to the eating plan fully, there are plenty of replacements anyone can make to clean up their diet. Some of these alternate snack choices aren’t necessarily lower calorie, but they either have significantly less sugar, fat or sodium or they just have a much-reduced amount of artificial add-ins or funky chemicals.
I feel a little left out of the Nutella bandwagon…. I have no idea how the spread got so popular but last week I read on article about a Columbia University student stealing over $5,000 worth of the chocolate treat from the school cafeteria in a week??? What does one person do with that much Nutella! It is equal parts impressive and repulsive. As for me, I’d rather just go straight for some good ole’ peanut butter.
These Nutella Snack Packs are practically HALF SUGAR. Sugar, the first ingredient, makes up 23 of the 52 grams in the container. The breadsticks and chocolate cream add little to no nutritional value to your snack.
Sabra Hummus and Pretzel Thin Cups. For about the same amount of calories, this snack contains just one gram of sugar. It’s first ingredient is chickpeas, which are packed with protein and fiber. It’s super low in saturated fats, sodium and cholesterol. The Roasted Red Pepper is my favorite!
Peanut butter, almond butter, any nut butter – I love them all! But they are not created equal. JIF makes new pre-packaged peanut butter cups, at 250 calories a piece. They are perfect for traveling, dipping apple slices or banana, adding to oatmeal or just eating with a spoon. But they’re concoction is more than just peanuts and salt – sugar, molasses, and hydrogenated oils also make their way into the little cup.
310! Three Hundred and Ten Calories. I’m sorry, but that ice cream would be gone in 1.3 minutes and then I’d need to run about 3.5 miles to burn it off. NOT HAPPENING!
Frozen Greek Yogurt – you can make your own! Pick a fat-free Greek Yogurt (usually 100 calories for 6 ounces) and add in some banana, dark chocolate chips, almonds, or all of the above. Freeze and enjoy. You just saved yourself 20 grams of fat.
If you can’t get fresh fruit, don’t settle for sugar-loaded fruit cups. I saw some new “flavored” fruit cups and would like to have a serious conversation with whoever it was who had the idea to add caramel and brown sugar to simple fruit cups. No wonder kids are getting larger and larger! We are teaching them that to enjoy fruit, it must be covered with candy!
Dehydrated fruit still contains a decent amount of nutritional value. Again, not all dried fruit is created equal! For instance banana chips are full of oil and sugar; freeze-dried banana is much lower calorie/fat/sugar, though I do admit it has a little bit of a “Styrofoam” texture… ;) My favorite freeze-dried fruit is strawberries – they are so tart! Dried and dehydrated fruits certainly shouldn’t replace fresh fruit all the time, but most do count as a serving of fruit. Blueberry muffins, Banana Bread, Carrot Cake, and Oatmeal Raison cookies do not!
Chex Mix fill almost all vending machines, and the combination of textures and flavors plus the salty seasoning can be addicting. While the 130-calories per serving is not bad for a snack, the normal size packages contain 8 servings! Unless you measure out your 30 gram portion, you could end up eating much more than a simple serving.
Kashi cereal cups are the perfect amount, and I think dry cereal is a good afternoon snack when you need something crunchy! They are lightly flavored with honey and shaped like cute little hearts. 140 calories and no temptation to eat more than a serving.
It seems like every time I walk past the community kitchen, someone is making Mac n’ Cheese. (Either that or bacon…) I used to LOVE Kraft as a kid. But now the idea of cheese in powder form and the unnatural yellow kind of grosses me out. Besides the unnatural form of cheddar, this comfort food is loaded with sodium. One box is your entire day’s worth!
Also from Kraft, a throw back from elementary school lunch boxes: String cheese! It’s pre-portioned, and now there are half a dozen different flavors. (where was the Sweet BBQ string cheese when I was 7?!) Pair it with an apple or some grapes for a complete snack!
A few years ago, I would NEVER have turned down a Starbucks bottled Frappuccino. Any time I had a Target gift card from Christmas or something, I would buy a four-pack and mix & match all of the flavors to get one of each: vanilla, caramel, mocha, coffee. Those things were my favorite! But now I’m more conscious of sugar, fat, and calories. I’d rather make my own iced coffee.
Starbucks has really improved in the healthy department over the last few years! They’re breakfast wraps, Skinny Lattes, and now VIA Iced Coffees are perfect for addicts like me.
I actually have plenty more grocery swaps but this has turned into a pretty massive post, so I’ll save them for later.
Have you tried “Clean Eating” & what was the hardest thing to give up?
Posted on March 9, 2013, in Coffee, College, Dining Out, Healthy Habits and tagged fitness, grocery, health, snacks. Bookmark the permalink. 7 Comments.
Yeah, I started eating “clean” after learning a lot of scary stuff about processed foods in my environmental science class. We do Meatless Mondays as a class and I’ve also cut most processed foods out of my diet. Some of my favorite natural brands are Choboni Greek yogurt, Paul Newman’s cereal, Kashi granola bars, and Archer Farms Blue Corn Organic tortilla chips (from Super Target).
This is a fun post! I especially agree with you about the nut butters – they’re delicious enough on their own, without hydrogenated oils or sugar!
|
# coding: utf-8
"""Models for item-to-item collaborative filtering."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core import exceptions
from django.db import models
from django.db.models import signals as model_signals
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
import django_recommend
from . import conf
NO_RELATED_NAME = '+' # Try to clarify obscure Django syntax.
def respect_purge_setting(*args):
"""Raise or delete related objects based on settings.
This is a when_missing handler for
ObjectSimilarityQueryset.get_instances_for.
"""
if conf.settings.RECOMMEND_PURGE_MISSING_DATA:
django_recommend.forget_object(*args)
else:
raise
class ObjectSimilarityQueryset(models.QuerySet):
"""The custom manager used for the ObjectSimilarity class."""
def get_instances_for(self, obj, when_missing=respect_purge_setting):
"""Get the instances in this queryset that are not `obj`.
Returns a list.
when_missing:
a callback function to execute when an instance that should be
suggested is not present in the database (i.e. get() raises
ObjectDoesNotExist). This function will be called with two
parameters: the content type id, and the object id.
The default callback propagates the underlying ObjectDoesNotExist
exception.
If this method does not raise an exception, the triggering object
is simply ignored and not included in the result list. For this
reason it's possible for a queryset of 5 objects to only return,
say, 4 instances, if one of the objects referred to in an
ObjectSimilarity is in fact no longer present in the database.
"""
ctype = ContentType.objects.get_for_model(obj)
def get_object_from_ctype(contenttype, target_id):
"""The builtin method of doing this breaks with multiple DBs."""
return contenttype.model_class().objects.get(pk=target_id)
def get_object_params(sim_obj, num):
"""Get the content_type and PK of an object from sim_obj."""
prefix = 'object_{}_'.format(num)
target_id = getattr(sim_obj, prefix + 'id')
target_ctype = getattr(sim_obj, prefix + 'content_type')
return target_ctype, target_id
def get_other_object_params(sim_obj):
"""Get the content type and pk of the other object in sim_obj."""
same_id_as_1 = sim_obj.object_1_id == obj.pk
same_ctype_as_1 = sim_obj.object_1_content_type == ctype
if same_id_as_1 and same_ctype_as_1:
return get_object_params(sim_obj, 2)
return get_object_params(sim_obj, 1)
instances = []
for sim in self:
other_ctype, other_pk = get_other_object_params(sim)
try:
inst = get_object_from_ctype(other_ctype, other_pk)
except exceptions.ObjectDoesNotExist:
when_missing(other_ctype.pk, other_pk)
else:
instances.append(inst)
return instances
def __build_query(self, qset):
"""Get a lookup to match qset objects as either object_1 or object_2.
qset is any Django queryset.
"""
model = qset.model
ctype = ContentType.objects.get_for_model(model)
# Prevent cross-db joins
if qset.db != self.db:
ids = qset.values_list('id', flat=True)
# Forces the DB query to happen early
qset = list(ids)
lookup = ((Q(object_1_content_type=ctype) & Q(object_1_id__in=qset)) |
(Q(object_2_content_type=ctype) & Q(object_2_id__in=qset)))
return lookup
def exclude_objects(self, qset):
"""Exclude all similarities that include the given objects.
qset is a queryset of model instances to exclude. These should be the
types of objects stored in ObjectSimilarity/UserScore, **not**
ObjectSimilarity/UserScore themselves.
"""
return self.exclude(self.__build_query(qset))
def filter_objects(self, qset):
"""Find all similarities that include the given objects.
qset is a queryset of model instances to include. These should be the
types of objects stored in ObjectSimilarity/UserScore, **not**
ObjectSimilarity/UserScore themselves.
"""
return self.filter(self.__build_query(qset))
@python_2_unicode_compatible
class ObjectSimilarity(models.Model): # pylint: disable=model-missing-unicode
"""Similarity between two Django objects."""
object_1_id = models.IntegerField()
object_1_content_type = models.ForeignKey(ContentType,
related_name=NO_RELATED_NAME)
object_1 = GenericForeignKey('object_1_content_type', 'object_1_id')
object_2_id = models.IntegerField()
object_2_content_type = models.ForeignKey(ContentType,
related_name=NO_RELATED_NAME)
object_2 = GenericForeignKey('object_2_content_type', 'object_2_id')
# The actual similarity rating
score = models.FloatField()
objects = ObjectSimilarityQueryset.as_manager()
class Meta:
index_together = (
('object_1_id', 'object_1_content_type'),
('object_2_id', 'object_2_content_type'),
)
ordering = ['-score']
unique_together = (
'object_1_id', 'object_1_content_type', 'object_2_id',
'object_2_content_type',
)
def clean(self):
if (self.object_1_id == self.object_2_id and
self.object_1_content_type == self.object_2_content_type):
raise ValidationError('An object cannot be similar to itself.')
def save(self, *args, **kwargs):
self.full_clean()
super(ObjectSimilarity, self).save(*args, **kwargs)
@classmethod
def set(cls, obj_a, obj_b, score):
"""Set the similarity between obj_a and obj_b to score.
Returns the created ObjectSimilarity instance.
"""
# Always store the lower PKs as object_1, so the pair
# (object_1, object_2) has a distinct ordering, to prevent duplicate
# data.
def sort_key(obj):
"""Get a sortable tuple representing obj."""
return (ContentType.objects.get_for_model(obj).pk, obj.pk)
obj_a_key = sort_key(obj_a)
obj_b_key = sort_key(obj_b)
if obj_a_key < obj_b_key:
obj_1, obj_2 = obj_a, obj_b
else:
obj_1, obj_2 = obj_b, obj_a
inst_lookup = dict(
object_1_content_type=ContentType.objects.get_for_model(obj_1),
object_1_id=obj_1.pk,
object_2_content_type=ContentType.objects.get_for_model(obj_2),
object_2_id=obj_2.pk,
)
# Save space by not storing scores of 0.
if score == 0:
ObjectSimilarity.objects.filter(**inst_lookup).delete()
sim = None
else:
kwargs = dict(inst_lookup)
kwargs['defaults'] = {'score': score}
sim, _ = ObjectSimilarity.objects.update_or_create(**kwargs)
return sim
def __str__(self):
return '{}, {}: {}'.format(self.object_1_id, self.object_2_id,
self.score)
@python_2_unicode_compatible
class UserScore(models.Model):
"""Store a user's rating of an object.
"Rating" doesn't necessarily need to be e.g. 1-10 points or 1-5 star voting
system. It is often easy to treat e.g. object view as 1 point and object
bookmarking as 5 points, for example. This is called 'implicit feedback.'
"""
object_id = models.IntegerField()
object_content_type = models.ForeignKey(ContentType)
object = GenericForeignKey('object_content_type', 'object_id')
user = models.CharField(max_length=255, db_index=True)
score = models.FloatField()
class Meta:
index_together = ('object_id', 'object_content_type')
unique_together = ('object_id', 'object_content_type', 'user')
def save(self, *args, **kwargs):
self.full_clean()
super(UserScore, self).save(*args, **kwargs)
@classmethod
def __user_str(cls, user_or_str):
"""Coerce user_or_str params into a string."""
try:
user_id = user_or_str.pk
except AttributeError:
return user_or_str
return 'user:{}'.format(user_id)
@classmethod
def set(cls, user_or_str, obj, score):
"""Store the score for the given user and given object.
Returns the created UserScore instance.
"""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
inst_lookup = dict(
user=user, object_id=obj.pk, object_content_type=ctype)
if score:
kwargs = dict(inst_lookup)
kwargs['defaults'] = {'score': score}
inst, _ = cls.objects.update_or_create(**kwargs)
else:
inst = None
cls.objects.filter(**inst_lookup).delete()
return inst
@classmethod
def setdefault(cls, user_or_str, obj, score):
"""Store the user's score only if there's no existing score."""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
cls.objects.get_or_create(
user=user, object_id=obj.pk, object_content_type=ctype,
defaults={'score': score}
)
@classmethod
def get(cls, user_or_str, obj):
"""Get the score that user gave to obj.
Returns the actual score value, not the UserScore instance.
"Unrated" objects return 0.
"""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
try:
inst = cls.objects.get(user=user, object_id=obj.pk,
object_content_type=ctype)
return inst.score
except cls.DoesNotExist:
return 0
@classmethod
def scores_for(cls, obj):
"""Get all scores for the given object.
Returns a dictionary, not a queryset.
"""
ctype = ContentType.objects.get_for_model(obj)
scores = cls.objects.filter(object_content_type=ctype,
object_id=obj.pk)
return {score.user: score.score for score in scores}
def __str__(self):
return '{}, {}: {}'.format(self.user, self.object_id, self.score)
def call_handler(*args, **kwargs):
"""Proxy for the signal handler defined in tasks.
Prevents a circular import problem.
"""
from . import tasks
tasks.signal_handler(*args, **kwargs)
model_signals.post_save.connect(call_handler, UserScore,
dispatch_uid="recommend_post_save")
model_signals.post_delete.connect(call_handler, UserScore,
dispatch_uid="recommend_post_save")
|
With Evermine coupon codes or promo codes and sales, sometimes as well as special offers or deals, discount codes, free shipping offers and clearance, savesay strives to save your time of searching for Evermine coupons or Evermine discount codes and ensure every shopping experience worth your time, money and effort. Enjoy the incredible steep discount with Evermine promo codes or Evermine.com discount codes from savesay now.
Savesay updates coupons or promo codes, alongside any sales, deals, special offers and discount information daily and presents anything new related to promotion to ensure you get the latest and best discount. In addition, savesay regularly delete invalid or expired coupons or sales to make sure the most useful Evermine.com coupons, Evermine.com promo codes, Evermine.com discount codes or sales available to you. Meanwhile, savesay offers detailed shopping and saving tips to help you better your shopping experience with Evermine coupons or Evermine promo codes. Get it started now and be sure to grab a Evermine coupon codes or Evermine discount codes before checkout.
A Evermine.com coupon or Evermine.com discount code will bring you one step closer to using the latest products or enjoying the amazing service from this trusted brand. The longer you stay with savesay, the more amazing discounts and deals you can enjoy. Savesay is always here to help you realize your vision at an beatable price with Evermine coupon codes, Evermine discount codes, or sales and deals and more. Visiting any one of Evermine locations or using your mouse all depends on you while your shopping experience will be even much better when you use Evermine.com coupons or Evermine.com promo codes from savesay.
With Evermine coupons or Evermine discount codes or any other sales and promotions, regardless of where, what and when you plan to buy, by sunlight or moonlight, using your mouse or visiting any one location, savesay is never far away from all your needs at a steep discounted price you just can't beat. Smart you will never miss out any opportunity to save time and money, and savesay is absolutely your best and the most sensible choice to fulfill it.
When you find a Evermine coupon code or Evermine discount code from savesay, go about your online shopping as you normally would. When you've got all your items in your cart, begin the checkout process by either logging in or creating a new account. Review your purchases in your cart, and then enter your Evermine code in the promotional field on the right side of your screen, generally a box next to "Have a promo code?" and click "apply" to get the discount. As you can see, this field is good for promotional codes, gift cards, and discount cards and so on. Among the many great deals and promo codes, there is a world of discounts awaiting. Just come to savesay, where you are bound to get anything you like at an incredible discounted price with an Evermine coupon or Evermine promo code in hand.
Currently all Evermine coupons have been expired. More coupons will be updated soon, meanwhile welcome to share coupons here.
|
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import abc
import collections
import contextlib
import functools
import imp
import inspect
import json
import optparse
import os
import os.path
import platform
import re
import sys
import time
import traceback
import types
from pathlib import Path, PurePath
from select import select as _select
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Pattern,
Set,
Tuple,
TypeVar,
Union,
)
import pywatchman
from pywatchman import WatchmanError
from six import PY3, iteritems, itervalues, string_types
# Python 2.6, 2.7, use iterator filter from Python 3
from six.moves import builtins, filter
from .deterministic_set import DeterministicSet
from .glob_internal import glob_internal
from .glob_watchman import SyncCookieState, glob_watchman
from .json_encoder import BuckJSONEncoder
from .module_whitelist import ImportWhitelistManager
from .profiler import Profiler, Tracer, emit_trace, scoped_trace, traced
from .select_support import SelectorList, SelectorValue
from .struct import create_struct_class, struct
from .util import (
Diagnostic,
cygwin_adjusted_path,
get_caller_frame,
is_in_dir,
is_special,
)
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
# Those tagged with @provide_as_native_rule will be present unless
# explicitly disabled by parser.native_rules_enabled_in_build_files
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
#
# "cell_name" - The cell name the build file is in.
BUILD_FUNCTIONS = [] # type: List[Callable]
NATIVE_FUNCTIONS = [] # type: List[Callable]
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 60.0 # type: float
# Globals that should not be copied from one module into another
_HIDDEN_GLOBALS = {"include_defs", "load"} # type: Set[str]
ORIGINAL_IMPORT = builtins.__import__
_LOAD_TARGET_PATH_RE = re.compile(
r"^(?P<root>(?P<cell>@?[\w\-.]+)?//)?(?P<package>.*):(?P<target>.*)$"
) # type: Pattern[str]
# matches anything equivalent to recursive glob on all dirs
# e.g. "**/", "*/**/", "*/*/**/"
_RECURSIVE_GLOB_PATTERN = re.compile("^(\*/)*\*\*/") # type: Pattern[str]
class AbstractContext(object):
"""Superclass of execution contexts."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def includes(self):
# type: () -> Set[str]
raise NotImplementedError()
@abc.abstractproperty
def used_configs(self):
# type: () -> Dict[str, Dict[str, str]]
raise NotImplementedError()
@abc.abstractproperty
def used_env_vars(self):
# type: () -> Dict[str, str]
raise NotImplementedError()
@abc.abstractproperty
def diagnostics(self):
# type: () -> List[Diagnostic]
raise NotImplementedError()
def merge(self, other):
# type: (AbstractContext) -> None
"""Merge the context of an included file into the current context.
:param AbstractContext other: the include context to merge.
:rtype: None
"""
self.includes.update(other.includes)
self.diagnostics.extend(other.diagnostics)
self.used_configs.update(other.used_configs)
self.used_env_vars.update(other.used_env_vars)
class BuildFileContext(AbstractContext):
"""The build context used when processing a build file."""
def __init__(
self,
project_root,
base_path,
path,
dirname,
cell_name,
allow_empty_globs,
ignore_paths,
watchman_client,
watchman_watch_root,
watchman_project_prefix,
sync_cookie_state,
watchman_glob_stat_results,
watchman_use_glob_generator,
implicit_package_symbols,
):
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
self.rules = {}
self.project_root = project_root
self.base_path = base_path
self.path = path
self.cell_name = cell_name
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
self.implicit_package_symbols = implicit_package_symbols
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
class IncludeContext(AbstractContext):
"""The build context used when processing an include."""
def __init__(self, cell_name, path):
# type: (str, str) -> None
"""
:param cell_name: a cell name of the current context. Note that this cell name can be
different from the one BUCK file is evaluated in, since it can load extension files
from other cells, which should resolve their loads relative to their own location.
"""
self.cell_name = cell_name
self.path = path
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
# Generic context type that should be used in places where return and parameter
# types are the same but could be either of the concrete contexts.
_GCT = TypeVar("_GCT", IncludeContext, BuildFileContext)
LoadStatement = Dict[str, Union[str, Dict[str, str]]]
BuildInclude = collections.namedtuple("BuildInclude", ["cell_name", "path"])
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
# type: (Callable) -> None
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({"build_env": self.build_env})
try:
return self.func(*args, **updated_kwargs)
except TypeError:
missing_args, extra_args = get_mismatched_args(
self.func, args, updated_kwargs
)
if missing_args or extra_args:
name = "[missing]"
if "name" in updated_kwargs:
name = updated_kwargs["name"]
elif len(args) > 0:
# Optimistically hope that name is the first arg. It generally is...
name = args[0]
raise IncorrectArgumentsException(
self.func.func_name, name, missing_args, extra_args
)
raise
HostInfoOs = collections.namedtuple(
"HostInfoOs", ["is_linux", "is_macos", "is_windows", "is_freebsd", "is_unknown"]
)
HostInfoArch = collections.namedtuple(
"HostInfoArch",
[
"is_aarch64",
"is_arm",
"is_armeb",
"is_i386",
"is_mips",
"is_mips64",
"is_mipsel",
"is_mipsel64",
"is_powerpc",
"is_ppc64",
"is_unknown",
"is_x86_64",
],
)
HostInfo = collections.namedtuple("HostInfo", ["os", "arch"])
__supported_oses = {
"darwin": "macos",
"windows": "windows",
"linux": "linux",
"freebsd": "freebsd",
} # type: Dict[str, str]
# Pulled from com.facebook.buck.util.environment.Architecture.java as
# possible values. amd64 and arm64 are remapped, but they may not
# actually be present on most systems
__supported_archs = {
"aarch64": "aarch64",
"arm": "arm",
"armeb": "armeb",
"i386": "i386",
"mips": "mips",
"mips64": "mips64",
"mipsel": "mipsel",
"mipsel64": "mipsel64",
"powerpc": "powerpc",
"ppc64": "ppc64",
"unknown": "unknown",
"x86_64": "x86_64",
"amd64": "x86_64",
"arm64": "aarch64",
} # type: Dict[str, str]
def host_info(platform_system=platform.system, platform_machine=platform.machine):
host_arch = __supported_archs.get(platform_machine().lower(), "unknown")
host_os = __supported_oses.get(platform_system().lower(), "unknown")
return HostInfo(
os=HostInfoOs(
is_linux=(host_os == "linux"),
is_macos=(host_os == "macos"),
is_windows=(host_os == "windows"),
is_freebsd=(host_os == "freebsd"),
is_unknown=(host_os == "unknown"),
),
arch=HostInfoArch(
is_aarch64=(host_arch == "aarch64"),
is_arm=(host_arch == "arm"),
is_armeb=(host_arch == "armeb"),
is_i386=(host_arch == "i386"),
is_mips=(host_arch == "mips"),
is_mips64=(host_arch == "mips64"),
is_mipsel=(host_arch == "mipsel"),
is_mipsel64=(host_arch == "mipsel64"),
is_powerpc=(host_arch == "powerpc"),
is_ppc64=(host_arch == "ppc64"),
is_unknown=(host_arch == "unknown"),
is_x86_64=(host_arch == "x86_64"),
),
)
_cached_host_info = host_info()
def get_mismatched_args(func, actual_args, actual_kwargs):
argspec = inspect.getargspec(func)
required_args = set()
all_acceptable_args = []
for i, arg in enumerate(argspec.args):
if i < (len(argspec.args) - len(argspec.defaults)):
required_args.add(arg)
all_acceptable_args.append(arg)
extra_kwargs = set(actual_kwargs) - set(all_acceptable_args)
for k in set(actual_kwargs) - extra_kwargs:
all_acceptable_args.remove(k)
not_supplied_args = all_acceptable_args[len(actual_args) :]
missing_args = [arg for arg in not_supplied_args if arg in required_args]
return missing_args, sorted(list(extra_kwargs))
class IncorrectArgumentsException(TypeError):
def __init__(self, func_name, name_arg, missing_args, extra_args):
self.missing_args = missing_args
self.extra_args = extra_args
message = "Incorrect arguments to %s with name %s:" % (func_name, name_arg)
if missing_args:
message += " Missing required args: %s" % (", ".join(missing_args),)
if extra_args:
message += " Extra unknown kwargs: %s" % (", ".join(extra_args),)
super(IncorrectArgumentsException, self).__init__(message)
class BuildFileFailError(Exception):
pass
def provide_as_native_rule(func):
# type: (Callable) -> Callable
NATIVE_FUNCTIONS.append(func)
return func
def provide_for_build(func):
# type: (Callable) -> Callable
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
# type: (Dict, BuildFileContext) -> None
"""Record a rule in the current context.
This should be invoked by rule functions generated by the Java code.
:param dict rule: dictionary of the rule's fields.
:param build_env: the current context.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `{}()` at the top-level of an included file.".format(
rule["buck.type"]
)
# Include the base path of the BUCK file so the reader consuming this
# output will know which BUCK file the rule came from.
if "name" not in rule:
raise ValueError("rules must contain the field 'name'. Found %s." % rule)
rule_name = rule["name"]
if not isinstance(rule_name, string_types):
raise ValueError("rules 'name' field must be a string. Found %s." % rule_name)
if rule_name in build_env.rules:
raise ValueError(
"Duplicate rule definition '%s' found. Found %s and %s"
% (rule_name, rule, build_env.rules[rule_name])
)
rule["buck.base_path"] = build_env.base_path
build_env.rules[rule_name] = rule
@traced(stats_key="Glob")
def glob(
includes, excludes=None, include_dotfiles=False, build_env=None, search_base=None
):
# type: (List[str], Optional[List[str]], bool, BuildFileContext, str) -> List[str]
if excludes is None:
excludes = []
assert isinstance(
build_env, BuildFileContext
), "Cannot use `glob()` at the top-level of an included file."
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(
includes, string_types
), "The first argument to glob() must be a list of strings."
assert not isinstance(
excludes, string_types
), "The excludes argument must be a list of strings."
if search_base is None:
search_base = Path(build_env.dirname)
if build_env.dirname == build_env.project_root and any(
_RECURSIVE_GLOB_PATTERN.match(pattern) for pattern in includes
):
fail(
"Recursive globs are prohibited at top-level directory", build_env=build_env
)
results = None
if not includes:
results = []
elif build_env.watchman_client:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client,
build_env.diagnostics,
build_env.watchman_glob_stat_results,
build_env.watchman_use_glob_generator,
)
if results:
# glob should consistently return paths of type str, but
# watchman client returns unicode in Python 2 instead.
# Extra check is added to make this conversion resilient to
# watchman API changes.
results = [
res.encode("utf-8") if not isinstance(res, str) else res
for res in results
]
if results is None:
results = glob_internal(
includes,
excludes,
build_env.ignore_paths,
include_dotfiles,
search_base,
build_env.project_root,
)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) "
+ "returned no results. (allow_empty_globs is set to false in the Buck "
+ "configuration)"
).format(includes=includes, excludes=excludes, include_dotfiles=include_dotfiles)
return results
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, (
"Conflicting header files in header search paths. "
+ '"%s" maps to both "%s" and "%s".'
% (key, result[key], header_map[key])
)
result[key] = header_map[key]
return result
def single_subdir_glob(
dirpath, glob_pattern, excludes=None, prefix=None, build_env=None, search_base=None
):
if excludes is None:
excludes = []
results = {}
files = glob(
[os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base,
)
for f in files:
if dirpath:
key = f[len(dirpath) + 1 :]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# This method is called by tests for both posix style paths and
# windows style paths.
# When running tests, search_base is always set
# and happens to have the correct platform-specific Path type.
cls = PurePath if not search_base else type(search_base)
key = str(cls(prefix) / cls(key))
results[key] = f
return results
def subdir_glob(
glob_specs, excludes=None, prefix=None, build_env=None, search_base=None
):
"""
Given a list of tuples, the form of (relative-sub-directory, glob-pattern),
return a dict of sub-directory relative paths to full paths. Useful for
defining header maps for C/C++ libraries which should be relative the given
sub-directory.
If prefix is not None, prepends it it to each key in the dictionary.
"""
if excludes is None:
excludes = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
single_subdir_glob(
dirpath, glob_pattern, excludes, prefix, build_env, search_base
)
)
return merge_maps(*results)
def _get_package_name(func_name, build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
assert isinstance(build_env, BuildFileContext), (
"Cannot use `%s()` at the top-level of an included file." % func_name
)
return build_env.base_path
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("get_base_path", build_env=build_env)
@provide_for_build
def package_name(build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("package_name", build_env=build_env)
@provide_for_build
def fail(message, attr=None, build_env=None):
"""Raises a parse error.
:param message: Error message to display for the user.
The object is converted to a string.
:param attr: Optional name of the attribute that caused the error.
"""
attribute_prefix = "attribute " + attr + ": " if attr is not None else ""
msg = attribute_prefix + str(message)
raise BuildFileFailError(msg)
@provide_for_build
def get_cell_name(build_env=None):
"""Get the cell name of the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "cell". The return value will be "" if
the build file does not have a cell
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `get_cell_name()` at the top-level of an included file."
return build_env.cell_name
@provide_for_build
def select(conditions, no_match_message=None, build_env=None):
"""Allows to provide a configurable value for an attribute"""
return SelectorList([SelectorValue(conditions, no_match_message)])
@provide_as_native_rule
def repository_name(build_env=None):
"""
Get the repository (cell) name of the build file that was initially
evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "@cell". The return value will be "@" if
the build file is in the main (standalone) repository.
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `repository_name()` at the top-level of an included file."
return "@" + build_env.cell_name
@provide_as_native_rule
def rule_exists(name, build_env=None):
"""
:param name: name of the build rule
:param build_env: current build environment
:return: True if a rule with provided name has already been defined in
current file.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `rule_exists()` at the top-level of an included file."
return name in build_env.rules
def flatten_list_of_dicts(list_of_dicts):
"""Flatten the given list of dictionaries by merging l[1:] onto
l[0], one at a time. Key/Value pairs which appear in later list entries
will override those that appear in earlier entries
:param list_of_dicts: the list of dict objects to flatten.
:return: a single dict containing the flattened list
"""
return_value = {}
for d in list_of_dicts:
for k, v in iteritems(d):
return_value[k] = v
return return_value
@provide_for_build
def flatten_dicts(*args, **_):
"""Flatten the given list of dictionaries by merging args[1:] onto
args[0], one at a time.
:param *args: the list of dict objects to flatten.
:param **_: ignore the build_env kwarg
:return: a single dict containing the flattened list
"""
return flatten_list_of_dicts(args)
@provide_for_build
def depset(elements, build_env=None):
"""Creates an instance of sets with deterministic iteration order.
:param elements: the list of elements constituting the returned depset.
:rtype: DeterministicSet
"""
return DeterministicSet(elements)
GENDEPS_SIGNATURE = re.compile(
r"^#@# GENERATED FILE: DO NOT MODIFY ([a-f0-9]{40}) #@#\n$"
)
class BuildFileProcessor(object):
"""Handles the processing of a single build file.
:type _current_build_env: AbstractContext | None
"""
SAFE_MODULES_CONFIG = {
"os": ["environ", "getenv", "path", "sep", "pathsep", "linesep"],
"os.path": [
"basename",
"commonprefix",
"dirname",
"isabs",
"join",
"normcase",
"relpath",
"split",
"splitdrive",
"splitext",
"sep",
"pathsep",
],
"pipes": ["quote"],
}
def __init__(
self,
project_root,
cell_roots,
cell_name,
build_file_name,
allow_empty_globs,
watchman_client,
watchman_glob_stat_results,
watchman_use_glob_generator,
project_import_whitelist=None,
implicit_includes=None,
extra_funcs=None,
configs=None,
env_vars=None,
ignore_paths=None,
disable_implicit_native_rules=False,
warn_about_deprecated_syntax=True,
):
if project_import_whitelist is None:
project_import_whitelist = []
if implicit_includes is None:
implicit_includes = []
if extra_funcs is None:
extra_funcs = []
if configs is None:
configs = {}
if env_vars is None:
env_vars = {}
if ignore_paths is None:
ignore_paths = []
self._include_cache = {}
self._current_build_env = None
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._cell_roots = cell_roots
self._cell_name = cell_name
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_glob_stat_results = watchman_glob_stat_results
self._watchman_use_glob_generator = watchman_use_glob_generator
self._configs = configs
self._env_vars = env_vars
self._ignore_paths = ignore_paths
self._disable_implicit_native_rules = disable_implicit_native_rules
self._warn_about_deprecated_syntax = warn_about_deprecated_syntax
lazy_global_functions = {}
lazy_native_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_global_functions[func.__name__] = func_with_env
for func in NATIVE_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func)
lazy_native_functions[func.__name__] = func_with_env
self._global_functions = lazy_global_functions
self._native_functions = lazy_native_functions
self._native_module_class_for_extension = self._create_native_module_class(
self._global_functions, self._native_functions
)
self._native_module_class_for_build_file = self._create_native_module_class(
self._global_functions,
[] if self._disable_implicit_native_rules else self._native_functions,
)
self._import_whitelist_manager = ImportWhitelistManager(
import_whitelist=self._create_import_whitelist(project_import_whitelist),
safe_modules_config=self.SAFE_MODULES_CONFIG,
path_predicate=lambda path: is_in_dir(path, self._project_root),
)
# Set of helpers callable from the child environment.
self._default_globals_for_extension = self._create_default_globals(False, False)
self._default_globals_for_implicit_include = self._create_default_globals(
False, True
)
self._default_globals_for_build_file = self._create_default_globals(True, False)
def _create_default_globals(self, is_build_file, is_implicit_include):
# type: (bool) -> Dict[str, Callable]
return {
"include_defs": functools.partial(self._include_defs, is_implicit_include),
"add_build_file_dep": self._add_build_file_dep,
"read_config": self._read_config,
"implicit_package_symbol": self._implicit_package_symbol,
"allow_unsafe_import": self._import_whitelist_manager.allow_unsafe_import,
"glob": self._glob,
"subdir_glob": self._subdir_glob,
"load": functools.partial(self._load, is_implicit_include),
"struct": struct,
"provider": self._provider,
"host_info": self._host_info,
"native": self._create_native_module(is_build_file=is_build_file),
}
def _create_native_module(self, is_build_file):
"""
Creates a native module exposing built-in Buck rules.
This module allows clients to refer to built-in Buck rules using
"native.<native_rule>" syntax in their build files. For example,
"native.java_library(...)" will use a native Java library rule.
:return: 'native' module struct.
"""
native_globals = {}
self._install_builtins(native_globals, force_native_rules=not is_build_file)
assert "glob" not in native_globals
assert "host_info" not in native_globals
assert "implicit_package_symbol" not in native_globals
assert "read_config" not in native_globals
native_globals["glob"] = self._glob
native_globals["host_info"] = self._host_info
native_globals["implicit_package_symbol"] = self._implicit_package_symbol
native_globals["read_config"] = self._read_config
return (
self._native_module_class_for_build_file(**native_globals)
if is_build_file
else self._native_module_class_for_extension(**native_globals)
)
@staticmethod
def _create_native_module_class(global_functions, native_functions):
"""
Creates a native module class.
:return: namedtuple instance for native module
"""
return collections.namedtuple(
"native",
list(global_functions)
+ list(native_functions)
+ ["glob", "host_info", "read_config", "implicit_package_symbol"],
)
def _wrap_env_var_read(self, read, real):
"""
Return wrapper around function that reads an environment variable so
that the read is recorded.
"""
@functools.wraps(real)
def wrapper(varname, *arg, **kwargs):
self._record_env_var(varname, read(varname))
return real(varname, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _with_env_interceptor(self, read, obj, *attrs):
"""
Wrap a function, found at `obj.attr`, that reads an environment
variable in a new function which records the env var read.
"""
orig = []
for attr in attrs:
real = getattr(obj, attr)
wrapped = self._wrap_env_var_read(read, real)
setattr(obj, attr, wrapped)
orig.append((attr, real))
try:
yield
finally:
for attr, real in orig:
setattr(obj, attr, real)
@contextlib.contextmanager
def with_env_interceptors(self):
"""
Install environment variable read interceptors into all known ways that
a build file can access the environment.
"""
# Use a copy of the env to provide a function to get at the low-level
# environment. The wrappers will use this when recording the env var.
read = dict(os.environ).get
# Install interceptors into the main ways a user can read the env.
with self._with_env_interceptor(
read, os.environ, "__contains__", "__getitem__", "get"
):
yield
@staticmethod
def _merge_explicit_globals(src, dst, whitelist=None, whitelist_mapping=None):
# type: (types.ModuleType, Dict[str, Any], Tuple[str], Dict[str, str]) -> None
"""Copy explicitly requested global definitions from one globals dict to another.
If whitelist is set, only globals from the whitelist will be pulled in.
If whitelist_mapping is set, globals will be exported under the name of the keyword. For
example, foo="bar" would mean that a variable with name "bar" in imported file, will be
available as "foo" in current file.
"""
if whitelist is not None:
for symbol in whitelist:
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[symbol] = src.__dict__[symbol]
if whitelist_mapping is not None:
for exported_name, symbol in iteritems(whitelist_mapping):
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[exported_name] = src.__dict__[symbol]
def _merge_globals(self, mod, dst):
# type: (types.ModuleType, Dict[str, Any]) -> None
"""Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
keys = getattr(mod, "__all__", mod.__dict__.keys())
for key in keys:
# Block copying modules unless they were specified in '__all__'
block_copying_module = not hasattr(mod, "__all__") and isinstance(
mod.__dict__[key], types.ModuleType
)
if (
not key.startswith("_")
and key not in _HIDDEN_GLOBALS
and not block_copying_module
):
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in itervalues(self._global_functions):
function.build_env = build_env
for function in itervalues(self._native_functions):
function.build_env = build_env
def _install_builtins(self, namespace, force_native_rules=False):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in iteritems(self._global_functions):
namespace[name] = function.invoke
if not self._disable_implicit_native_rules or force_native_rules:
for name, function in iteritems(self._native_functions):
namespace[name] = function.invoke
@contextlib.contextmanager
def with_builtins(self, namespace):
"""
Installs the build functions for the duration of a `with` block.
"""
original_namespace = namespace.copy()
self._install_builtins(namespace)
try:
yield
finally:
namespace.clear()
namespace.update(original_namespace)
def _resolve_include(self, name):
# type: (str) -> BuildInclude
"""Resolve the given include def name to a BuildInclude metadata."""
match = re.match(r"^([A-Za-z0-9_]*)//(.*)$", name)
if match is None:
raise ValueError(
"include_defs argument {} should be in the form of "
"//path or cellname//path".format(name)
)
cell_name = match.group(1)
relative_path = match.group(2)
if len(cell_name) > 0:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"include_defs argument {} references an unknown cell named {} "
"known cells: {!r}".format(name, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(cell_root, relative_path)),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(self._project_root, relative_path)),
)
def _get_load_path(self, label):
# type: (str) -> BuildInclude
"""Resolve the given load function label to a BuildInclude metadata."""
match = _LOAD_TARGET_PATH_RE.match(label)
if match is None:
raise ValueError(
"load label {} should be in the form of "
"//path:file or cellname//path:file".format(label)
)
cell_name = match.group("cell")
if cell_name:
if cell_name.startswith("@"):
cell_name = cell_name[1:]
elif self._warn_about_deprecated_syntax:
self._emit_warning(
'{} has a load label "{}" that uses a deprecated cell format. '
'"{}" should instead be "@{}".'.format(
self._current_build_env.path, label, cell_name, cell_name
),
"load function",
)
else:
cell_name = self._current_build_env.cell_name
relative_path = match.group("package")
file_name = match.group("target")
label_root = match.group("root")
if not label_root:
# relative include. e.g. :foo.bzl
if "/" in file_name:
raise ValueError(
"Relative loads work only for files in the same directory. "
+ "Please use absolute label instead ([cell]//pkg[/pkg]:target)."
)
callee_dir = os.path.dirname(self._current_build_env.path)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(callee_dir, file_name)),
)
elif cell_name:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"load label {} references an unknown cell named {} "
"known cells: {!r}".format(label, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(cell_root, relative_path, file_name)
),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(self._project_root, relative_path, file_name)
),
)
def _read_config(self, section, field, default=None):
# type: (str, str, Any) -> Any
"""
Lookup a setting from `.buckconfig`.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
key = section, field
value = self._configs.get(key)
if value is not None and not isinstance(value, str):
# Python 2 returns unicode values from parsed JSON configs, but
# only str types should be exposed to clients
value = value.encode("utf-8")
# replace raw values to avoid decoding for frequently used configs
self._configs[key] = value
build_env.used_configs[section][field] = value
# If no config setting was found, return the default.
if value is None:
return default
return value
def _implicit_package_symbol(self, symbol, default=None):
# type: (str, Any) -> Any
"""
Gives access to a symbol that has been implicitly loaded for the package of the
build file that is currently being evaluated. If the symbol was not present,
`default` will be returned.
"""
build_env = self._current_build_env
return build_env.implicit_package_symbols.get(symbol, default)
def _glob(
self,
includes,
excludes=None,
include_dotfiles=False,
search_base=None,
exclude=None,
):
assert exclude is None or excludes is None, (
"Mixing 'exclude' and 'excludes' attributes is not allowed. Please replace your "
"exclude and excludes arguments with a single 'excludes = %r'."
% (exclude + excludes)
)
excludes = excludes or exclude
build_env = self._current_build_env # type: BuildFileContext
return glob(
includes,
excludes=excludes,
include_dotfiles=include_dotfiles,
search_base=search_base,
build_env=build_env,
)
def _subdir_glob(self, glob_specs, excludes=None, prefix=None, search_base=None):
build_env = self._current_build_env
return subdir_glob(
glob_specs,
excludes=excludes,
prefix=prefix,
search_base=search_base,
build_env=build_env,
)
def _record_env_var(self, name, value):
# type: (str, Any) -> None
"""
Record a read of an environment variable.
This method is meant to wrap methods in `os.environ` when called from
any files or includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
build_env.used_env_vars[name] = value
def _called_from_project_file(self):
# type: () -> bool
"""
Returns true if the function was called from a project file.
"""
frame = get_caller_frame(skip=[__name__])
filename = inspect.getframeinfo(frame).filename
return is_in_dir(filename, self._project_root)
def _include_defs(self, is_implicit_include, name, namespace=None):
# type: (bool, str, Optional[str]) -> None
"""Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._resolve_include(name)
inner_env, mod = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
# into it's symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
if namespace is not None:
# If using a fresh namespace, create a fresh module to populate.
fresh_module = imp.new_module(namespace)
fresh_module.__file__ = mod.__file__
self._merge_globals(mod, fresh_module.__dict__)
frame.f_globals[namespace] = fresh_module
else:
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load(self, is_implicit_include, name, *symbols, **symbol_kwargs):
# type: (bool, str, *str, **str) -> None
"""Pull the symbols from the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
assert symbols or symbol_kwargs, "expected at least one symbol to load"
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(name)
inner_env, module = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
# into it's symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
BuildFileProcessor._merge_explicit_globals(
module, frame.f_globals, symbols, symbol_kwargs
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load_package_implicit(self, build_env, package_implicit_load):
"""
Updates `build_env` to contain all symbols from `package_implicit_load`
Args:
build_env: The build environment on which to modify includes /
implicit_package_symbols properties
package_implicit_load: A dictionary with "load_path", the first part of the
a `load` statement, and "load_symbols", a dictionary
that works like the **symbols attribute of `load`
"""
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(package_implicit_load["load_path"])
inner_env, module = self._process_include(build_include, True)
# Validate that symbols that are requested explicitly by config are present
# in the .bzl file
for key, value in iteritems(package_implicit_load["load_symbols"]):
try:
build_env.implicit_package_symbols[key] = getattr(module, value)
except AttributeError:
raise BuildFileFailError(
"Could not find symbol '{}' in implicitly loaded extension '{}'".format(
value, package_implicit_load["load_path"]
)
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
@staticmethod
def _provider(doc="", fields=None):
# type: (str, Union[List[str], Dict[str, str]]) -> Callable
"""Creates a declared provider factory.
The return value of this function can be used to create "struct-like"
values. Example:
SomeInfo = provider()
def foo():
return 3
info = SomeInfo(x = 2, foo = foo)
print(info.x + info.foo()) # prints 5
Optional fields can be used to restrict the set of allowed fields.
Example:
SomeInfo = provider(fields=["data"])
info = SomeInfo(data="data") # valid
info = SomeInfo(foo="bar") # runtime exception
"""
if fields:
return create_struct_class(fields)
return struct
def _add_build_file_dep(self, name):
# type: (str) -> None
"""
Explicitly specify a dependency on an external file.
For instance, this can be used to specify a dependency on an external
executable that will be invoked, or some other external configuration
file.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
cell_name, path = self._resolve_include(name)
build_env.includes.add(path)
@staticmethod
def _host_info():
return _cached_host_info
@contextlib.contextmanager
def _set_build_env(self, build_env):
# type: (AbstractContext) -> Iterator[None]
"""Set the given build context as the current context, unsetting it upon exit."""
old_env = self._current_build_env
self._current_build_env = build_env
self._update_functions(self._current_build_env)
try:
yield
finally:
self._current_build_env = old_env
self._update_functions(self._current_build_env)
def _emit_warning(self, message, source):
# type: (str, str) -> None
"""
Add a warning to the current build_env's diagnostics.
"""
if self._current_build_env is not None:
self._current_build_env.diagnostics.append(
Diagnostic(
message=message, level="warning", source=source, exception=None
)
)
@staticmethod
def _create_import_whitelist(project_import_whitelist):
# type: (List[str]) -> Set[str]
"""
Creates import whitelist by joining the global whitelist with the project specific one
defined in '.buckconfig'.
"""
global_whitelist = [
"copy",
"re",
"functools",
"itertools",
"json",
"hashlib",
"types",
"string",
"ast",
"__future__",
"collections",
"operator",
"fnmatch",
"copy_reg",
]
return set(global_whitelist + project_import_whitelist)
def _file_access_wrapper(self, real):
"""
Return wrapper around function so that accessing a file produces warning if it is
not a known dependency.
"""
@functools.wraps(real)
def wrapper(filename, *arg, **kwargs):
# Restore original 'open' because it is used by 'inspect.currentframe()' in
# '_called_from_project_file()'
with self._wrap_file_access(wrap=False):
if self._called_from_project_file():
path = os.path.abspath(filename)
if path not in self._current_build_env.includes:
dep_path = "//" + os.path.relpath(path, self._project_root)
warning_message = (
"Access to a non-tracked file detected! {0} is not a ".format(
path
)
+ "known dependency and it should be added using 'add_build_file_dep' "
+ "function before trying to access the file, e.g.\n"
+ "'add_build_file_dep('{0}')'\n".format(dep_path)
+ "The 'add_build_file_dep' function is documented at "
+ "https://buckbuild.com/function/add_build_file_dep.html\n"
)
self._emit_warning(warning_message, "sandboxing")
return real(filename, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _wrap_fun_for_file_access(self, obj, attr, wrap=True):
"""
Wrap a function to check if accessed files are known dependencies.
"""
real = getattr(obj, attr)
if wrap:
# Don't wrap again
if not hasattr(real, "_real"):
wrapped = self._file_access_wrapper(real)
setattr(obj, attr, wrapped)
elif hasattr(real, "_real"):
# Restore real function if it was wrapped
setattr(obj, attr, real._real)
try:
yield
finally:
setattr(obj, attr, real)
def _wrap_file_access(self, wrap=True):
"""
Wrap 'open' so that they it checks if accessed files are known dependencies.
If 'wrap' is equal to False, restore original function instead.
"""
return self._wrap_fun_for_file_access(builtins, "open", wrap)
@contextlib.contextmanager
def _build_file_sandboxing(self):
"""
Creates a context that sandboxes build file processing.
"""
with self._wrap_file_access():
with self._import_whitelist_manager.allow_unsafe_import(False):
yield
@traced(stats_key="Process")
def _process(self, build_env, path, is_implicit_include, package_implicit_load):
# type: (_GCT, str, bool, Optional[LoadStatement]) -> Tuple[_GCT, types.ModuleType]
"""Process a build file or include at the given path.
:param build_env: context of the file to process.
:param path: target-like path to the file to process.
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
:package_implicit_load: if provided, a dictionary containing the path to
load for this given package, and the symbols to load
from that .bzl file.
:returns: build context (potentially different if retrieved from cache) and loaded module.
"""
if isinstance(build_env, IncludeContext):
default_globals = (
self._default_globals_for_implicit_include
if is_implicit_include
else self._default_globals_for_extension
)
else:
default_globals = self._default_globals_for_build_file
emit_trace(path)
# Install the build context for this input as the current context.
with self._set_build_env(build_env):
# Don't include implicit includes if the current file being
# processed is an implicit include
if not is_implicit_include:
for include in self._implicit_includes:
build_include = self._resolve_include(include)
inner_env, mod = self._process_include(build_include, True)
self._merge_globals(mod, default_globals)
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
if package_implicit_load:
self._load_package_implicit(build_env, package_implicit_load)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
# We don't open this file as binary, as we assume it's a textual source
# file.
with scoped_trace("IO", stats_key="IO"):
with self._wrap_file_access(wrap=False):
with open(path, "r") as f:
contents = f.read()
with scoped_trace("Compile", stats_key="Compile"):
# Enable absolute imports. This prevents the compiler from
# trying to do a relative import first, and warning that
# this module doesn't exist in sys.modules.
future_features = absolute_import.compiler_flag
code = compile(contents, path, "exec", future_features, 1)
# Execute code with build file sandboxing
with self._build_file_sandboxing():
exec(code, module.__dict__)
return build_env, module
def _process_include(self, build_include, is_implicit_include):
# type: (BuildInclude, bool) -> Tuple[AbstractContext, types.ModuleType]
"""Process the include file at the given path.
:param build_include: build include metadata (cell_name and path).
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
"""
# First check the cache.
cached = self._include_cache.get(build_include.path)
if cached is not None:
return cached
build_env = IncludeContext(
cell_name=build_include.cell_name, path=build_include.path
)
build_env, mod = self._process(
build_env,
build_include.path,
is_implicit_include=is_implicit_include,
package_implicit_load=None,
)
self._include_cache[build_include.path] = build_env, mod
return build_env, mod
def _process_build_file(
self, watch_root, project_prefix, path, package_implicit_load
):
# type: (str, str, str, Optional[LoadStatement]) -> Tuple[BuildFileContext, types.ModuleType]
"""Process the build file at the given path."""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(path, self._project_root).replace(
"\\", "/"
)
len_suffix = -len(self._build_file_name) - 1
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
self._project_root,
base_path,
path,
dirname,
self._cell_name,
self._allow_empty_globs,
self._ignore_paths,
self._watchman_client,
watch_root,
project_prefix,
self._sync_cookie_state,
self._watchman_glob_stat_results,
self._watchman_use_glob_generator,
{},
)
return self._process(
build_env,
path,
is_implicit_include=False,
package_implicit_load=package_implicit_load,
)
def process(
self, watch_root, project_prefix, path, diagnostics, package_implicit_load
):
# type: (str, Optional[str], str, List[Diagnostic], Optional[LoadStatement]) -> List[Dict[str, Any]]
"""Process a build file returning a dict of its rules and includes."""
build_env, mod = self._process_build_file(
watch_root,
project_prefix,
os.path.join(self._project_root, path),
package_implicit_load=package_implicit_load,
)
# Initialize the output object to a map of the parsed rules.
values = list(itervalues(build_env.rules))
# Add in tracked included files as a special meta rule.
values.append({"__includes": [path] + sorted(build_env.includes)})
# Add in tracked used config settings as a special meta rule.
values.append({"__configs": build_env.used_configs})
# Add in used environment variables as a special meta rule.
values.append({"__env": build_env.used_env_vars})
diagnostics.extend(build_env.diagnostics)
return values
class InvalidSignatureError(Exception):
pass
def format_traceback(tb):
formatted = []
for entry in traceback.extract_tb(tb):
(filename, line_number, function_name, text) = entry
formatted.append(
{
"filename": filename,
"line_number": line_number,
"function_name": function_name,
"text": text,
}
)
return formatted
def format_exception_info(exception_info):
(exc_type, exc_value, exc_traceback) = exception_info
formatted = {
"type": exc_type.__name__,
"value": str(exc_value),
"traceback": format_traceback(exc_traceback),
}
if exc_type is SyntaxError:
formatted["filename"] = exc_value.filename
formatted["lineno"] = exc_value.lineno
formatted["offset"] = exc_value.offset
formatted["text"] = exc_value.text
return formatted
def encode_result(values, diagnostics, profile):
# type: (List[Dict[str, object]], List[Diagnostic], Optional[str]) -> str
result = {
"values": [
{k: v for k, v in iteritems(value) if v is not None} for value in values
]
}
json_encoder = BuckJSONEncoder()
if diagnostics:
encoded_diagnostics = []
for d in diagnostics:
encoded = {"message": d.message, "level": d.level, "source": d.source}
if d.exception:
encoded["exception"] = format_exception_info(d.exception)
encoded_diagnostics.append(encoded)
result["diagnostics"] = encoded_diagnostics
if profile is not None:
result["profile"] = profile
try:
return json_encoder.encode(result)
except Exception as e:
# Try again without the values
result["values"] = []
if "diagnostics" not in result:
result["diagnostics"] = []
result["diagnostics"].append(
{
"message": str(e),
"level": "fatal",
"source": "parse",
"exception": format_exception_info(sys.exc_info()),
}
)
return json_encoder.encode(result)
def process_with_diagnostics(build_file_query, build_file_processor, to_parent):
start_time = time.time()
build_file = build_file_query.get("buildFile")
watch_root = build_file_query.get("watchRoot")
project_prefix = build_file_query.get("projectPrefix")
package_implicit_load = build_file_query.get("packageImplicitLoad")
build_file = cygwin_adjusted_path(build_file)
watch_root = cygwin_adjusted_path(watch_root)
if project_prefix is not None:
project_prefix = cygwin_adjusted_path(project_prefix)
diagnostics = []
values = []
try:
values = build_file_processor.process(
watch_root,
project_prefix,
build_file,
diagnostics=diagnostics,
package_implicit_load=package_implicit_load,
)
except BaseException as e:
# sys.exit() don't emit diagnostics.
if e is not SystemExit:
if isinstance(e, WatchmanError):
source = "watchman"
message = e.msg
else:
source = "parse"
message = str(e)
diagnostics.append(
Diagnostic(
message=message,
level="fatal",
source=source,
exception=sys.exc_info(),
)
)
raise
finally:
java_process_send_result(to_parent, values, diagnostics, None)
end_time = time.time()
return end_time - start_time
def java_process_send_result(to_parent, values, diagnostics, profile_result):
"""Sends result to the Java process"""
data = encode_result(values, diagnostics, profile_result)
if PY3:
# in Python 3 write expects bytes instead of string
data = data.encode("utf-8")
to_parent.write(data)
to_parent.flush()
def silent_excepthook(exctype, value, tb):
# We already handle all exceptions by writing them to the parent, so
# no need to dump them again to stderr.
pass
def _optparse_store_kv(option, opt_str, value, parser):
"""Optparse option callback which parses input as K=V, and store into dictionary.
:param optparse.Option option: Option instance
:param str opt_str: string representation of option flag
:param str value: argument value
:param optparse.OptionParser parser: parser instance
"""
result = value.split("=", 1)
if len(result) != 2:
raise optparse.OptionError(
"Expected argument of to be in the form of X=Y".format(opt_str), option
)
(k, v) = result
# Get or create the dictionary
dest_dict = getattr(parser.values, option.dest)
if dest_dict is None:
dest_dict = {}
setattr(parser.values, option.dest, dest_dict)
dest_dict[k] = v
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUCK file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUCK files under the project root. If no paths to BUCK files are
# specified, then it will traverse the project root for BUCK files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUCK files will be printed
# to stdout encoded in JSON. That means that printing out other information
# for debugging purposes will break the JSON encoding, so be careful!
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
# w instead of a mode is used because of https://bugs.python.org/issue27805
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), "wb")
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
"--project_root", action="store", type="string", dest="project_root"
)
parser.add_option(
"--cell_root",
action="callback",
type="string",
dest="cell_roots",
metavar="NAME=PATH",
help="Cell roots that can be referenced by includes.",
callback=_optparse_store_kv,
default={},
)
parser.add_option("--cell_name", action="store", type="string", dest="cell_name")
parser.add_option(
"--build_file_name", action="store", type="string", dest="build_file_name"
)
parser.add_option(
"--allow_empty_globs",
action="store_true",
dest="allow_empty_globs",
help="Tells the parser not to raise an error when glob returns no results.",
)
parser.add_option(
"--use_watchman_glob",
action="store_true",
dest="use_watchman_glob",
help="Invokes `watchman query` to get lists of files instead of globbing in-process.",
)
parser.add_option(
"--watchman_use_glob_generator",
action="store_true",
dest="watchman_use_glob_generator",
help="Uses Watchman glob generator to speed queries",
)
parser.add_option(
"--watchman_glob_stat_results",
action="store_true",
dest="watchman_glob_stat_results",
help="Invokes `stat()` to sanity check result of `watchman query`.",
)
parser.add_option(
"--watchman_socket_path",
action="store",
type="string",
dest="watchman_socket_path",
help="Path to Unix domain socket/named pipe as returned by `watchman get-sockname`.",
)
parser.add_option(
"--watchman_query_timeout_ms",
action="store",
type="int",
dest="watchman_query_timeout_ms",
help="Maximum time in milliseconds to wait for watchman query to respond.",
)
parser.add_option("--include", action="append", dest="include")
parser.add_option("--config", help="BuckConfig settings available at parse time.")
parser.add_option("--ignore_paths", help="Paths that should be ignored.")
parser.add_option(
"--quiet",
action="store_true",
dest="quiet",
help="Stifles exception backtraces printed to stderr during parsing.",
)
parser.add_option(
"--profile", action="store_true", help="Profile every buck file execution"
)
parser.add_option(
"--build_file_import_whitelist",
action="append",
dest="build_file_import_whitelist",
)
parser.add_option(
"--disable_implicit_native_rules",
action="store_true",
help="Do not allow native rules in build files, only included ones",
)
parser.add_option(
"--warn_about_deprecated_syntax",
action="store_true",
help="Warn about deprecated syntax usage.",
)
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
cell_roots = {
k: os.path.abspath(cygwin_adjusted_path(v))
for k, v in iteritems(options.cell_roots)
}
watchman_client = None
if options.use_watchman_glob:
client_args = {"sendEncoding": "json", "recvEncoding": "json"}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args["timeout"] = max(
0.0, options.watchman_query_timeout_ms / 1000.0
)
else:
client_args["timeout"] = DEFAULT_WATCHMAN_QUERY_TIMEOUT
if options.watchman_socket_path is not None:
client_args["sockpath"] = options.watchman_socket_path
client_args["transport"] = "local"
watchman_client = pywatchman.client(**client_args)
configs = {}
if options.config is not None:
with open(options.config, "rb") as f:
for section, contents in iteritems(json.load(f)):
for field, value in iteritems(contents):
configs[(section, field)] = value
ignore_paths = []
if options.ignore_paths is not None:
with open(options.ignore_paths, "rb") as f:
ignore_paths = [make_glob(i) for i in json.load(f)]
build_file_processor = BuildFileProcessor(
project_root,
cell_roots,
options.cell_name,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
options.watchman_glob_stat_results,
options.watchman_use_glob_generator,
project_import_whitelist=options.build_file_import_whitelist or [],
implicit_includes=options.include or [],
configs=configs,
ignore_paths=ignore_paths,
disable_implicit_native_rules=options.disable_implicit_native_rules,
warn_about_deprecated_syntax=options.warn_about_deprecated_syntax,
)
# While processing, we'll write exceptions as diagnostic messages
# to the parent then re-raise them to crash the process. While
# doing so, we don't want Python's default unhandled exception
# behavior of writing to stderr.
orig_excepthook = None
if options.quiet:
orig_excepthook = sys.excepthook
sys.excepthook = silent_excepthook
# Process the build files with the env var interceptors and builtins
# installed.
with build_file_processor.with_env_interceptors():
with build_file_processor.with_builtins(builtins.__dict__):
processed_build_file = []
profiler = None
if options.profile:
profiler = Profiler(True)
profiler.start()
Tracer.enable()
for build_file in args:
query = {
"buildFile": build_file,
"watchRoot": project_root,
"projectPrefix": project_root,
}
duration = process_with_diagnostics(
query, build_file_processor, to_parent
)
processed_build_file.append(
{"buildFile": build_file, "duration": duration}
)
# From https://docs.python.org/2/using/cmdline.html :
#
# Note that there is internal buffering in file.readlines()
# and File Objects (for line in sys.stdin) which is not
# influenced by this option. To work around this, you will
# want to use file.readline() inside a while 1: loop.
for line in wait_and_read_build_file_query():
if line == "":
break
build_file_query = json.loads(line)
if build_file_query.get("command") == "report_profile":
report_profile(options, to_parent, processed_build_file, profiler)
else:
duration = process_with_diagnostics(
build_file_query, build_file_processor, to_parent
)
processed_build_file.append(
{
"buildFile": build_file_query["buildFile"],
"duration": duration,
}
)
if options.quiet:
sys.excepthook = orig_excepthook
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
def wait_build_file_query():
_select([sys.stdin], [], [])
def wait_and_read_build_file_query():
def default_wait():
return
wait = default_wait
if sys.platform != "win32":
# wait_build_file_query() is useful to attribute time waiting for queries.
# Since select.select() is not supported on Windows, we currently don't have
# a reliable way to measure it on this platform. Then, we skip it.
wait = wait_build_file_query
while True:
wait()
line = sys.stdin.readline()
if not line:
return
yield line
def report_profile(options, to_parent, processed_build_file, profiler):
if options.profile:
try:
profiler.stop()
profile_result = profiler.generate_report()
extra_result = "Total: {:.2f} sec\n\n\n".format(profiler.total_time)
extra_result += "# Parsed {} files".format(len(processed_build_file))
processed_build_file.sort(
key=lambda current_child: current_child["duration"], reverse=True
)
# Only show the top ten buck files
if len(processed_build_file) > 10:
processed_build_file = processed_build_file[:10]
extra_result += ", {} slower BUCK files:\n".format(
len(processed_build_file)
)
else:
extra_result += "\n"
for info in processed_build_file:
extra_result += "Parsed {}: {:.2f} sec \n".format(
info["buildFile"], info["duration"]
)
extra_result += "\n\n"
profile_result = extra_result + profile_result
profile_result += Tracer.get_all_traces_and_reset()
java_process_send_result(to_parent, [], [], profile_result)
except Exception:
trace = traceback.format_exc()
print(str(trace))
raise
else:
java_process_send_result(to_parent, [], [], None)
def make_glob(pat):
# type: (str) -> str
if is_special(pat):
return pat
return pat + "/**"
# import autogenerated rule instances for effect.
try:
import generated_rules
except ImportError:
# If running directly or python tests of this code, this is not an error.
sys.stderr.write("Failed to load buck generated rules module.\n")
|
If you ever thought about walking from Manhattan to the Bronx, this Wednesday is your day. Join us at Highbridge Park where you can do just that! Ironically one of New York's newer parks is home to the oldest bridge in the city. A large park with an iconic swimming pool, new mountain bike course among other amenities, it also houses part of the original aqueduct system that brought water from the Croton River to the city.
The old aqueduct that transverse the Harlem River offers us views of upper Manhattan and the Bronx. Want to get higher? Climb the newly opened tower that housed the pumping equipment for the water system.
DIRECTIONS: 1 or A Train to 168th Street. Walk east on 168th St to Amsterdam Ave then north to the park. Follow the map below to get to the bridge inside the park.
Please RSVP Directly to Raylie Dunkel at [email protected]. Looking for us? Call/text Raylie at 201-978-6387. Bring a stool, lunch and water. There are no food services in the park and it is a walk fro the bridge to the stores along the avenue. Bring lunch or purchase it from a vendor along the way.
|
from filebeat import BaseTest
import os
import socket
"""
Tests for the custom fields functionality.
"""
class Test(BaseTest):
def test_custom_fields(self):
"""
Tests that custom fields show up in the output dict.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
fields={"hello": "world", "number": 2}
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
assert doc["fields.hello"] == "world"
assert doc["fields.number"] == 2
def test_custom_fields_under_root(self):
"""
Tests that custom fields show up in the output dict under
root when fields_under_root option is used.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
fields={
"hello": "world",
"type": "log2",
"timestamp": "2"
},
fieldsUnderRoot=True
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
print doc
assert doc["hello"] == "world"
assert doc["type"] == "log2"
assert doc["timestamp"] == 2
assert "fields" not in doc
def test_beat_fields(self):
"""
Checks that it's possible to set a custom shipper name. Also
tests that beat.hostname has values.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
shipperName="testShipperName"
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
assert doc["beat.name"] == "testShipperName"
assert doc["beat.hostname"] == socket.gethostname()
assert "fields" not in doc
|
DROOZ and Company owner & shopkeeper Shelly has been fluffing her surroundings, creating art, designing products and sourcing gifts for over 20 years. Her art, creations, studios, and homes have been featured in dozens of publications.
DROOZ & Co. is the culmination of a lifetime love of color and design, collecting and sharing, entertaining and celebrating and of course inspiring and creating.
Here at DROOZ Shelly has curated her favorite finds, vintage gems, and latest creations for you to fluff, give and enjoy!
|
#!/usr/bin/env python
"""
Kover: Learn interpretable computational phenotyping models from k-merized genomic data
Copyright (C) 2015 Alexandre Drouin
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import numpy as np
from collections import defaultdict
from copy import deepcopy
from functools import partial
from itertools import product
from math import exp, log as ln, pi
from multiprocessing import Pool, cpu_count
from scipy.misc import comb
from ...dataset.ds import KoverDataset
from ..common.models import ConjunctionModel, DisjunctionModel
from ..common.rules import LazyKmerRuleList, KmerRuleClassifications
from ..learners.scm import SetCoveringMachine
from ...utils import _duplicate_last_element, _unpack_binary_bytes_from_ints, _parse_kmer_blacklist
from ..experiments.metrics import _get_binary_metrics
def _predictions(model, kmer_matrix, train_example_idx, test_example_idx, progress_callback=None):
"""Computes predictions by loading only the columns of the kmer matrix that are targetted by the model.
Parameters
----------
model: BaseModel
The model used for predicting.
kmer_matrix: BaseRuleClassifications
The matrix containing the classifications of each rule on each learning example.
train_example_idx: array-like, dtype=uint
The index of the rows of kmer_matrix corresponding to the training examples.
test_example_idx: array-like, dtype=uint
The index of the rows of kmer_matrix corresponding to the testing examples.
progress_callback: function with arguments task, percent_completed
A callback function used to keep track of the task's completion.
"""
if progress_callback is None:
progress_callback = lambda t, p: None
progress_callback("Testing", 0.0)
# We use h5py to load only the columns of the k-mer matrix targeted by the model. The indices passed to h5py need
# to be sorted. We change the kmer_idx of the rules in the model to be 0 ... n_rules, with the rule that initially had
# the smallest kmer_idx pointing to 0 and the one with the largest kmer_idx pointing to n_rules. We then load only
# the appropriate columns and apply the readdressed model to the data (in RAM).
columns_to_load = []
readdressed_model = deepcopy(model)
for i, rule_idx in enumerate(np.argsort([r.kmer_index for r in model.rules])):
rule = readdressed_model.rules[rule_idx]
columns_to_load.append(rule.kmer_index)
rule.kmer_index = i
# Load the columns targeted by the model and make predictions using the readdressed model
X = _unpack_binary_bytes_from_ints(kmer_matrix[:, columns_to_load])
train_predictions = readdressed_model.predict(X[train_example_idx])
progress_callback("Testing", 1.0 * len(train_example_idx) / (len(train_example_idx) + len(test_example_idx)))
test_predictions = readdressed_model.predict(X[test_example_idx])
progress_callback("Testing", 1.0)
return train_predictions, test_predictions
def _cv_score_hp(hp_values, max_rules, dataset_file, split_name, rule_blacklist):
model_type = hp_values[0]
p = hp_values[1]
dataset = KoverDataset(dataset_file)
folds = dataset.get_split(split_name).folds
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
def _iteration_callback(iteration_infos, tmp_model, test_predictions_by_model_length, test_example_idx):
tmp_model.add(iteration_infos["selected_rule"])
_, test_predictions = _predictions(tmp_model, dataset.kmer_matrix, [], test_example_idx)
test_predictions_by_model_length.append(test_predictions)
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
fold_score_by_model_length = np.zeros((len(folds), max_rules))
for i, fold in enumerate(folds):
logging.debug("Fold: %s" % fold.name)
rule_risks = np.hstack((fold.unique_risk_by_kmer[...],
fold.unique_risk_by_anti_kmer[...])) # Too bad that we need to load each time. Maybe invert the loops (all hp for each fold)
train_example_idx = fold.train_genome_idx
test_example_idx = fold.test_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
tiebreaker = partial(_tiebreaker, rule_risks=rule_risks, model_type=model_type)
test_predictions_by_model_length = []
tmp_model = ConjunctionModel() if model_type == "conjunction" else DisjunctionModel()
iteration_callback = partial(_iteration_callback,
tmp_model=tmp_model,
test_predictions_by_model_length=test_predictions_by_model_length,
test_example_idx=test_example_idx)
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=tiebreaker,
iteration_callback=iteration_callback)
test_predictions_by_model_length = np.array(_duplicate_last_element(test_predictions_by_model_length, max_rules))
fold_score_by_model_length[i] = _get_binary_metrics(predictions=test_predictions_by_model_length,
answers=dataset.phenotype.metadata[test_example_idx])["risk"]
score_by_model_length = np.mean(fold_score_by_model_length, axis=0)
best_score_idx = np.argmin(score_by_model_length)
best_hp_score = score_by_model_length[best_score_idx]
best_model_length = best_score_idx + 1
return (model_type, p, best_model_length), best_hp_score
def _cross_validation(dataset_file, split_name, model_types, p_values, max_rules, rule_blacklist,
n_cpu, progress_callback, warning_callback, error_callback):
"""
Returns the best parameter combination and its cv score
"""
n_hp_combinations = len(model_types) * len(p_values)
logging.debug("There are %d hyperparameter combinations to try." % n_hp_combinations)
logging.debug("Using %d CPUs." % n_cpu)
pool = Pool(processes=n_cpu)
hp_eval_func = partial(_cv_score_hp,
dataset_file=dataset_file,
split_name=split_name,
max_rules=max_rules,
rule_blacklist=rule_blacklist)
best_hp_score = 1.0
best_hp = {"model_type": None, "p": None, "max_rules": None}
n_completed = 0.0
progress_callback("Cross-validation", 0.0)
for hp, score in pool.imap_unordered(hp_eval_func, product(model_types, p_values)):
n_completed += 1
progress_callback("Cross-validation", n_completed / n_hp_combinations)
if (not np.allclose(score, best_hp_score) and score < best_hp_score) or \
(np.allclose(score, best_hp_score) and hp[2] < best_hp["max_rules"]) or \
(np.allclose(score, best_hp_score) and hp[2] == best_hp["max_rules"] and not np.allclose(hp[1], best_hp["p"]) and \
abs(1.0 - hp[1]) < abs(1.0 - best_hp["p"])):
best_hp["model_type"] = hp[0]
best_hp["p"] = hp[1]
best_hp["max_rules"] = hp[2]
best_hp_score = score
return best_hp_score, best_hp
def _full_train(dataset, split_name, model_type, p, max_rules, max_equiv_rules, rule_blacklist, random_generator, progress_callback):
full_train_progress = {"n_rules": 0.0}
def _iteration_callback(iteration_infos, model_type, equivalent_rules):
full_train_progress["n_rules"] += 1
progress_callback("Training", full_train_progress["n_rules"] / max_rules)
# Ensure that there are no more equivalent rules than the specified maximum
if len(iteration_infos["equivalent_rules_idx"]) > max_equiv_rules:
logging.debug("There are more equivalent rules than the allowed maximum. Subsampling %d rules." % max_equiv_rules)
random_idx = random_generator.choice(len(iteration_infos["equivalent_rules_idx"]), max_equiv_rules,
replace=False)
random_idx.sort()
iteration_infos["equivalent_rules_idx"] = iteration_infos["equivalent_rules_idx"][random_idx]
# Adjust and store the equivalent rule indices
if model_type == "disjunction":
n_kmers = rule_classifications.shape[1] / 2
iteration_infos["equivalent_rules_idx"] += n_kmers
iteration_infos["equivalent_rules_idx"] %= (2 * n_kmers)
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
else:
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
split = dataset.get_split(split_name)
train_example_idx = split.train_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
model_equivalent_rules = []
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
progress_callback("Training", 0)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=partial(_tiebreaker,
rule_risks=np.hstack((split.unique_risk_by_kmer[...],
split.unique_risk_by_anti_kmer[...])),
model_type=model_type),
iteration_callback=partial(_iteration_callback,
model_type=model_type,
equivalent_rules=model_equivalent_rules))
return predictor.model, predictor.rule_importances, model_equivalent_rules
def _bound(train_predictions, train_answers, train_example_idx, model, delta, max_genome_size, rule_classifications):
# Construct the smallest possible compression set (Chvatal greedy approx for minimum set cover)
logging.debug("Constructing the compression set.")
presence_by_example = rule_classifications.get_columns([r.kmer_index for r in model])[train_example_idx]
compression_set = []
while presence_by_example.shape[1] != 0:
score = presence_by_example.sum(axis=1)
best_example_relative_idx = np.argmax(score)
compression_set.append(best_example_relative_idx)
presence_by_example = presence_by_example[:, presence_by_example[best_example_relative_idx] == 0]
logging.debug("The compression set contains %d examples." % len(compression_set))
# Compute the bound value
logging.debug("Computing the bound value.")
h_card = float(len(model))
Z_card = float(len(compression_set) * max_genome_size)
m = float(len(train_answers))
mz = float(len(compression_set))
r = float((train_predictions != train_answers).sum() - (train_predictions[compression_set] != train_answers[compression_set]).sum())
return 1.0 - exp((-1.0 / (m - mz - r)) * (ln(comb(m, mz, exact=True)) +
ln(comb(m - mz, r, exact=True)) +
h_card * ln(2 * Z_card) +
ln(pi**6 *
(h_card + 1)**2 *
(r + 1)**2 *
(mz + 1)**2 /
(216 * delta))))
def _bound_score_hp(hp_values, max_rules, dataset_file, split_name, max_equiv_rules, rule_blacklist,
bound_delta, bound_max_genome_size, random_generator):
model_type = hp_values[0]
p = hp_values[1]
dataset = KoverDataset(dataset_file)
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
def _iteration_callback(iteration_infos, tmp_model, train_example_idx, train_answers, score_by_length,
model_by_length, equivalent_rules, rule_importances, rule_classifications):
tmp_model.add(iteration_infos["selected_rule"])
model_by_length.append(deepcopy(tmp_model))
rule_importances.append(iteration_infos["rule_importances"])
# Store equivalent rules
# Ensure that there are no more equivalent rules than the specified maximum
if len(iteration_infos["equivalent_rules_idx"]) > max_equiv_rules:
logging.debug("There are more equivalent rules than the allowed maximum. Subsampling %d rules." % max_equiv_rules)
random_idx = random_generator.choice(len(iteration_infos["equivalent_rules_idx"]), max_equiv_rules,
replace=False)
random_idx.sort()
iteration_infos["equivalent_rules_idx"] = iteration_infos["equivalent_rules_idx"][random_idx]
# Adjust and store the equivalent rule indices
if model_type == "disjunction":
n_kmers = rule_classifications.shape[1] / 2
iteration_infos["equivalent_rules_idx"] += n_kmers
iteration_infos["equivalent_rules_idx"] %= (2 * n_kmers)
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
else:
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
# Compute the bound value for the current model length
_, train_predictions = _predictions(tmp_model, dataset.kmer_matrix, [], train_example_idx)
score_by_length[iteration_infos["iteration_number"] - 1] = _bound(train_predictions=train_predictions,
train_answers=train_answers,
train_example_idx=train_example_idx,
model=tmp_model,
delta=bound_delta,
max_genome_size=bound_max_genome_size,
rule_classifications=rule_classifications)
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
split = dataset.get_split(split_name)
rule_risks = np.hstack((split.unique_risk_by_kmer[...],
split.unique_risk_by_anti_kmer[...]))
train_example_idx = split.train_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
train_answers = dataset.phenotype.metadata[train_example_idx]
tiebreaker = partial(_tiebreaker, rule_risks=rule_risks, model_type=model_type)
tmp_model = ConjunctionModel() if model_type == "conjunction" else DisjunctionModel()
score_by_length = np.ones(max_rules)
model_by_length = []
equivalent_rules = []
rule_importances = []
iteration_callback = partial(_iteration_callback,
tmp_model=tmp_model,
train_example_idx=train_example_idx,
train_answers=train_answers,
score_by_length=score_by_length,
model_by_length=model_by_length,
equivalent_rules=equivalent_rules,
rule_importances=rule_importances,
rule_classifications=rule_classifications)
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=tiebreaker,
iteration_callback=iteration_callback,
iteration_rule_importances=True)
best_score_idx = np.argmin(score_by_length)
best_hp_score = score_by_length[best_score_idx]
best_model = model_by_length[best_score_idx]
best_rule_importances = rule_importances[best_score_idx]
best_equivalent_rules = equivalent_rules[: best_score_idx + 1]
best_model_length = best_score_idx + 1
return (model_type, p, best_model_length), best_hp_score, best_model, best_rule_importances, best_equivalent_rules
def _bound_selection(dataset_file, split_name, model_types, p_values, max_rules, max_equiv_rules, rule_blacklist,
bound_delta, bound_max_genome_size, n_cpu, random_generator, progress_callback, warning_callback,
error_callback):
n_hp_combinations = len(model_types) * len(p_values)
logging.debug("There are %d hyperparameter combinations to try." % n_hp_combinations)
logging.debug("Using %d CPUs." % n_cpu)
pool = Pool(processes=n_cpu)
hp_eval_func = partial(_bound_score_hp,
dataset_file=dataset_file,
split_name=split_name,
max_rules=max_rules,
max_equiv_rules=max_equiv_rules,
rule_blacklist=rule_blacklist,
bound_delta=bound_delta,
bound_max_genome_size=bound_max_genome_size,
random_generator=random_generator)
best_hp_score = 1.0
best_hp = {"model_type": None, "p": None, "max_rules": None}
n_completed = 0.0
progress_callback("Bound selection", 0.0)
for hp, score, model, rule_importances, equiv_rules in pool.imap_unordered(hp_eval_func,
product(model_types, p_values)):
n_completed += 1
progress_callback("Bound selection", n_completed / n_hp_combinations)
if (score < best_hp_score) or \
(score == best_hp_score and hp[2] < best_hp["max_rules"]) or \
(score == best_hp_score and hp[2] == best_hp["max_rules"] and abs(1.0 - hp[1]) < abs(1.0 - best_hp["p"])):
best_hp["model_type"] = hp[0]
best_hp["p"] = hp[1]
best_hp["max_rules"] = hp[2]
best_hp_score = score
best_model = model
best_equiv_rules = equiv_rules
best_rule_importances = rule_importances
return best_hp_score, best_hp, best_model, best_rule_importances, best_equiv_rules
def _find_rule_blacklist(dataset_file, kmer_blacklist_file, warning_callback):
"""
Finds the index of the rules that must be blacklisted.
"""
dataset = KoverDataset(dataset_file)
# Find all rules to blacklist
rule_blacklist = []
if kmer_blacklist_file is not None:
kmers_to_blacklist = _parse_kmer_blacklist(kmer_blacklist_file, dataset.kmer_length)
if kmers_to_blacklist:
# XXX: the k-mers are assumed to be upper-cased in the dataset
kmer_sequences = dataset.kmer_sequences[...].tolist()
kmer_by_matrix_column = dataset.kmer_by_matrix_column[...].tolist() # XXX: each k-mer is there only once (see wiki)
n_kmers = len(kmer_sequences)
kmers_not_found = []
for k in kmers_to_blacklist:
k = k.upper()
try:
presence_rule_idx = kmer_by_matrix_column.index(kmer_sequences.index(k))
absence_rule_idx = presence_rule_idx + n_kmers
rule_blacklist += [presence_rule_idx, absence_rule_idx]
except ValueError:
kmers_not_found.append(k)
if(len(kmers_not_found) > 0):
warning_callback("The following kmers could not be found in the dataset: " + ", ".join(kmers_not_found))
return rule_blacklist
def learn_SCM(dataset_file, split_name, model_type, p, kmer_blacklist_file,max_rules, max_equiv_rules,
parameter_selection, n_cpu, random_seed, authorized_rules, bound_delta=None, bound_max_genome_size=None,
progress_callback=None, warning_callback=None, error_callback=None):
"""
parameter_selection: bound, cv, none (use first value of each if multiple)
"""
# Execution callback functions
if warning_callback is None:
warning_callback = lambda w: logging.warning(w)
if error_callback is None:
def normal_raise(exception):
raise exception
error_callback = normal_raise
if progress_callback is None:
progress_callback = lambda t, p: None
if n_cpu is None:
n_cpu = cpu_count()
random_generator = np.random.RandomState(random_seed)
model_type = np.unique(model_type)
p = np.unique(p)
logging.debug("Searching for blacklisted rules.")
rule_blacklist = _find_rule_blacklist(dataset_file=dataset_file,
kmer_blacklist_file=kmer_blacklist_file,
warning_callback=warning_callback)
dataset = KoverDataset(dataset_file)
# Score the hyperparameter combinations
# ------------------------------------------------------------------------------------------------------------------
if parameter_selection == "bound":
if bound_delta is None or bound_max_genome_size is None:
error_callback(Exception("Bound selection cannot be performed without delta and the maximum genome length."))
# For bound selection, there is no need to retrain the algorithm after selecting the best hyperparameters.
# The model is already obtained from all the training data. This is why we save the model here.
best_hp_score, \
best_hp, \
best_model, \
best_rule_importances, \
best_predictor_equiv_rules = _bound_selection(dataset_file=dataset_file,
split_name=split_name,
model_types=model_type,
p_values=p,
max_rules=max_rules,
rule_blacklist=rule_blacklist,
max_equiv_rules=max_equiv_rules,
bound_delta=bound_delta,
bound_max_genome_size=bound_max_genome_size,
n_cpu=n_cpu,
random_generator=random_generator,
progress_callback=progress_callback,
warning_callback=warning_callback,
error_callback =error_callback)
elif parameter_selection == "cv":
n_folds = len(dataset.get_split(split_name).folds)
if n_folds < 1:
error_callback(Exception("Cross-validation cannot be performed on a split with no folds."))
best_hp_score, best_hp = _cross_validation(dataset_file=dataset_file,
split_name=split_name,
model_types=model_type,
p_values=p,
max_rules=max_rules,
rule_blacklist=rule_blacklist,
n_cpu=n_cpu,
progress_callback=progress_callback,
warning_callback=warning_callback,
error_callback=error_callback)
else:
# Use the first value provided for each parameter
best_hp = {"model_type": model_type[0], "p": p[0], "max_rules": max_rules}
best_hp_score = None
# Use the best hyperparameters to train/test on the split
# ------------------------------------------------------------------------------------------------------------------
if parameter_selection == "bound":
model = best_model
equivalent_rules = best_predictor_equiv_rules
rule_importances = best_rule_importances
else:
model, rule_importances, \
equivalent_rules = _full_train(dataset=dataset,
split_name=split_name,
model_type=best_hp["model_type"],
p=best_hp["p"],
max_rules=best_hp["max_rules"],
max_equiv_rules=max_equiv_rules,
rule_blacklist=rule_blacklist,
random_generator=random_generator,
progress_callback=progress_callback)
split = dataset.get_split(split_name)
train_example_idx = split.train_genome_idx
test_example_idx = split.test_genome_idx
train_predictions, test_predictions = _predictions(model=model,
kmer_matrix=dataset.kmer_matrix,
train_example_idx=train_example_idx,
test_example_idx=test_example_idx,
progress_callback=progress_callback)
train_answers = dataset.phenotype.metadata[train_example_idx]
train_metrics = _get_binary_metrics(train_predictions, train_answers)
# No need to recompute the bound if bound selection was used
if parameter_selection == "bound":
train_metrics["bound"] = best_hp_score
else:
train_metrics["bound"] = _bound(train_predictions=train_predictions,
train_answers=train_answers,
train_example_idx=train_example_idx,
model=model,
delta=bound_delta,
max_genome_size=bound_max_genome_size,
rule_classifications=KmerRuleClassifications(dataset.kmer_matrix,
dataset.genome_count))
# Test metrics are computed only if there is a testing set
if len(test_example_idx) > 0:
test_answers = dataset.phenotype.metadata[test_example_idx]
test_metrics = _get_binary_metrics(test_predictions, test_answers)
else:
test_metrics = None
# Get the idx of the training/testing examples that are correctly/incorrectly classified by the model
classifications = defaultdict(list)
classifications["train_correct"] = dataset.genome_identifiers[train_example_idx[train_predictions == \
train_answers].tolist()].tolist() if train_metrics["risk"][0] < 1.0 else []
classifications["train_errors"] = dataset.genome_identifiers[train_example_idx[train_predictions != \
train_answers].tolist()].tolist() if train_metrics["risk"][0] > 0 else []
if len(test_example_idx) > 0:
classifications["test_correct"] = dataset.genome_identifiers[test_example_idx[test_predictions == \
test_answers].tolist()].tolist() if test_metrics["risk"][0] < 1.0 else []
classifications["test_errors"] = dataset.genome_identifiers[test_example_idx[test_predictions != \
test_answers].tolist()].tolist() if test_metrics["risk"][0] > 0 else []
# Convert the equivalent rule indexes to rule objects
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
model_equivalent_rules = [[rules[i] for i in equiv_idx] for equiv_idx in equivalent_rules]
return best_hp, best_hp_score, train_metrics, test_metrics, model, rule_importances, \
model_equivalent_rules, classifications
|
Have you ever had issues with creating super tiny objects? I sure have. That’s usually because you are working with way too small geometry relative to your units.
3ds Max has some issues dealing with small scale. So when you are making tiny primitives, you can’t really get under about 0.1 system units, unless you type in the dimensions. For primitives with more than 1 dimension parameter (box, pyramid, etc.) you need at least one of them to be about 0.1 world units.
For example, you won’t be able to create an object with the dimensions of “Box1” but you will with those of “Box2”.
To fix that you have to change the system units to smaller value and then rescale accordingly ,e.g change from meters to centimeters and rescale the entire scene 100 times. To make things easier the utilities tab has a rescale tool.
|
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.contrib.auth.models import User
from django.template import Context
from django.utils.translation import ugettext
from django.utils import translation
from django.utils import timezone
from notification import backends
class OnSiteBackend(backends.BaseBackend):
spam_sensitivity = 0
def can_send(self, user, notice_type):
can_send = super(OnSiteBackend, self).can_send(user, notice_type)
if can_send:
return True
return False
def deliver(self, recipient, sender, notice_type, extra_context):
from notification.models import Notice
if 'disallow_notice' in extra_context:
if 'onsite' in extra_context['disallow_notice']:
return
recipient = User.objects.get(id=recipient.id)
language_code = 'en'
if 'language_code' in extra_context.keys():
for language_tuple in settings.LANGUAGES:
if extra_context['language_code'] in language_tuple:
language_code = language_tuple[0]
break
else:
try:
language_code = recipient.user_profile.default_language
except ObjectDoesNotExist:
language_code = 'en'
translation.activate(language_code)
if 'target' in extra_context and hasattr(extra_context['target'], 'translations'):
from general.utils import switch_language
target = extra_context['target']
extra_context['target'] = switch_language(target, language_code)
if 'pm_message' in extra_context:
sender = extra_context['pm_message'].sender
target_url = self.get_target_url(extra_context, sender, recipient)
context = Context({})
context.update({
"recipient": recipient,
"sender": sender,
"notice": ugettext(notice_type.past_tense),
'default_profile_photo': settings.DEFAULT_PROFILE_PHOTO,
'target_url': target_url,
})
context.update(extra_context)
try:
messages = self.get_formatted_messages((
"full.html",
), context['app_label'], context)
except:
messages = self.get_formatted_messages((
"full.html",
), notice_type.label, context)
if sender.__class__.__name__ == 'Company':
sender = sender.admin_primary if sender.admin_primary else sender.created_by
if recipient.is_active:
create_notice = False
if settings.PRODUCTION_SETTING or settings.DEVELOPMENT_SERVER:
try:
notice_obj = Notice.objects.filter(
recipient=recipient,
notice_type=notice_type,
sender=sender,
target_url=target_url,
on_site=True,
site_id=settings.SITE_ID
).order_by('-added')[0]
except IndexError:
notice_obj = None
create_notice = True
if notice_obj and (timezone.now()-notice_obj.added).seconds/60 > settings.TIME_INTERVAL_BTW_TWO_NOTIFICATION:
create_notice = True
if create_notice:
Notice.objects.create(
recipient=recipient,
notice_type=notice_type,
sender=sender,
message=messages['full.html'],
target_url=target_url,
on_site=True,
site_id=settings.SITE_ID
)
|
The tail lights on the car in front of you are there to warn you what they are doing/planning on doing. When they start indicating you can predict what is coming, the car will drift over the same side as the indicator, slow down and maybe stop (depending on traffic). Finally they will turn in the direction of the indication.
You then should check your mirrors, especially for bikes, and start to slow down. If clear, drift over to the other side of the road in preparation to pass them, check the road markings and oncoming traffic and if safe to do so pass the turning vehicle.
The other tail lights to watch for are the Brake Lights, simply put, when these go on the car in front is slowing down.
Your response to this should be to check your mirrors and ... wait for it ... slow down. While it does sound so obvious and simple you'll be amazed how often people don't seem to get this one.
The best and safest drivers look ahead, they will manoeuvre the car so they can see through other car windows or around the car altogether. They are looking for what the cars two or three spaces ahead are doing. When they see brake lights come on up there, these drivers know that every car in front of them will be doing the same. They'll check their mirrors and come off the accelerator, these drivers don't usually need to brake as often as others because they will lose speed naturally in advance of the upcoming hazard because they saw it coming.
These drivers will see the traffic lights ahead and have a rough idea of what colour they will be when they get there and they will adjust their driving accordingly. You'll spot these ones because they seem to slow down for no apparent reason, but if you look ahead you might just see a red light. In time these drivers get used to the length of different lights and will usually just roll up as they turn green.
All that said DON'T trust that an indicator definately means they are turning that way, don't assume that a Green Light means everyone has stopped for their Red Light. Always keep watch, always check one last time.
|
import colorsys
import webcolors
from cooperhewitt import swatchbook
from colormath.color_objects import RGBColor
from decimal import Decimal
COLOURS = {
'RED': ((255, 0, 0), (340, 17), (10, 100), (40, 100)),
'ORANGE': ((252, 106, 8), (18, 45), None, (66, 100)),
'YELLOW': ((255, 255, 0), (46, 66), None, (76, 100)),
'LIME': ((0, 255, 0), (67, 165), (15, 100), (66, 100)),
'CYAN': ((0, 255, 255), (166, 201), (15, 100), (66, 100)),
'BLUE': ((0, 0, 255), (202, 260), None, (66, 100)),
'MAGENTA': ((255, 0, 255), (261, 339), None, (66, 100)),
'MAROON': ((128, 0, 0), (340, 17), (20, 100), (24, 65)),
'BROWN': ((107, 48, 2), (18, 45), None, (26, 65)),
'OLIVE': ((128, 128, 0), (46, 66), (26, 100), (26, 81)),
'GREEN': ((0, 128, 0), (67, 165), None, (18, 65)),
'TEAL': ((0, 128, 128), (166, 201), None, (33, 65)),
'NAVY': ((0, 0, 128), (202, 260), None, (18, 65)),
'PURPLE': ((128, 0, 128), (261, 339), None, (33, 65)),
}
GREYSCALE = {
'BLACK': ((0, 0, 0), (0, 359), (0, 100), (0, 17)),
'WHITE': ((255, 255, 255), (0, 359), (0, 5), (90, 100)),
'SILVER': ((192, 192, 192), (0, 359), (0, 10), (61, 89)),
'GREY': ((128, 128, 128), (0, 359), (0, 10), (26, 60)),
}
DEFAULT_SAT = (25, 100)
DEFAUL_VAL = (50, 100)
TWOPLACES = Decimal(10) ** -2
class ArtColour:
hsv = ()
rgb = ()
hex_value = ()
css = ()
ansi = ()
ansi_rgb = ()
ansi_hsv = ()
_color = None
GREY = False
distance = None
prominence = None
def __init__(self, r, g, b, prominence):
self.rgb = (r, g, b)
self.prominence = prominence
(self.red, self.blue, self.green) = (r, g, b)
self.hsv = self.rgb_to_hsv(r, g, b)
(self.hue, self.sat, self.val) = \
(self.hsv[0], self.hsv[1], self.hsv[2])
self.ansi = self.ansi_number(r, g, b)
self.ansi_rgb = self.rgb_reduce(r, g, b)
self.ansi_hsv = self.rgb_to_hsv(*self.ansi_rgb)
self.hex_value = None
self.nearest_hex = None
def rgb_to_hsv(self, r, g, b):
fracs = [ch/255.0 for ch in (r, g, b)]
hsv = colorsys.rgb_to_hsv(*fracs)
return (int(round(hsv[0] * 360)),
int(round(hsv[1] * 100)),
int(round(hsv[2] * 100)))
def hsv_to_rgb(self, h, s, v):
rgb = colorsys.hsv_to_rgb(h/360.0, s/100.0, v/100.0)
return (int(round(rgb[0] * 255)),
int(round(rgb[1] * 255)),
int(round(rgb[2] * 255)))
def rgb_reduce(self, r, g, b):
reduced_rgb = [int(6 * float(val) / 256)
* (256/6) for val in (r, g, b)]
return tuple(reduced_rgb)
def spin(self, deg):
return (deg + 180) % 360 - 180
@property
def color(self):
if self._color is None:
self._color = self._get_color()
return self._color
def _get_color(self):
self.nearest = None
self.shortest_distance = 100
chosen_name = None
for color_dict in (COLOURS, GREYSCALE):
for name, color in color_dict.iteritems():
desired_rgb = color[0]
target = RGBColor(*desired_rgb)
cdist = target.delta_e(RGBColor(*self.rgb), method="cmc")
if self.nearest is None or cdist < self.shortest_distance:
self.nearest = name
self.nearest_rgb = desired_rgb
self.shortest_distance = cdist
self.distance = cdist
# print 'Checking', name
(hue_lo, hue_hi) = color[1]
if hue_lo > hue_hi:
h = self.spin(self.hue)
hue_lo = self.spin(hue_lo)
hue_hi = self.spin(hue_hi)
else:
h = self.hue
sat_range = color[2] or DEFAULT_SAT
val_range = color[3] or DEFAUL_VAL
if h in range(hue_lo, hue_hi + 1) and \
self.sat in range(sat_range[0], sat_range[1] + 1) and \
self.val in range(val_range[0], val_range[1] + 1):
# TODO set up desirable hues, sat and b per named colour
target = RGBColor(*desired_rgb)
self.distance = cdist
chosen_name = name
self.nearest_hex = webcolors.rgb_to_hex(self.nearest_rgb)
return chosen_name
return None
def ansi_number(self, r, g, b):
'''
Convert an RGB colour to 256 colour ANSI graphics.
'''
grey = False
poss = True
step = 2.5
while poss: # As long as the colour could be grey scale
if r < step or g < step or b < step:
grey = r < step and g < step and b < step
poss = False
step += 42.5
if grey:
colour = 232 + int(float(sum([r, g, b]) / 33.0))
else:
colour = sum([16] + [int((6 * float(val) / 256)) * mod
for val, mod in ((r, 36), (g, 6), (b, 1))])
return colour
def hex_me_up(self):
self.hex_value = webcolors.rgb_to_hex(self.rgb)
snapped, colour_name = swatchbook.closest_delta_e('css3', self.hex_value)
snapped_rgb = webcolors.hex_to_rgb(snapped)
hsv = self.rgb_to_hsv(*snapped_rgb)
target = RGBColor(*snapped_rgb)
original = RGBColor(*self.rgb)
cdist = target.delta_e(original, method="cmc")
prom = Decimal(self.prominence).quantize(TWOPLACES)
dist = Decimal(cdist).quantize(TWOPLACES)
ELITE = False
self.css = {
'r': self.rgb[0],
'g': self.rgb[1],
'b': self.rgb[2],
'hue': hsv[0],
'hex': snapped,
'name': colour_name,
'distance': float(dist),
'prominence': float(prom),
'elite': ELITE,
}
return self.css
|
This checklist provides suggested actions to take to be ready for iOS 8 upgrades of the iPhones and iPads in your organization. IBM MaaS360 delivers instant support for iPhones and iPads with iOS 8 on the day Apple makes the upgrade available, so we checked off that action item from the list below. IBM MaaS360 and Apple make it easy for organizations realize the untapped potential of mobility with their employees, customers and partners.
|
"""
Test the utils
"""
from transmogrify import utils
def test_is_tool():
assert utils.is_tool('date') is True
assert utils.is_tool('foo') is False
def test_purge_security_hash():
from hashlib import sha1
from transmogrify.settings import SECRET_KEY
security_hash = sha1('PURGE' + SECRET_KEY).hexdigest()
assert utils.is_valid_security('PURGE', security_hash) is True
def test_get_cached_files():
import os
from transmogrify import settings
from transmogrify.core import Transmogrify
testdata = os.path.abspath(settings.BASE_PATH)
t = Transmogrify('/horiz_img_r300x300.jpg?debug')
t.save()
result = utils.get_cached_files('/horiz_img.jpg', document_root=testdata)
filenames = [x.replace(testdata, '') for x in result]
assert '/horiz_img_r300x300.jpg' in filenames
def test_settings_stuff():
from transmogrify import settings
assert settings.bool_from_env('FOO', False) is False
assert settings.bool_from_env('FOO', 'False') is False
assert settings.bool_from_env('FOO', 'false') is False
assert settings.bool_from_env('FOO', 'F') is False
assert settings.bool_from_env('FOO', 'f') is False
assert settings.bool_from_env('FOO', '0') is False
assert settings.bool_from_env('FOO', 'True')
assert settings.bool_from_env('FOO', 'true')
assert settings.bool_from_env('FOO', 'T')
assert settings.bool_from_env('FOO', 't')
assert settings.bool_from_env('FOO', '1')
assert settings.list_from_env("FOO", '1,2,3,4') == ['1', '2', '3', '4']
assert settings.lists_from_env("FOO", '1,2:3,4') == [['1', '2'], ['3', '4']]
|
Posted by markus on 2014/05/19 . No comments.
Posted by markus on 2010/11/21 . No comments.
Posted by markus on 2008/11/09 . No comments.
Posted by markus on 2008/10/27 . No comments.
Posted by markus on 2008/10/03 . No comments.
Posted by markus on 2008/10/01 . No comments.
Posted by markus on 2008/04/21 . No comments.
Posted by markus on 2008/04/09 . No comments.
Posted by markus on 2008/02/28 . No comments.
Posted by markus on 2008/02/18 . No comments.
Posted by markus on 2008/02/11 . No comments.
Posted by markus on 2008/02/10 . No comments.
Ett stort periskop binder samman två olika nivåer i Tensta. Tenstagången och Taxingeplan blir samma nivå, men – det är bara en illusion. Videon ovan visar den tänkta byggprocessen.A huge periscope connects two levels in Tensta, but – it’s all an illusion. The video above shows the construction site.Videocollage.
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n/10)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
We are Planning Team of the Year!
2016 was yet another strong year for NJL with an abundance of instructions, submissions and permissions including some highly significant projects.
Further to being shortlisted in December, NJL team members attended last night's North West Insider Residential Property Awards ceremony at Lancashire County Cricket Club. We are delighted to confirm our teamwork and efforts have been recognised as we won the Planning Team of the Year category!
Thank you to the clients and multi-disciplinary professionals who have supported us and helped us reach this achievement.
Banner image courtesy of Insider. From left to right: Nick Lee (NJL Managing Director), Annabel Partridge (NJL Associate Director), Katie Delaney (NJL Associate Director) and Clair Jarvis (Sales Director at Redrow Homes).
|
# coding: utf-8
from fabric import api as fab
from fabric.api import env, task
from bsdploy.fabfile_mfsbsd import bootstrap as mfsbootstrap
from ploy.common import yesno
from ploy.config import value_asbool
AV = None
# hide stdout by default
# from fabric.state import output
# output['stdout'] = False
@task
def bootstrap(**kw):
mfsbootstrap(**kw)
def get_vars():
global AV
if AV is None:
hostname = env.host_string.split('@')[-1]
AV = dict(hostname=hostname, **env.instances[hostname].get_ansible_variables())
return AV
@task
def reset_cleansers(confirm=True):
"""destroys all cleanser slaves and their rollback snapshots, as well as the initial master
snapshot - this allows re-running the jailhost deployment to recreate fresh cleansers."""
if value_asbool(confirm) and not yesno("""\nObacht!
This will destroy any existing and or currently running cleanser jails.
Are you sure that you want to continue?"""):
exit("Glad I asked...")
get_vars()
cleanser_count = AV['ploy_cleanser_count']
# make sure no workers interfere:
fab.run('ezjail-admin stop worker')
# stop and nuke the cleanser slaves
for cleanser_index in range(cleanser_count):
cindex = '{:02d}'.format(cleanser_index + 1)
fab.run('ezjail-admin stop cleanser_{cindex}'.format(cindex=cindex))
with fab.warn_only():
fab.run('zfs destroy tank/jails/cleanser_{cindex}@jdispatch_rollback'.format(cindex=cindex))
fab.run('ezjail-admin delete -fw cleanser_{cindex}'.format(cindex=cindex))
fab.run('umount -f /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
fab.run('rm -rf /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
with fab.warn_only():
# remove master snapshot
fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
# restart worker and cleanser to prepare for subsequent ansible configuration runs
fab.run('ezjail-admin start worker')
fab.run('ezjail-admin stop cleanser')
fab.run('ezjail-admin start cleanser')
@task
def reset_jails(confirm=True, keep_cleanser_master=True):
""" stops, deletes and re-creates all jails.
since the cleanser master is rather large, that one is omitted by default.
"""
if value_asbool(confirm) and not yesno("""\nObacht!
This will destroy all existing and or currently running jails on the host.
Are you sure that you want to continue?"""):
exit("Glad I asked...")
reset_cleansers(confirm=False)
jails = ['appserver', 'webserver', 'worker']
if not value_asbool(keep_cleanser_master):
jails.append('cleanser')
with fab.warn_only():
for jail in jails:
fab.run('ezjail-admin delete -fw {jail}'.format(jail=jail))
# remove authorized keys for no longer existing key (they are regenerated for each new worker)
fab.run('rm /usr/jails/cleanser/usr/home/cleanser/.ssh/authorized_keys')
|
Roll up your sleeves, grab a bottle of water and get ready to sweat! We designed the Pops, Locks & Shimmies Series as an educational tool to help you become a better, stronger dancer with your own unique style. Using these two DVDs together as part of your regular training program is highly recommended and endorsed by us to help you achieve your dance goals!!
Your support in purchasing our DVDs directly from us, the artist, is greatly appreciated!
|
'''
Created on 30 dec. 2012
@author: Juice
'''
from PySide import QtGui, QtCore
from math import *
from structure.cell import Cell
from constraints import Constraint
def centerTextItem(text):
form = QtGui.QTextBlockFormat()
form.setAlignment(QtCore.Qt.AlignCenter)
cursor = text.textCursor()
cursor.select(QtGui.QTextCursor.Document)
cursor.mergeBlockFormat(form)
cursor.clearSelection()
class ValueHexagon(QtGui.QGraphicsPolygonItem):
def __init__(self, cell, cellSize, position, parent=None, edgeColor=QtCore.Qt.black):
# normalized hexagon
polygon = QtGui.QPolygonF(
[QtCore.QPointF(
cos(x*pi/3)+1,
sin(x*pi/3)+sqrt(3)/2
)*cellSize/2
for x in range(0,6)]
)
polygon.translate(position)
super(ValueHexagon, self).__init__(polygon, parent)
self.cell = cell
self.position = position
self.cellSize = cellSize
if isinstance(cell, Cell):
self.values = cell.getGrid().getPuzzle().getValues()
else:
self.values = []
self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
self.hintValueItemMap = {}
self.edgeColor = edgeColor
pen = QtGui.QPen()
pen.setColor(edgeColor)
pen.setWidth(2)
self.setPen(pen)
self.hintsEnabled = True
self.instantiateRepresentation()
self.updateRepresentation()
def mousePressEvent(self, event):
return QtGui.QGraphicsRectItem.mousePressEvent(self, event)
def instantiateRepresentation(self):
# for each value, instantiate the hints, hidden by default
for val in self.values:
# this is the static calculation for a block of 3z3
off_x = (((val-1) % 3) + 0.7) * (self.cellSize/4)
off_y = (floor((val-1) / 3) + 0.25) * (self.cellSize/4)
t = QtGui.QGraphicsTextItem(str(val))
t.setParentItem(self)
t.setPos(self.position.x()+ off_x, self.position.y() + off_y)
t.setOpacity(0)
self.hintValueItemMap[val] = t
# add a big text item to show the set value, hidden by default
val = self.cell.getValue() if isinstance(self.cell, Cell) else self.cell.getTotalValue()
self.valueTextItem = QtGui.QGraphicsTextItem(str())
self.valueTextItem.setParentItem(self)
self.valueTextItem.setPos(self.position.x(), self.position.y() + self.cellSize/6)
f = QtGui.QFont("Sans serif", self.cellSize/3 ,200)
if isinstance(self.cell, Cell):
if(self.cell.isInferred()):
f.setWeight(0)
else:
self.valueTextItem.setDefaultTextColor(QtCore.Qt.blue)
self.valueTextItem.setFont(f)
self.valueTextItem.setTextWidth(self.cellSize)
# align to center of cell
centerTextItem(self.valueTextItem)
self.valueTextItem.setOpacity(0)
def updateRepresentation(self):
val = self.cell.getValue() if isinstance(self.cell, Cell) else self.cell.getTotalValue()
if(val <> None):
# first hide all the hints
self.hideHints()
# show value text
self.valueTextItem.setOpacity(1)
self.valueTextItem.setPlainText(str(val))
# re-align to middle of cell
centerTextItem(self.valueTextItem)
f = self.valueTextItem.font()
if(isinstance(self.cell, Constraint) or self.cell.isInferred()):
f.setWeight(0)
self.valueTextItem.setDefaultTextColor(QtCore.Qt.black)
else:
f.setWeight(200)
self.valueTextItem.setDefaultTextColor(QtCore.Qt.blue)
else:
self.valueTextItem.setOpacity(0)
# show all the possible values
vals = self.cell.getPossibleValues()
numValProcessed = 0
for val in self.values:
if(numValProcessed >= 9):
break
numValProcessed += 1
if self.hintsEnabled and val in vals:
self.hintValueItemMap[val].setOpacity(1)
else:
self.hintValueItemMap[val].setOpacity(0)
pass
def setHintsEnabled(self, hintsEnabled):
self.hintsEnabled = hintsEnabled
self.updateRepresentation()
def hideHints(self):
for val in self.values:
self.hintValueItemMap[val].setOpacity(0)
|
Neptune Sonar produce a vast range of high quality echo-sounder transducers in various mechanical profiles, frequencies & beam widths. These can be impedance-matched to meet a wide variety of OEM echo-sounder systems. Typical applications include fish finding, navigation, hydrographic surveys and scientific exploration. Products include single beam, dual beam and dual frequency transducers for over-side or hull mounting.
As with all our products, if you cannot find the right echo-sounder transducer to suit your needs, we’ll work with you to develop a product that meets your exact specification.
Adaptable to a wide range of seabed depths and target resolutions. Over-side, hull and external mounting are available in a choice of frequencies.
Available in three housing shapes for over-side, hull or external mounting, these transducers have applications in surveying, geophysical and fish stock assessments.
|
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Théo Dorée
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.browser import LoginBrowser, URL, need_login
from weboob.exceptions import BrowserIncorrectPassword
from weboob.tools.capabilities.bank.transactions import merge_iterators
from .pages import LoginPage, AccountsPage, AccountDetailsPage, TransactionsPage
class MyedenredBrowser(LoginBrowser):
BASEURL = 'https://www.myedenred.fr'
login = URL(r'/ctr\?Length=7',
r'/ExtendedAccount/Logon', LoginPage)
accounts = URL(r'/$', AccountsPage)
accounts_details = URL(r'/ExtendedHome/ProductLine\?benId=(?P<token>\d+)', AccountDetailsPage)
transactions = URL('/Card/TransactionSet', TransactionsPage)
def __init__(self, *args, **kwargs):
super(MyedenredBrowser, self).__init__(*args, **kwargs)
self.docs = {}
def do_login(self):
self.login.go(data={'Email': self.username, 'Password': self.password, 'RememberMe': 'false',
'X-Requested-With': 'XMLHttpRequest', 'ReturnUrl': '/'})
self.accounts.go()
if self.login.is_here():
raise BrowserIncorrectPassword
@need_login
def iter_accounts(self):
for acc_id in self.accounts.stay_or_go().get_accounts_id():
yield self.accounts_details.go(headers={'X-Requested-With': 'XMLHttpRequest'},
token=acc_id).get_account()
@need_login
def iter_history(self, account):
def iter_transactions_by_type(type):
history = self.transactions.go(data={'command': 'Charger les 10 transactions suivantes',
'ErfBenId': account._product_token,
'ProductCode': account._product_type,
'SortBy': 'DateOperation',
'StartDate': '',
'EndDate': '',
'PageNum': 10,
'OperationType': type,
'failed': 'false',
'X-Requested-With': 'XMLHttpRequest'
})
return history.iter_transactions(subid=account.id)
if account.id not in self.docs:
iterator = merge_iterators(iter_transactions_by_type(type='Debit'), iter_transactions_by_type(type='Credit'))
self.docs[account.id] = list(iterator)
return self.docs[account.id]
|
adhesive foam tape double sided adhesive foam tape die cutting white color.
adhesive foam tape foam ta single sided self adhesive.
adhesive foam tape black single sided self adhesive foam tape.
adhesive foam tape double sided strong sticky self adhesive foam tape mounting fixing pad elegant tape.
adhesive foam tape adhesive foam tape.
adhesive foam tape vinyl foam.
adhesive foam tape pressure adhesive black double sided foam tape.
adhesive foam tape x double sided white colored adhesive tape heat resistant foam tape thick track paper rolls of wrapping paper from kepi.
adhesive foam tape all purpose self adhesive foam tape.
adhesive foam tape black single sided self adhesive foam tape closed cell wide x thick.
adhesive foam tape self adhesive foam strip.
adhesive foam tape foam tape.
adhesive foam tape double sided adhesive foam tape green double sided adhesive foam tape green.
adhesive foam tape white adhesive foam tape china white adhesive foam tape.
adhesive foam tape china adhesive foam foam double sided ta for car.
adhesive foam tape electric drift car loading zoom.
adhesive foam tape white double sided strong sticky self adhesive foam tape mounting fixing pad.
adhesive foam tape picture 8 of 8.
adhesive foam tape from m mm foam tape double sided sponge tape waterproof mounting adhesive tape roll automotive grade number plates cars trims black.
adhesive foam tape sponge rubber neoprene blended foam tapes.
adhesive foam tape x black dual sided self adhesive sponge foam tape length.
adhesive foam tape black single sided self adhesive foam tape sticker width x.
adhesive foam tape x single sided self adhesive shockproof sponge foam tape length.
adhesive foam tape adhesive tape.
adhesive foam tape grid adhesive foam tape.
adhesive foam tape double sided tape adhesive foam tape automotive strong.
|
import pygame, sys, re
from time import sleep
from pygame.locals import *
from random import shuffle
clock = pygame.time.Clock()
FPS = 30
playtime = 0.0
counter = 0
pygame.font.init()
font = pygame.font.Font(None, 30)
batch_size = 7 # 7 Squares displayed (e.g 7 of 16*9 = 7/144 )
tiles = {}
sprite_currently_displayed = False
##### Adjust these sleep times to suit###################
sleep_time_for_none_icons = 1 # A sprite is not displayed
sleep_time_for_icons = 1 # A sprite is displayed
##########################################################
pygame.init()
#Framebuffer size: 1776 x 952
sizex=1776
sizey=952
xdivision = 16
ydivision = 9
xshuf = [i for i in range(xdivision*ydivision)]
unitx = sizex/xdivision
unity = sizey/ydivision
import os
ins = open( "sprite_positions.txt", "r" )
for line in ins:
print line
try:
m = re.search('^(\w+)\_(\d+)\_(\d+)\.png: (\d+), (\d+)',line)
except:
print ("Cannot match regexp on %s " % line)
(spritename, spritex, spritey, extentx, extenty) = (m.group(1), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)))
# print ("%s %f %f %i %i" % (spritename, spritex, spritey, extentx, extenty))
spriteboxx = int(spritex%xdivision)
spriteboxy = int(spritey%ydivision)
print ("spriteboxx = %i spriteboxy= %i" % (spriteboxx, spriteboxy))
spriteboxnumber = int((spriteboxy*xdivision)+spriteboxx)
print ("spriteboxnumber = %i " % spriteboxnumber)
tiles[spriteboxnumber] = ( spritename, spritex, spritey, extentx, extenty)
ins.close()
for key in tiles.keys():
( spritename, spritex, spritey, extentx, extenty) = tiles[key]
# print ("%i %s %i %i" % (key, spritename, spritex, spritey))
screen = pygame.display.set_mode((sizex, sizey))
background = pygame.image.load('data/plastic_reality_bg.png').convert()
im2= pygame.Surface(screen.get_size())
#im2.fill((0, 0, 0))
im2 = pygame.image.load('data/all_on_one_no_bg.png').convert_alpha()
screen.blit(background,(0,0))
pygame.display.flip()
while True:
milliseconds = clock.tick(FPS)
playtime += milliseconds / 1000.0
shuffle(xshuf)
for i in range(0,7):
random_value = xshuf[i]
print ("Random value %i " % random_value)
try:
( spritename, spritex, spritey, extentx, extenty) = tiles[random_value]
except:
spritename = False
if (spritename):
randomx = spritex
randomy = spritey
print ("%s %f,%f, %f, %f" % (spritename, randomx,randomy, extentx, extenty))
# screen.blit(background, (0, 0))
screen.blit(im2, (randomx, randomy), pygame.Rect(randomx, randomy, extentx, extenty))
#text_surface = font.render("FPS: %f Playtime: %f " % (clock.get_fps(),playtime), True, (255,255,255))
#screen.blit(text_surface, (10, 10))
pygame.display.flip()
# sleep(1)
#sleep(sleep_time_for_icons)
sprite_currently_displayed = True
else:
# print ('.')
# sleep(1)
sleep(sleep_time_for_none_icons)
if (sprite_currently_displayed == True):
screen.blit(background, (0, 0))
pygame.display.flip()
sprite_currently_displayed = False
|
The man with the frizzy hair and India’s favourite grandpa, APJ Abdul Kalam was one of the most proactive President of recent times. He is also known as People’s President and India’s Missile Man for advancing India’s ballistic missile programs. Known for championing youth causes’, Kalam also launched the What Can I Give movement in 2011 to defeat corruption and realize his life goal of turning India into a developed country by 2020.
|
#!/usr/bin/python3
# vim: set foldmethod=marker fileencoding=utf8 :
# Python parts of the host side driver for Franklin. {{{
# Copyright 2014-2016 Michigan Technological University
# Copyright 2016 Bas Wijnen <[email protected]>
# Author: Bas Wijnen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# }}}
show_own_debug = False
#show_own_debug = True
# Constants {{{
C0 = 273.15 # Conversion between K and °C
WAIT = object() # Sentinel for blocking functions.
NUM_SPACES = 3
# Space types
TYPE_CARTESIAN = 0
TYPE_DELTA = 1
TYPE_POLAR = 2
TYPE_EXTRUDER = 3
TYPE_FOLLOWER = 4
record_format = '=Bidddddddd' # type, tool, X, Y, Z, E, f, F, time, dist
# }}}
# Imports. {{{
import fhs
import websocketd
from websocketd import log
import serial
import time
import math
import struct
import os
import re
import sys
import wave
import sys
import io
import base64
import json
import fcntl
import select
import subprocess
import traceback
import protocol
import mmap
import random
import errno
import shutil
# }}}
config = fhs.init(packagename = 'franklin', config = { # {{{
'cdriver': None,
'allow-system': None,
'uuid': None,
'local': False,
'arc': True
})
# }}}
# Enable code trace. {{{
if False:
def trace(frame, why, arg):
if why == 'call':
code = frame.f_code
log('call: %d %s' % (code.co_firstlineno, code.co_name))
sys.settrace(trace)
# }}}
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
def dprint(x, data): # {{{
if show_own_debug:
log('%s: %s' % (x, ' '.join(['%02x' % c for c in data])))
# }}}
# Decorator for functions which block.
def delayed(f): # {{{
def ret(self, *a, **ka):
#log('delayed called with args %s,%s' % (repr(a), repr(ka)))
def wrap(id):
#log('wrap called with id %s' % (repr(id)))
return f(self, id, *a, **ka)
return (WAIT, wrap)
return ret
# }}}
# Call cdriver running on same machine.
class Driver: # {{{
def __init__(self):
#log(repr(config))
self.driver = subprocess.Popen((config['cdriver'],), stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
fcntl.fcntl(self.driver.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
self.buffer = b''
def available(self):
return len(self.buffer) > 0
def write(self, data):
self.driver.stdin.write(data)
self.driver.stdin.flush()
def read(self, length):
while True:
if len(self.buffer) >= length:
ret = self.buffer[:length]
self.buffer = self.buffer[length:]
return ret
try:
r = os.read(self.driver.stdout.fileno(), 4096)
except IOError:
r = self.buffer[:length]
self.buffer = self.buffer[length:]
return r
except OSError as exc:
if exc.errno == errno.EAGAIN:
r = self.buffer[:length]
self.buffer = self.buffer[length:]
return r
raise
if r == b'':
log('EOF!')
self.close()
self.buffer += r
def close(self):
log('Closing machine driver; exiting.')
sys.exit(0)
def fileno(self):
return self.driver.stdout.fileno()
# }}}
# Reading and writing pins to and from ini files. {{{
def read_pin(machine, pin):
extra = 0
if pin.startswith('X'):
pin = pin[1:]
if pin == '':
return 0
else:
extra += 256
if pin.startswith('-'):
extra += 512
pin = pin[1:]
try:
pin = int(pin)
except:
log('incorrect pin %s' % pin)
return 0
if pin >= len(machine.pin_names):
machine.pin_names.extend([[0xf, '(Pin %d)' % i] for i in range(len(machine.pin_names), pin + 1)])
return pin + extra
def write_pin(pin):
if pin == 0:
return 'X'
ret = ''
if pin >= 512:
ret += '-'
pin -= 512
if pin >= 256:
pin -= 256
else:
ret = 'X' + ret
return ret + '%d' % pin
# }}}
class Machine: # {{{
# Internal stuff. {{{
def _read_data(self, data): # {{{
cmd, s, m, e, f = struct.unpack('=BLLLd', data[:21])
return cmd, s, m, f, e, data[21:]
# }}}
def _send(self, *data): # {{{
#log('writing to server: %s' % repr(data))
sys.stdout.write(json.dumps(data) + '\n')
sys.stdout.flush()
# }}}
def _refresh_queue(self):
if self.uuid is None:
return
spool = fhs.read_spool(self.uuid, dir = True, opened = False)
if spool is None:
return
gcode = os.path.join(spool, 'gcode')
audio = os.path.join(spool, 'audio')
probe = fhs.read_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False)
if probe is not None:
try:
# Map = [[targetx, targety, x0, y0, w, h], [nx, ny], [[...], [...], ...]]
size = struct.calcsize('@ddddddddLLd')
targetx, targety, x0, y0, w, h, sina, cosa, nx, ny, self.targetangle = struct.unpack('@ddddddddLLd', probe.read(size))
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
sina, cosa = self.gcode_angle
limits = [targetx, targety, x0, y0, w, h]
nums = [nx, ny, self.targetangle]
if not (0 < nx < 1000 and 0 < ny < 1000):
raise ValueError('probe map too large; probably invalid')
probes = [[None for x in range(nx + 1)] for y in range(ny + 1)]
for y in range(ny + 1):
for x in range(nx + 1):
probes[y][x] = struct.unpack('@d', probe.read(struct.calcsize('@d')))[0]
self.probemap = [limits, nums, probes]
except:
log('Failed to load probe map')
self._globals_update()
if os.path.isdir(gcode):
self.jobqueue = {}
for filename in os.listdir(gcode):
name, ext = os.path.splitext(filename)
if ext != os.extsep + 'bin':
log('skipping %s' % filename)
continue
try:
#log('opening %s' % filename)
with open(os.path.join(gcode, filename), 'rb') as f:
f.seek(-8 * 8, os.SEEK_END)
self.jobqueue[name] = struct.unpack('=' + 'd' * 8, f.read())
except:
traceback.print_exc()
log('failed to open gcode file %s' % os.path.join(gcode, filename))
sortable_queue = [(q, self.jobqueue[q]) for q in self.jobqueue]
sortable_queue.sort()
self._broadcast(None, 'queue', sortable_queue)
if os.path.isdir(audio):
self.audioqueue = {}
for filename in os.listdir(audio):
name, ext = os.path.splitext(filename)
if ext != os.extsep + 'bin':
log('skipping %s' % filename)
continue
try:
#log('opening audio %s' % filename)
self.audioqueue[name] = os.stat(os.path.join(audio, filename)).st_size
except:
traceback.print_exc()
log('failed to stat audio file %s' % os.path.join(audio, filename))
sortable_queue = list(self.audioqueue.keys())
sortable_queue.sort()
self._broadcast(None, 'audioqueue', sortable_queue)
def __init__(self, allow_system): # {{{
self.initialized = False
self.connected = False
self.uuid = config['uuid']
# Start a block because the next line has an accidental end marker. {{{
self.user_interface = '{Dv2m(Blocker:){Dv2m(No Connection:){dv3m{dv3m{dv3m[0:*Controls:{Dh60%{Dv12m{Dv5m{dh11m(Job Control:)(Buttons:)}(Position:)}{Dh85%(XY Map:)(Z Map:)}}{Dv4m(Abort:){Dv6m(Multipliers:){Dv2m(Gpios:){Dv9m(Temps:)(Temp Graph:)}}}}}Setup:{Dv2m(Save Profile:)[0:*Profile:(Profile Setup:)Probe:(Probe Setup:)Globals:(Globals Setup:)Axes:(Axis Setup:)Motors:(Motor Setup:)Type:{Dv3m(Type Setup:){Dh50%(Cartesian Setup:){Dh50%(Delta Setup:)(Polar Setup:)}}}Extruder:(Extruder Setup:)Follower:(Follower Setup:)GPIO:(Gpio Setup:)Temps:(Temp Setup:)]}](Confirmation:)}(Message:)}(State:)}}}'
self.pin_names = []
self.machine = Driver()
self.allow_system = allow_system
self.probemap = None
self.job_current = None
self.job_id = None
self.confirm_id = 0
self.confirm_message = None
self.confirm_axes = None
self.confirmer = None
self.position_valid = False
self.probing = False
self.probe_pending = False
self.parking = False
self.home_phase = None
self.home_target = None
self.home_cb = [False, self._do_home]
self.probe_cb = [False, None]
self.probe_speed = 3.
self.gcode_file = False
self.gcode_map = None
self.gcode_id = None
self.gcode_waiting = 0
self.audio_id = None
self.queue = []
self.queue_pos = 0
self.queue_info = None
self.confirm_waits = set()
self.gpio_waits = {}
self.total_time = [float('nan'), float('nan')]
self.resuming = False
self.flushing = False
self.debug_buffer = None
self.machine_buffer = ''
self.command_buffer = ''
self.bed_id = -1
self.fan_id = -1
self.spindle_id = -1
self.probe_dist = 1000
self.probe_offset = 0
self.probe_safe_dist = 10
self.num_probes = 1
self.unit_name = 'mm'
self.park_after_job = True
self.sleep_after_job = True
self.cool_after_job = True
self.spi_setup = []
# Set up state.
self.spaces = [self.Space(self, i) for i in range(NUM_SPACES)]
self.temps = []
self.gpios = []
self.probe_time_dist = [float('nan'), float('nan')]
self.sending = False
self.paused = False
self.limits = [{} for s in self.spaces]
self.wait = False
self.movewait = 0
self.movecb = []
self.tempcb = []
self.alarms = set()
self.targetx = 0.
self.targety = 0.
self.targetangle = 0.
self.zoffset = 0.
self.store_adc = False
self.temp_scale_min = 0
self.temp_scale_max = 250
self.multipliers = []
self.current_extruder = 0
try:
assert self.uuid is not None # Don't try reading if there is no uuid given.
with fhs.read_data(os.path.join(self.uuid, 'info' + os.extsep + 'txt')) as pfile:
self.name = pfile.readline().rstrip('\n')
self.profile = pfile.readline().rstrip('\n')
#log('profile is %s' % self.profile)
except:
#log("No default profile; using 'default'.")
self.name = self.uuid
self.profile = 'default'
profiles = self.list_profiles()
if self.profile not in profiles and len(profiles) > 0:
self.profile = profiles[0]
#log('Profile does not exist; using %s instead' % self.profile)
self.default_profile = self.profile
# Globals.
self.queue_length = 0
self.num_pins = 0
self.led_pin = 0
self.stop_pin = 0
self.probe_pin = 0
self.spiss_pin = 0
self.timeout = 0
self.bed_id = -1
self.fan_id = -1
self.spindle_id = -1
self.feedrate = 1
self.max_deviation = 0
self.max_v = float('inf')
self.current_extruder = 0
self.targetx = 0.
self.targety = 0.
self.targetangle = 0.
self.zoffset = 0.
# Other things don't need to be initialized, because num_* == 0.
# Fill job queue.
self.jobqueue = {}
self.audioqueue = {}
self._refresh_queue()
try:
self.load(update = False)
except:
log('Failed to import initial settings')
traceback.print_exc()
global show_own_debug
if show_own_debug is None:
show_own_debug = True
# }}}
# Constants. {{{
# Single-byte commands.
single = {'OK': b'\xb3', 'WAIT': b'\xad' }
# }}}
def _broadcast(self, *a): # {{{
self._send(None, 'broadcast', *a)
# }}}
def _close(self, notify = True): # {{{
log('disconnecting')
self.connected = False
if notify:
self._send(None, 'disconnect')
self._globals_update()
# }}}
def _machine_read(self, *a, **ka): # {{{
while True:
try:
return self.machine.read(*a, **ka)
except:
log('error reading')
traceback.print_exc()
sys.exit(0)
# }}}
def _machine_write(self, data): # {{{
#log('writing %s' % ' '.join(['%02x' % x for x in data]))
while True:
try:
self.machine.write(data)
return
except:
log('error writing')
traceback.print_exc()
sys.exit(0)
# }}}
def _command_input(self): # {{{
data = sys.stdin.read()
if data == '':
log('End of file detected on command input; exiting.')
sys.exit(0)
self.command_buffer += data
die = None
#log('cmd buf %s' % repr(self.command_buffer))
while '\n' in self.command_buffer:
pos = self.command_buffer.index('\n')
id, func, a, ka = json.loads(self.command_buffer[:pos])
self.command_buffer = self.command_buffer[pos + 1:]
try:
#log('command: %s(%s %s)' % (func, a, ka))
assert not any(func.startswith(x + '_') for x in ('benjamin', 'admin', 'expert', 'user'))
role = a.pop(0) + '_'
if hasattr(self, role + func):
func = role + func
elif role == 'benjamin_' and hasattr(self, 'admin_' + func):
func = 'admin_' + func
elif role in ('benjamin_', 'admin_') and hasattr(self, 'expert_' + func):
func = 'expert_' + func
ret = getattr(self, func)(*a, **ka)
if isinstance(ret, tuple) and len(ret) == 2 and ret[0] is WAIT:
# The function blocks; it will send its own reply later.
if ret[1] is WAIT:
# Special case: request to die.
die = id
else:
ret[1](id)
continue
except SystemExit:
sys.exit(0)
except:
log('error handling command input')
traceback.print_exc()
self._send(id, 'error', repr(sys.exc_info()))
continue
if ret != (WAIT, WAIT):
#log('returning %s' % repr(ret))
self._send(id, 'return', ret)
if die is not None:
self._send(die, 'return', None)
sys.exit(0)
# }}}
def _trigger_movewaits(self, num, done = True): # {{{
#traceback.print_stack()
#log('trigger %s' % repr(self.movecb))
#log('movecbs: %d/%d' % (num, self.movewait))
if self.movewait < num:
log('More cbs received than requested!')
self.movewait = 0
else:
#log('movewait %d/%d' % (num, self.movewait))
self.movewait -= num
if self.movewait == 0:
#log('running cbs: %s' % repr(self.movecb))
call_queue.extend([(x[1], [done]) for x in self.movecb])
self.movecb = []
if self.flushing and self.queue_pos >= len(self.queue):
#log('done flushing')
self.flushing = 'done'
#else:
# log('cb seen, but waiting for more')
# }}}
def _machine_input(self, reply = False): # {{{
while True:
if len(self.machine_buffer) == 0:
r = self._machine_read(1)
dprint('(1) read', r)
if r == b'':
return ('no data', None)
if r == self.single['WAIT']:
return ('wait', None)
if r == self.single['OK']:
return ('ok', None)
# Regular packet.
self.machine_buffer = r
packet_len = self.machine_buffer[0]
while True:
r = self._machine_read(packet_len - len(self.machine_buffer))
dprint('rest of packet read', r)
if r == '':
return (None, None)
self.machine_buffer += r
if len(self.machine_buffer) >= packet_len:
break
if not self.machine.available():
#log('waiting for more data (%d/%d)' % (len(self.machine_buffer), packet_len))
ret = select.select([self.machine], [], [self.machine], 1)
if self.machine not in ret[0]:
log('broken packet?')
return (None, None)
#log('writing ok')
self.machine.write(self.single['OK'])
cmd, s, m, f, e, data = self._read_data(self.machine_buffer[1:])
#log('received command: %s' % repr((cmd, s, m, f, e, data)))
self.machine_buffer = ''
# Handle the asynchronous events.
if cmd == protocol.rcommand['MOVECB']:
#log('movecb %d/%d (%d in queue)' % (s, self.movewait, len(self.movecb)))
self._trigger_movewaits(s)
continue
if cmd == protocol.rcommand['TEMPCB']:
self.alarms.add(s)
t = 0
while t < len(self.tempcb):
if self.tempcb[t][0] is None or self.tempcb[t][0] in self.alarms:
call_queue.append((self.tempcb.pop(t)[1], []))
else:
t += 1
continue
elif cmd == protocol.rcommand['CONTINUE']:
# Move continue.
self.wait = False
#log('resuming queue %d' % len(self.queue))
call_queue.append((self._do_queue, []))
if self.flushing is None:
self.flushing = False
continue
elif cmd == protocol.rcommand['LIMIT']:
if s < len(self.spaces) and m < len(self.spaces[s].motor):
self.limits[s][m] = f
#log('limit; %d waits' % e)
self._trigger_movewaits(self.movewait, False)
continue
elif cmd == protocol.rcommand['TIMEOUT']:
self.position_valid = False
call_queue.append((self._globals_update, ()))
for i, t in enumerate(self.temps):
if not math.isnan(t.value):
t.value = float('nan')
call_queue.append((self._temp_update, (i,)))
for i, g in enumerate(self.gpios):
if g.state != g.reset:
g.state = g.reset
call_queue.append((self._gpio_update, (i,)))
continue
elif cmd == protocol.rcommand['PINCHANGE']:
self.gpios[s].value = m
call_queue.append((self._gpio_update, (s,)))
if s in self.gpio_waits:
for id in self.gpio_waits[s]:
self._send(id, 'return', None)
del self.gpio_waits[s]
continue
elif cmd == protocol.rcommand['HOMED']:
call_queue.append((self._do_home, [True]))
continue
elif cmd == protocol.rcommand['DISCONNECT']:
self._close()
# _close returns after reconnect.
continue
elif cmd == protocol.rcommand['UPDATE_TEMP']:
if s < len(self.temps):
self.temps[s].value = f - C0
self._temp_update(s)
else:
log('Ignoring updated invalid temp %d' % s)
continue
elif cmd == protocol.rcommand['UPDATE_PIN']:
self.gpios[s].state = m
call_queue.append((self._gpio_update, (s,)))
continue
elif cmd == protocol.rcommand['CONFIRM']:
if s and self.probemap is not None:
self.probe_pending = True
call_queue.append((self.request_confirmation(data.decode('utf-8', 'replace') or 'Continue?')[1], (False,)))
continue
elif cmd == protocol.rcommand['PARKWAIT']:
def cb():
self._send_packet(bytes((protocol.command['RESUME'],)))
call_queue.append((self.park(cb = cb, abort = False)[1], (None,)))
continue
elif cmd == protocol.rcommand['FILE_DONE']:
call_queue.append((self._job_done, (True, 'completed')))
continue
elif cmd == protocol.rcommand['PINNAME']:
if s >= len(self.pin_names):
self.pin_names.extend([[0xf, '(Pin %d)' % i] for i in range(len(self.pin_names), s + 1)])
self.pin_names[s] = [data[0], data[1:].decode('utf-8', 'replace')] if len(data) >= 1 else [0, '']
#log('pin name {} = {}'.format(s, self.pin_names[s]))
continue
elif cmd == protocol.rcommand['CONNECTED']:
def sync():
# Get the machine state.
self._write_globals(update = False)
for i, s in enumerate(self.spaces):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_SPACE_INFO'], i) + s.write_info())
for a in range(len(s.axis)):
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_AXIS'], i, a) + s.write_axis(a))
for m in range(len(s.motor)):
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], i, m) + s.write_motor(m))
for i, t in enumerate(self.temps):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_TEMP'], i) + t.write())
# Disable heater.
self.settemp(i, float('nan'), update = False)
# Disable heater alarm.
self.waittemp(i, None, None)
for i, g in enumerate(self.gpios):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_GPIO'], i) + g.write())
# The machine may still be doing things. Pause it and send a move; this will discard the queue.
self.pause(True, False, update = False)
if self.spi_setup:
self._spi_send(self.spi_setup)
self.connected = True
self._globals_update()
call_queue.append((sync, ()))
continue
if reply:
return ('packet', (cmd, s, m, f, e, data))
log('unexpected packet %02x' % cmd)
raise AssertionError('Received unexpected reply packet')
# }}}
def _send_packet(self, data, move = False): # {{{
if len(data) + 2 >= 0x8000:
log('Message too long (%d >= %d)' % (len(data) + 2, 0x8000))
return
# Pack length as big endian, so first byte never has bit 7 set.
data = struct.pack('>H', len(data) + 2) + data
dprint('(1) writing', data);
self._machine_write(data)
if not move:
return
start_time = time.time()
while True:
if not self.machine.available():
ret = select.select([self.machine], [], [self.machine], 1)
if self.machine not in ret[0] and self.machine not in ret[2]:
# No response; keep waiting.
log('no response yet: %s' % repr(ret))
assert time.time() - start_time < 10
continue
ret = self._machine_input()
if ret[0] == 'wait':
#log('wait')
self.wait = True
return
elif ret[0] == 'ok':
return
#log('no response yet')
# }}}
def _get_reply(self, cb = False): # {{{
#traceback.print_stack()
while True:
if not self.machine.available():
ret = select.select([self.machine], [], [self.machine], 3)
if len(ret[0]) == 0 and len(ret[2]) == 0:
log('no reply received')
#traceback.print_stack()
continue
ret = self._machine_input(reply = True)
#log('reply input is %s' % repr(ret))
if ret[0] == 'packet' or (cb and ret[0] == 'no data'):
return ret[1]
#log('no response yet waiting for reply')
# }}}
def _read(self, cmd, channel, sub = None): # {{{
if cmd == 'SPACE':
info = self._read('SPACE_INFO', channel)
self.spaces[channel].type = struct.unpack('=B', info[:1])[0]
info = info[1:]
if self.spaces[channel].type == TYPE_CARTESIAN:
num_axes = struct.unpack('=B', info)[0]
num_motors = num_axes
elif self.spaces[channel].type == TYPE_DELTA:
self.spaces[channel].delta = [{}, {}, {}]
for a in range(3):
self.spaces[channel].delta[a]['axis_min'], self.spaces[channel].delta[a]['axis_max'], self.spaces[channel].delta[a]['rodlength'], self.spaces[channel].delta[a]['radius'] = struct.unpack('=dddd', info[32 * a:32 * (a + 1)])
self.spaces[channel].delta_angle = struct.unpack('=d', info[32 * 3:])[0]
num_axes = 3
num_motors = 3
elif self.spaces[channel].type == TYPE_POLAR:
self.spaces[channel].polar_max_r = struct.unpack('=d', info)[0]
num_axes = 3
num_motors = 3
elif self.spaces[channel].type == TYPE_EXTRUDER:
num_axes = struct.unpack('=B', info[:1])[0]
num_motors = num_axes
self.spaces[channel].extruder = []
for a in range(num_axes):
dx, dy, dz = struct.unpack('=ddd', info[1 + 24 * a:1 + 24 * (a + 1)])
self.spaces[channel].extruder.append({'dx': dx, 'dy': dy, 'dz': dz})
elif self.spaces[channel].type == TYPE_FOLLOWER:
num_axes = struct.unpack('=B', info[:1])[0]
num_motors = num_axes
self.spaces[channel].follower = []
for a in range(num_axes):
space, motor = struct.unpack('=BB', info[1 + 2 * a:1 + 2 * (a + 1)])
self.spaces[channel].follower.append({'space': space, 'motor': motor})
else:
log('invalid type %s' % repr(self.spaces[channel].type))
raise AssertionError('invalid space type')
return ([self._read('SPACE_AXIS', channel, axis) for axis in range(num_axes)], [self._read('SPACE_MOTOR', channel, motor) for motor in range(num_motors)])
if cmd == 'GLOBALS':
packet = struct.pack('=B', protocol.command['READ_' + cmd])
elif sub is not None and cmd.startswith('SPACE'):
packet = struct.pack('=BBB', protocol.command['READ_' + cmd], channel, sub)
else:
packet = struct.pack('=BB', protocol.command['READ_' + cmd], channel)
self._send_packet(packet)
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['DATA']
return data
# }}}
def _read_globals(self, update = True): # {{{
data = self._read('GLOBALS', None)
if data is None:
return False
self.queue_length, self.num_pins, num_temps, num_gpios = struct.unpack('=BBBB', data[:4])
self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, self.timeout, self.bed_id, self.fan_id, self.spindle_id, self.feedrate, self.max_deviation, self.max_v, self.current_extruder, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc = struct.unpack('=HHHHHhhhdddBdddd?', data[4:])
while len(self.temps) < num_temps:
self.temps.append(self.Temp(len(self.temps)))
if update:
data = self._read('TEMP', len(self.temps) - 1)
self.temps[-1].read(data)
self.temps = self.temps[:num_temps]
while len(self.gpios) < num_gpios:
self.gpios.append(self.Gpio(len(self.gpios)))
if update:
data = self._read('GPIO', len(self.gpios) - 1)
self.gpios[-1].read(data)
self.gpios = self.gpios[:num_gpios]
return True
# }}}
def _write_globals(self, nt = None, ng = None, update = True): # {{{
if nt is None:
nt = len(self.temps)
if ng is None:
ng = len(self.gpios)
dt = nt - len(self.temps)
dg = ng - len(self.gpios)
data = struct.pack('=BBHHHHHhhhdddBdddd?', nt, ng, self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, int(self.timeout), self.bed_id, self.fan_id, self.spindle_id, self.feedrate, self.max_deviation, self.max_v, self.current_extruder, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc)
self._send_packet(struct.pack('=B', protocol.command['WRITE_GLOBALS']) + data)
self._read_globals(update = True)
if update:
self._globals_update()
for t in range(dt):
self._temp_update(nt - dt + t)
for g in range(dg):
self._gpio_update(ng - dg + g)
return True
# }}}
def _mangle_spi(self): # {{{
ret = []
for bits, data in self.spi_setup:
ret.append('%d:%s' % (bits, ','.join('%02x' % x for x in data)))
return ';'.join(ret)
# }}}
def _unmangle_spi(self, data): # {{{
ret = []
if len(data) > 0:
for p in data.split(';'):
bits, data = p.split(':')
bits = int(bits)
data = [int(x, 16) for x in data.split(',')]
ret.append([bits, data])
return ret
# }}}
def _globals_update(self, target = None): # {{{
if not self.initialized:
return
self._broadcast(target, 'globals_update', [self.name, self.profile, len(self.temps), len(self.gpios), self.user_interface, self.pin_names, self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, self.probe_dist, self.probe_offset, self.probe_safe_dist, self.bed_id, self.fan_id, self.spindle_id, self.unit_name, self.timeout, self.feedrate, self.max_deviation, self.max_v, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc, self.park_after_job, self.sleep_after_job, self.cool_after_job, self._mangle_spi(), self.temp_scale_min, self.temp_scale_max, self.probemap, self.connected, not self.paused and (None if self.gcode_map is None and not self.gcode_file else True)])
# }}}
def _space_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.spaces):
# This can happen if this function is scheduled before changing the number of spaces.
return
self._broadcast(target, 'space_update', which, self.spaces[which].export())
# }}}
def _temp_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.temps):
# This can happen if this function is scheduled before changing the number of temps.
return
self._broadcast(target, 'temp_update', which, self.temps[which].export())
# }}}
def _gpio_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.gpios):
# This can happen if this function is scheduled before changing the number of gpios.
return
self._broadcast(target, 'gpio_update', which, self.gpios[which].export())
# }}}
def _gcode_close(self): # {{{
self.gcode_strings = []
self.gcode_map.close()
os.close(self.gcode_fd)
self.gcode_map = None
self.gcode_fd = -1
# }}}
def _job_done(self, complete, reason): # {{{
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 0, 0, 0, 0xff, 0))
if self.gcode_map is not None:
log(reason)
self._gcode_close()
self.gcode_file = False
#traceback.print_stack()
if self.queue_info is None and self.gcode_id is not None:
log('Job done (%d): %s' % (complete, reason))
self._send(self.gcode_id, 'return', (complete, reason))
self.gcode_id = None
if self.audio_id is not None:
log('Audio done (%d): %s' % (complete, reason))
self._send(self.audio_id, 'return', (complete, reason))
self.audio_id = None
if self.queue_info is None and self.job_current is not None:
if self.job_id is not None:
self._send(self.job_id, 'return', (complete, reason))
self.job_id = None
self.job_current = None
if complete:
self._finish_done()
while self.queue_pos < len(self.queue):
axes, e, f0, f1, v0, v1, which, single, rel = self.queue[self.queue_pos]
self.queue_pos += 1
if id is not None:
self._send(id, 'error', 'aborted')
self.queue = []
self.queue_pos = 0
if self.home_phase is not None:
#log('killing homer')
self.home_phase = None
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
if self.home_cb in self.movecb:
self.movecb.remove(self.home_cb)
if self.home_id is not None:
self._send(self.home_id, 'return', None)
if self.probe_cb in self.movecb:
#log('killing prober')
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
self._globals_update()
# }}}
def _finish_done(self): # {{{
if self.cool_after_job:
for t in range(len(self.temps)):
self.settemp(t, float('nan'))
def maybe_sleep():
if self.sleep_after_job:
self.sleep()
if self.park_after_job:
self.park(cb = maybe_sleep)[1](None)
else:
maybe_sleep()
# }}}
def _unpause(self): # {{{
if self.gcode_file:
self._send_packet(bytes((protocol.command['RESUME'],))) # Just in case.
if self.queue_info is None:
return
#log('doing resume to %d/%d' % (self.queue_info[0], len(self.queue_info[2])))
self.queue = self.queue_info[2]
self.queue_pos = self.queue_info[0]
self.movecb = self.queue_info[3]
self.flushing = self.queue_info[4]
self.resuming = False
self.queue_info = None
self.paused = False
self._globals_update()
# }}}
def _queue_add(self, filename, name): # {{{
name = os.path.splitext(os.path.split(name)[1])[0]
origname = name
i = 0
while name == '' or name in self.jobqueue:
name = '%s-%d' % (origname, i)
i += 1
infilename = filename.encode('utf-8', 'replace')
outfiledir = fhs.write_spool(os.path.join(self.uuid, 'gcode'), dir = True)
if not os.path.isdir(outfiledir):
os.makedirs(outfiledir)
outfilename = os.path.join(outfiledir, name + os.path.extsep + 'bin').encode('utf-8', 'replace')
self._broadcast(None, 'blocked', 'Parsing g-code')
self._send_packet(struct.pack('=BH', protocol.command['PARSE_GCODE'], len(infilename)) + infilename + outfilename)
self._get_reply()
self._refresh_queue()
self._broadcast(None, 'blocked', None)
# }}}
def _audio_add(self, f, name): # {{{
name = os.path.splitext(os.path.split(name)[1])[0]
origname = name
i = 0
while name == '' or name in self.audioqueue:
name = '%s-%d' % (origname, i)
i += 1
try:
wav = wave.open(f)
except:
return 'Unable to open audio file'
rate = wav.getframerate()
channels = wav.getnchannels()
self._broadcast(None, 'blocked', 'Parsing audio')
data = wav.readframes(wav.getnframes())
# Data is 16 bit signed ints per channel, but it is read as bytes. First convert it to 16 bit numbers.
data = [(h << 8) + l if h < 128 else(h << 8) + l -(1 << 16) for l, h in zip(data[::2 * channels], data[1::2 * channels])]
bit = 0
byte = 0
with fhs.write_spool(os.path.join(self.uuid, 'audio', name + os.path.extsep + 'bin'), text = False) as dst:
dst.write(struct.pack('@d', rate))
for t, sample in enumerate(data):
if sample > 0:
byte |= 1 << bit
bit += 1
if bit >= 8:
dst.write(bytes((byte,)))
byte = 0
bit = 0
self.audioqueue[os.path.splitext(name)[0]] = wav.getnframes()
self._broadcast(None, 'blocked', '')
self._broadcast(None, 'audioqueue', list(self.audioqueue.keys()))
return ''
# }}}
def _do_queue(self): # {{{
#log('queue %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
if self.paused and not self.resuming and len(self.queue) == 0:
#log('queue is empty')
return
while not self.wait and (self.queue_pos < len(self.queue) or self.resuming):
#log('queue not empty %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
if self.queue_pos >= len(self.queue):
self._unpause()
#log('unpaused, %d %d' % (self.queue_pos, len(self.queue)))
if self.queue_pos >= len(self.queue):
break
axes, f0, f1, v0, v1, probe, single, rel = self.queue[self.queue_pos]
#log('queueing %s' % repr((axes, f0, f1, probe)))
self.queue_pos += 1
# Turn sequences into a dict.
if isinstance(axes, (list, tuple)):
adict = {}
for s, data in enumerate(axes):
adict[s] = data
axes = adict
# Make sure the keys are ints.
adict = {}
#log('axes: %s' % repr(axes))
for k in axes:
adict[int(k)] = axes[k]
axes = adict
a = {}
a0 = 0
for i, sp in enumerate(self.spaces):
# Only handle spaces that are specified.
if i not in axes or axes[i] is None:
a0 += len(sp.axis)
continue
# Handle sequences.
if isinstance(axes[i], (list, tuple)):
for ij, axis in enumerate(axes[i]):
if ij >= len(sp.axis):
log('ignoring nonexistent axis %d %d' % (i, ij))
continue
if axis is not None and not math.isnan(axis):
if i == 1 and ij != self.current_extruder:
#log('setting current extruder to %d' % ij)
self.current_extruder = ij
self._write_globals()
if rel:
axis += sp.get_current_pos(ij)
# Limit values for axis.
if axis > sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds max' % (i, ij, axis))
axis = sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset)
if axis < sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds min' % (i, ij, axis))
axis = sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset)
a[a0 + ij] = axis
else:
for j, axis in tuple(axes[i].items()):
ij = int(j)
if ij >= len(sp.axis):
log('ignoring nonexistent axis %d %d' % (i, ij))
continue
if axis is not None and not math.isnan(axis):
if i == 1 and ij != self.current_extruder:
log('Setting current extruder to %d' % ij)
self.current_extruder = ij
self._write_globals(len(self.temps), len(self.gpios))
if rel:
axis += sp.get_current_pos(ij)
# Limit values for axis.
if axis > sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds max' % (i, ij, axis))
axis = sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset)
if axis < sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds min' % (i, ij, axis))
axis = sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset)
log('new value: %f' % axis)
a[a0 + ij] = axis
a0 += len(sp.axis)
targets = [0] * (((2 + a0 - 1) >> 3) + 1)
axes = a
args = b''
# Set defaults for feedrates.
if v0 is not None:
assert f0 is None
f0 = -v0
elif f0 is None:
f0 = float('inf')
if v1 is not None:
assert f1 is None
f1 = -v1
elif f1 is None:
f1 = f0
assert f0 != 0 or f1 != 0
# If feedrates are equal to firmware defaults, don't send them.
if f0 != float('inf'):
targets[0] |= 1 << 0
args += struct.pack('=d', f0)
if f1 != f0:
targets[0] |= 1 << 1
args += struct.pack('=d', f1)
a = list(axes.keys())
a.sort()
#log('f0: %f f1: %f' %(f0, f1))
for axis in a:
if math.isnan(axes[axis]):
continue
targets[(axis + 2) >> 3] |= 1 << ((axis + 2) & 0x7)
args += struct.pack('=d', axes[axis])
#log('axis %d: %f' %(axis, axes[axis]))
if probe:
p = bytes((protocol.command['PROBE'],))
elif single:
p = bytes((protocol.command['SINGLE'],))
else:
p = bytes((protocol.command['LINE'],))
self.movewait += 1
#log('movewait +1 -> %d' % self.movewait)
#log('queueing %s' % repr((axes, f0, f1, self.flushing)))
self._send_packet(p + bytes(targets) + args, move = True)
if self.flushing is None:
self.flushing = False
#log('queue done %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
# }}}
def _do_home(self, done = None): # {{{
#log('do_home: %s %s' % (self.home_phase, done))
# 0: Prepare for next order.
# 1: Move to limits. (enter from loop after 2).
# 2: Finish moving to limits; loop home_order; move slowly away from switch.
# 3: Set current position; move delta and followers.
# 4: Move within limits.
# 5: Return.
#log('home %s %s' % (self.home_phase, repr(self.home_target)))
#traceback.print_stack()
home_v = 50 / self.feedrate
def mktarget():
ret = {}
for s, m in self.home_target:
if s not in ret:
ret[s] = {}
ret[s][m] = self.home_target[(s, m)]
return ret
if self.home_phase is None:
#log('_do_home ignored because home_phase is None')
return
if self.home_phase == 0:
if done is not None:
# Continuing call received after homing was aborted; ignore.
return
# Initial call; start homing.
self.home_phase = 1
# If it is currently moving, doing the things below without pausing causes stall responses.
self.pause(True, False)
self.sleep(False)
self.home_limits = [(a['min'], a['max']) for a in self.spaces[0].axis]
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = float('-inf'), max = float('inf'))
self.home_orig_type = self.spaces[0].type
self.expert_set_space(0, type = TYPE_CARTESIAN)
n = set()
for s in self.spaces:
for m in s.motor:
if self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin']):
n.add(m['home_order'])
if len(n) == 0:
self.home_phase = 4
else:
self.home_order = min(n)
# Fall through.
if self.home_phase == 1:
# Move to limit.
self.home_phase = 2
self.home_motors = []
for s, sp in enumerate(self.spaces):
for i, m in enumerate(sp.motor):
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and m['home_order'] == self.home_order:
self.home_motors.append((s, i, sp.axis[i], m))
self.limits[s].clear()
self.home_target = {}
dist = 1000 #TODO: use better value.
for s, i, a, m in self.home_motors:
self.spaces[s].set_current_pos(i, 0)
if self._pin_valid(m['limit_max_pin']):
self.home_target[(s, i)] = dist - (0 if s != 0 or i != 2 else self.zoffset)
else:
self.home_target[(s, i)] = -dist - (0 if s != 0 or i != 2 else self.zoffset)
if len(self.home_target) > 0:
self.home_cb[0] = [(s, k) for s, k in self.home_target.keys()]
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), f0 = home_v / dist, force = True, single = True)
return
# Fall through.
if self.home_phase == 2:
# Continue moving to find limit switch.
found_limits = False
for s, sp in enumerate(self.spaces):
for a in self.limits[s].keys():
if (s, a) in self.home_target:
#log('found limit %d %d' % (s, a))
self.home_target.pop((s, a))
found_limits = True
# Make sure no attempt is made to move through the limit switch (not even by rounding errors).
sp.set_current_pos(a, sp.get_current_pos(a))
# Repeat until move is done, or all limits are hit.
if (not done or found_limits) and len(self.home_target) > 0:
self.home_cb[0] = list(self.home_target.keys())
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("0 t %s" % (self.home_target))
k = tuple(self.home_target.keys())[0]
dist = abs(self.home_target[k] - self.spaces[k[0]].get_current_pos(k[1]))
if dist > 0:
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), f0 = home_v / dist, force = True, single = True)
return
# Fall through.
if len(self.home_target) > 0:
log('Warning: not all limits were found during homing')
n = set()
for s in self.spaces:
for m in s.motor:
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and m['home_order'] > self.home_order:
n.add(m['home_order'])
if len(n) > 0:
self.home_phase = 1
self.home_order = min(n)
return self._do_home()
# Move away slowly.
data = b''
num = 0
for s, spc in enumerate(self.spaces):
for m in spc.motor:
if self._pin_valid(m['limit_max_pin']):
data += b'\xff'
num += 1
elif self._pin_valid(m['limit_min_pin']):
data += b'\x01'
num += 1
else:
data += b'\x00'
self.home_phase = 3
if num > 0:
dprint('homing', data)
self._send_packet(bytes((protocol.command['HOME'],)) + data)
return
# Fall through.
if self.home_phase == 3:
# Move followers and delta into alignment.
self.home_return = []
for s, sp in enumerate(self.spaces):
self.home_return.append([])
for i, m in enumerate(sp.motor):
if i in self.limits[s]:
if not math.isnan(m['home_pos']):
#log('set %d %d %f' % (s, i, m['home_pos']))
self.home_return[-1].append(m['home_pos'] - sp.get_current_pos(i))
sp.set_current_pos(i, m['home_pos'])
else:
#log('limited zeroset %d %d' % (s, i))
self.home_return[-1].append(-sp.get_current_pos(i))
sp.set_current_pos(i, 0)
else:
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and not math.isnan(m['home_pos']):
#log('defset %d %d %f' % (s, i, m['home_pos']))
self.home_return[-1].append(m['home_pos'] - sp.get_current_pos(i))
sp.set_current_pos(i, m['home_pos'])
else:
#log('unlimited zeroset %d %d' % (s, i))
self.home_return[-1].append(-sp.get_current_pos(i))
sp.set_current_pos(i, 0)
# Pre-insert delta axes as followers to align.
groups = ([], [], []) # min limits; max limits; just move.
if self.home_orig_type == TYPE_DELTA:
groups[1].append([])
for i, m in enumerate(self.spaces[0].motor):
groups[1][-1].append((0, i))
# Align followers.
for i, m in enumerate(self.spaces[2].motor):
fs = self.spaces[2].follower[i]['space']
fm = self.spaces[2].follower[i]['motor']
# Use 2, not len(self.spaces), because following followers is not supported.
if not 0 <= fs < 2 or not 0 <= fm < len(self.spaces[fs].motor):
continue
if self._pin_valid(m['limit_max_pin']):
if not self._pin_valid(self.spaces[fs].motor[fm]['limit_max_pin']) and self._pin_valid(self.spaces[fs].motor[fm]['limit_min_pin']):
# Opposite limit pin: don't compare values.
groups[2].append((2, i))
continue
for g in groups[1]:
if (fs, fm) in g:
g.append((2, i))
break
else:
groups[1].append([(2, i), (fs, fm)])
elif self._pin_valid(m['limit_min_pin']):
if self._pin_valid(self.spaces[fs].motor[fm]['limit_max_pin']):
# Opposite limit pin: don't compare values.
groups[2].append((2, i))
continue
for g in groups[0]:
if (fs, fm) in g:
g.append((2, i))
break
else:
groups[0].append([(2, i), (fs, fm)])
self.home_target = {}
for g in groups[0]:
target = max(g, key = lambda x: self.spaces[x[0]].motor[x[1]]['home_pos'])
target = self.spaces[target[0]].motor[target[1]]['home_pos']
for s, m in g:
if target != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = target - offset
for g in groups[1]:
target = min(g, key = lambda x: self.spaces[x[0]].motor[x[1]]['home_pos'])
target = self.spaces[target[0]].motor[target[1]]['home_pos']
for s, m in g:
if target != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = target - offset
for s, m in groups[2]:
fs = self.spaces[s].follower[m]['space']
fm = self.spaces[s].follower[m]['motor']
if self.spaces[fs].motor[fm]['home_pos'] != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = self.spaces[fs].motor[fm]['home_pos'] - offset
self.home_phase = 4
if len(self.home_target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), force = True, single = True)
return
# Fall through.
if self.home_phase == 4:
# Reset space type and move to pos2.
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
target = {}
for s, sp in enumerate(self.spaces[:2]):
for i, a in enumerate(sp.axis):
if not math.isnan(a['home_pos2']):
offset = (0 if s != 0 or i != 2 else self.zoffset)
if s not in target:
target[s] = {}
target[s][i] = a['home_pos2'] - offset
self.home_phase = 5
if len(target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, target))
self.line(target, force = True)
return
# Fall through.
if self.home_phase == 5:
# Move within bounds.
target = {}
for s, sp in enumerate(self.spaces[:2]):
for i, a in enumerate(sp.axis):
current = sp.get_current_pos(i)
offset = (0 if s != 0 or i != 2 else self.zoffset)
if current > a['max'] - offset:
if s not in target:
target[s] = {}
target[s][i] = a['max'] - offset
elif current < a['min'] - offset:
if s not in target:
target[s] = {}
target[s][i] = a['min'] - offset
self.home_phase = 6
if len(target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, target))
self.line(target, force = True)
#log('movecb: ' + repr(self.movecb))
return
# Fall through.
if self.home_phase == 6:
self.home_phase = None
self.position_valid = True
if self.home_id is not None:
self._send(self.home_id, 'return', self.home_return)
self.home_return = None
if self.home_done_cb is not None:
call_queue.append((self.home_done_cb, []))
self.home_done_cb = None
return
log('Internal error: invalid home phase')
# }}}
def _handle_one_probe(self, good): # {{{
if good is None:
return
pos = self.get_axis_pos(0)
self._send_packet(struct.pack('=Bddd', protocol.command['ADJUSTPROBE'], pos[0], pos[1], pos[2] + self.zoffset))
self.probe_cb[1] = lambda good: self.request_confirmation("Continue?")[1](False) if good is not None else None
self.movecb.append(self.probe_cb)
self.line([{2: self.probe_safe_dist}], relative = True)
# }}}
def _one_probe(self): # {{{
self.probe_cb[1] = self._handle_one_probe
self.movecb.append(self.probe_cb)
z = self.get_axis_pos(0, 2)
z_low = self.spaces[0].axis[2]['min']
self.line([{2: z_low}], f0 = float(self.probe_speed) / (z - z_low) if z > z_low else float('inf'), probe = True)
# }}}
def _do_probe(self, id, x, y, z, phase = 0, good = True): # {{{
#log('probe %d %s' % (phase, good))
# Map = [[x0, y0, x1, y1], [nx, ny, angle], [[...], [...], ...]]
if good is None:
# This means the probe has been aborted.
#log('abort probe')
self.probing = False
if id is not None:
self._send(id, 'error', 'aborted')
#self._job_done(False, 'Probe aborted')
return
self.probing = True
if not self.position_valid:
self.home(cb = lambda: self._do_probe(id, x, y, z, phase, True), abort = False)[1](None)
return
p = self.probemap
if phase == 0:
if y > p[1][1]:
# Done.
self.probing = False
self._check_probemap()
if id is not None:
self._send(id, 'return', p)
for y, c in enumerate(p[2]):
for x, o in enumerate(c):
log('map %f %f %f' % (p[0][0] + p[0][2] * x / p[1][0], p[0][1] + p[0][3] * y / p[1][1], o))
sys.stderr.write('\n')
return
# Goto x,y
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 1, good)
self.movecb.append(self.probe_cb)
px = p[0][2] + p[0][4] * x / p[1][0]
py = p[0][3] + p[0][5] * y / p[1][1]
log(repr((p, px, py, x, y, self.gcode_angle)))
self.line([[p[0][0] + px * self.gcode_angle[1] - py * self.gcode_angle[0], p[0][1] + py * self.gcode_angle[1] + px * self.gcode_angle[0]]])
elif phase == 1:
# Probe
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 2, good)
if self._pin_valid(self.probe_pin):
self.movecb.append(self.probe_cb)
z_low = self.spaces[0].axis[2]['min']
self.line([{2: z_low}], f0 = float(self.probe_speed) / (z - z_low) if z > z_low else float('inf'), probe = True)
else:
#log('confirm probe')
self.request_confirmation('Please move the tool to the surface')[1](False)
else:
# Record result
if good:
log('Warning: probe did not hit anything')
z = self.spaces[0].get_current_pos(2)
p[2][y][x].append(z + self.zoffset)
if len(p[2][y][x]) >= self.num_probes:
p[2][y][x].sort()
trash = self.num_probes // 3
if trash == 0:
p[2][y][x] = sum(p[2][y][x]) / len(p[2][y][x])
else:
p[2][y][x] = sum(p[2][y][x][trash:-trash]) / (len(p[2][y][x]) - 2 * trash)
if y & 1:
x -= 1
if x < 0:
x = 0
y += 1
else:
x += 1
if x > p[1][0]:
x = p[1][0]
y += 1
z += self.probe_safe_dist
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 0, good)
self.movecb.append(self.probe_cb)
# Retract
self.line([{2: z}])
# }}}
def _check_probemap(self): # {{{
'''Check the probemap, and save it if it is valid; discard it otherwise.
@returns: True if the probemap is valid, False otherwise.'''
if not isinstance(self.probemap, (list, tuple)) or len(self.probemap) != 3:
log('probemap check failed: not a list of length 3')
self.probemap = None
self._globals_update()
return False
limits, nums, probes = self.probemap
if not isinstance(limits, (list, tuple)) or not isinstance(nums, (list, tuple)) or len(limits) != 6 or len(nums) != 3:
log('probemap check failed: first lists are not length 6 and 3')
self.probemap = None
self._globals_update()
return False
if not all(isinstance(e, (float, int)) and not math.isnan(e) for e in limits):
log('probemap check failed: limits must be numbers')
self.probemap = None
self._globals_update()
return False
if not all(isinstance(e, t) and not math.isnan(e) for e, t in zip(nums, (int, int, (float, int)))):
log('probemap check failed: nums and angle must be numbers')
self.probemap = None
self._globals_update()
return False
nx, ny, angle = nums
if len(probes) != ny + 1 or not all(isinstance(e, (list, tuple)) and len(e) == nx + 1 for e in probes):
log('probemap check failed: probe map is incorrect size')
self.probemap = None
self._globals_update()
return False
if not all(all(isinstance(e, (float, int)) and not math.isnan(e) for e in f) for f in probes):
log('probemap check failed: probe points must all be numbers')
self.probemap = None
self._globals_update()
return False
with fhs.write_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False) as probemap_file:
# Map = [[x, y, w, h], [nx, ny], [[...], [...], ...]]
sina, cosa = self.gcode_angle
targetx, targety, x0, y0, w, h = self.probemap[0]
probemap_file.write(struct.pack('@ddddddddLLd', targetx, targety, x0, y0, w, h, sina, cosa, *self.probemap[1]))
for y in range(self.probemap[1][1] + 1):
for x in range(self.probemap[1][0] + 1):
probemap_file.write(struct.pack('@d', self.probemap[2][y][x]))
self._globals_update()
return True
# }}}
def _start_job(self, paused): # {{{
# Set all extruders to 0.
for i, e in enumerate(self.spaces[1].axis):
self.set_axis_pos(1, i, 0)
def cb():
#log('start job %s' % self.job_current)
self._gcode_run(self.job_current, abort = False, paused = paused)
if not self.position_valid:
self.park(cb = cb, abort = False)[1](None)
else:
cb()
self.gcode_id = None
# }}}
def _gcode_run(self, src, abort = True, paused = False): # {{{
if self.parking:
return
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
if 0 <= self.bed_id < len(self.temps):
self.btemp = self.temps[self.bed_id].value
else:
self.btemp = float('nan')
if abort:
self._unpause()
self._job_done(False, 'aborted by starting new job')
self.queue_info = None
# Disable all alarms.
for i in range(len(self.temps)):
self.waittemp(i, None, None)
self.paused = paused
self._globals_update()
self.sleep(False)
if len(self.spaces) > 1:
for e in range(len(self.spaces[1].axis)):
self.set_axis_pos(1, e, 0)
filename = fhs.read_spool(os.path.join(self.uuid, 'gcode', src + os.extsep + 'bin'), text = False, opened = False)
self.total_time = self.jobqueue[src][-2:]
self.gcode_fd = os.open(filename, os.O_RDONLY)
self.gcode_map = mmap.mmap(self.gcode_fd, 0, prot = mmap.PROT_READ)
filesize = os.fstat(self.gcode_fd).st_size
bboxsize = 8 * struct.calcsize('=d')
def unpack(format, pos):
return struct.unpack(format, self.gcode_map[pos:pos + struct.calcsize(format)])
num_strings = unpack('=I', filesize - bboxsize - struct.calcsize('=I'))[0]
self.gcode_strings = []
sizes = [unpack('=I', filesize - bboxsize - struct.calcsize('=I') * (num_strings + 1 - x))[0] for x in range(num_strings)]
first_string = filesize - bboxsize - struct.calcsize('=I') * (num_strings + 1) - sum(sizes)
pos = 0
for x in range(num_strings):
self.gcode_strings.append(self.gcode_map[first_string + pos:first_string + pos + sizes[x]].decode('utf-8', 'replace'))
pos += sizes[x]
self.gcode_num_records = first_string / struct.calcsize(record_format)
if self.probemap is None:
encoded_probemap_filename = b''
else:
encoded_probemap_filename = fhs.read_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False, opened = False).encode('utf-8')
self.gcode_file = True
self._globals_update()
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 1 if not paused and self.confirmer is None else 0, self.gcode_angle[0], self.gcode_angle[1], 0xff, len(encoded_probemap_filename)) + filename.encode('utf-8') + encoded_probemap_filename)
# }}}
def _gcode_parse(self, src, name): # {{{
assert len(self.spaces) > 0
self._broadcast(None, 'blocked', 'Parsing g-code')
errors = []
mode = None
message = None
bbox = [None] * 6
bbox_last = [None] * 6
strings = ['']
unit = 1.
arc_normal = (0, 0, 1)
rel = False
erel = False
pos = [[float('nan') for a in range(6)], [0., 0.], float('inf')]
time_dist = [0., 0.]
pending = []
arc = [] # center, r, diff, angle_start, angle_diff
tool_changed = False
def add_timedist(type, nums):
if type == protocol.parsed['LINE']:
if nums[-2] == float('inf'):
extra = sum((nums[2 * i + 1] - nums[2 * i + 2]) ** 2 for i in range(3)) ** .5
if not math.isnan(extra):
time_dist[1] += extra
else:
extra = 2 / (nums[-2] + nums[-1])
if not math.isnan(extra):
time_dist[0] += extra
elif type == protocol.parsed['ARC']:
pass # TODO: add time+dist.
elif type == protocol.parsed['WAIT']:
time_dist[0] += nums[1]
return nums + time_dist
with fhs.write_spool(os.path.join(self.uuid, 'gcode', os.path.splitext(name)[0] + os.path.extsep + 'bin'), text = False) as dst:
epsilon = .5 # TODO: check if this should be configurable
aepsilon = math.radians(36) # TODO: check if this should be configurable
rlimit = 500 # TODO: check if this should be configurable
def center(a, b, c):
'''Given 3 points, determine center, radius, angles of points on circle, deviation of polygon from circle.'''
try:
x0, y0, z0 = a
x1, y1, z1 = b
x2, y2, z2 = c
xc = ((y0 - y1) * (y0 ** 2 - y2 ** 2 + x0 ** 2 - x2 ** 2) - (y0 - y2) * (x0 ** 2 - x1 ** 2 + y0 ** 2 - y1 ** 2)) / (2 * (-x0 * y1 - x2 * y0 + x2 * y1 + x1 * y0 + x0 * y2 - x1 * y2))
yc = ((x0 - x1) * (x0 ** 2 - x2 ** 2 + y0 ** 2 - y2 ** 2) - (x0 - x2) * (y0 ** 2 - y1 ** 2 + x0 ** 2 - x1 ** 2)) / (2 * (-y0 * x1 - y2 * x0 + y2 * x1 + y1 * x0 + y0 * x2 - y1 * x2))
r = ((xc - x0) ** 2 + (yc - y0) ** 2) ** .5
except ZeroDivisionError:
#log('div by 0: %s' % repr((a, b, c)))
return (None, None, None, float('inf'))
angles = []
ref = math.atan2(b[1] - yc, b[0] - xc)
for p in a, b, c:
angle = math.atan2(p[1] - yc, p[0] - xc)
angles.append((angle - ref + math.pi) % (2 * math.pi) + ref - math.pi)
mid = [(p2 + p1) / 2 for p1, p2 in zip(a, c)]
amid = (angles[0] + angles[2]) / 2
cmid = [math.cos(amid) * r + xc, math.sin(amid) * r + yc]
#log('for diff center (%f %f) mids %s %s amid %f angles %s' % (xc, yc, mid, cmid, amid, angles))
diff = sum([(p2 - p1) ** 2 for p1, p2 in zip(mid, cmid)])
#log('center returns %s' % repr(((xc, yc, z0), r, angles, diff)))
return ((xc, yc, z0), r, angles, diff)
def add_record(type, nums = None, force = False):
if nums is None:
nums = []
if isinstance(nums, dict):
nums = [nums['T'], nums['X'], nums['Y'], nums['Z'], nums['E'], nums['f'], nums['F']]
nums += [0] * (7 - len(nums))
if not force and type == protocol.parsed['LINE']:
# Update bounding box.
for i in range(3):
value = nums[i + 1]
if math.isnan(value):
continue
if bbox[2 * i] is None or value < bbox[2 * i]:
#log('new min bbox %f: %f from %f' % (i, value / 25.4, float('nan' if bbox[2 * i] is None else bbox[2 * i] / 25.4)))
bbox[2 * i] = value
if bbox[2 * i + 1] is None or value > bbox[2 * i + 1]:
#log('new max bbox %f: %f from %f' % (i, value / 25.4, float('nan' if bbox[2 * i + 1] is None else bbox[2 * i + 1] / 25.4)))
bbox[2 * i + 1] = value
# Analyze this move in combination with pending moves.
if len(pending) == 0:
pending.append([0, pos[0][0], pos[0][1], pos[0][2], pos[1][nums[0]], pos[2], pos[2]])
pending.append(nums)
if len(pending) == 2:
if not config['arc'] or pending[0][3] != pending[1][3]:
#log('non equal z')
flush_pending()
return
return
if len(pending) == 3:
# If the points are not on a circle with equal angles, or the angle is too large, or the radius is too large, push pending[1] through to output.
# Otherwise, record settings.
#log('center for arc start')
arc_ctr, arc_r, angles, arc_diff = center(pending[0][1:4], pending[1][1:4], pending[2][1:4])
if arc_diff > epsilon or abs(angles[1] - angles[0] - angles[2] + angles[1]) > aepsilon or arc_r > rlimit:
#log('not arc: %s' % repr((arc_ctr, arc_r, angles, arc_diff)))
dst.write(struct.pack('=Bl' + 'd' * 8, protocol.parsed['LINE'], *add_timedist(type, pending[1])))
pending.pop(0)
return
arc[:] = [arc_ctr, arc_r, arc_diff, angles[0], (angles[2] - angles[0]) / 2]
return
current_angle = arc[4] * (len(pending) - 1)
a = arc[3] + current_angle
p = [arc[0][0] + math.cos(a) * arc[1], arc[0][1] + math.sin(a) * arc[1]]
# If new point doesn't fit on circle, push pending as circle to output.
# It should allow up to 360, but be safe and break those in two; also makes generating svgs easier.
if current_angle >= math.radians(180):
#log('flush: more than 180 degrees')
flush_pending()
elif (p[0] - pending[-1][1]) ** 2 + (p[1] - pending[-1][2]) ** 2 > epsilon ** 2:
#log('flush: point too far from prediction (%s %s)' % (p, pending[-1][1:3]))
flush_pending()
elif pending[0][3] != pending[-1][3]:
#log('flush: non equal z')
flush_pending()
return
#if not force:
#log('non-line %s' % type)
flush_pending()
#log('force or other ' + repr((type, nums, add_timedist(type, nums))))
dst.write(struct.pack('=Bl' + 'd' * 8, type, *add_timedist(type, nums)))
def flush_pending():
if len(pending) >= 6:
#log('arc')
flush_arc()
#else:
#log('no arc %d' % len(pending))
tmp = pending[1:]
pending[:] = []
for p in tmp:
add_record(protocol.parsed['LINE'], p, True)
def flush_arc():
start = pending[0]
end = pending[-2]
tmp = pending[-1]
#log('center for flush')
arc_ctr, arc_r, angles, arc_diff = center(start[1:4], pending[len(pending) // 2][1:4], end[1:4])
if arc_diff < 2 * epsilon or arc_ctr is None:
#log('refuse arc: %s' % repr((arc_ctr, arc_diff, epsilon, arc_r, angles)))
# This is really a line, or it is not detected as an arc; don't turn it into an arc.
return
pending[:] = []
add_record(protocol.parsed['PRE_ARC'], {'X': arc_ctr[0], 'Y': arc_ctr[1], 'Z': start[3], 'E': 0, 'f': 0, 'F': 1 if arc[4] > 0 else -1, 'T': 0}, True)
add_record(protocol.parsed['ARC'], {'X': end[1], 'Y': end[2], 'Z': end[3], 'E': pos[1][current_extruder], 'f': -pos[2], 'F': -pos[2], 'T': current_extruder}, True)
pending.append(end)
pending.append(tmp)
def add_string(string):
if string is None:
return 0
if string not in strings:
strings.append(string)
return strings.index(string)
current_extruder = 0
for lineno, origline in enumerate(src):
line = origline.strip()
origline = line
#log('parsing %s' % line)
# Get rid of line numbers and checksums.
if line.startswith('N'):
r = re.match(r'N(\d+)\s+(.*?)\*\d+\s*$', line)
if not r:
r = re.match(r'N(\d+)\s+(.*?)\s*$', line)
if not r:
# Invalid line; ignore it.
errors.append('%d:ignoring invalid gcode: %s' % (lineno, origline))
continue
lineno = int(r.group(1))
line = r.group(2)
else:
lineno += 1
comment = ''
while '(' in line:
b = line.index('(')
e = line.find(')', b)
if e < 0:
errors.append('%d:ignoring line with unterminated comment: %s' % (lineno, origline))
continue
comment = line[b + 1:e].strip()
line = line[:b] + ' ' + line[e + 1:].strip()
if ';' in line:
p = line.index(';')
comment = line[p + 1:].strip()
line = line[:p].strip()
if comment.upper().startswith('MSG,'):
message = comment[4:].strip()
elif comment.startswith('SYSTEM:'):
if not re.match(self.allow_system, comment[7:]):
errors.append('%d:Warning: system command %s is forbidden and will not be run' % (lineno, comment[7:]))
add_record(protocol.parsed['SYSTEM'], [add_string(comment[7:])])
continue
if line == '':
continue
line = line.split()
while len(line) > 0:
if mode is None or line[0][0] in 'GMTDS':
if len(line[0]) < 2:
errors.append('%d:ignoring unparsable line: %s' % (lineno, origline))
break
try:
cmd = line[0][0], int(line[0][1:])
except:
errors.append('%d:parse error in line: %s' % (lineno, origline))
traceback.print_exc()
break
line = line[1:]
else:
cmd = mode
args = {}
success = True
for i, a in enumerate(line):
if a[0] in 'GMD':
line = line[i:]
break
try:
args[a[0]] = float(a[1:])
except:
errors.append('%d:ignoring invalid gcode: %s' % (lineno, origline))
success = False
break
else:
line = []
if not success:
break
if cmd == ('M', 2):
# Program end.
break
elif cmd[0] == 'T':
target = cmd[1]
if target >= len(pos[1]):
pos[1].extend([0.] * (target - len(pos[1]) + 1))
current_extruder = target
# Force update of extruder.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
continue
elif cmd == ('G', 17):
arc_normal = (0, 0, 1)
continue
elif cmd == ('G', 18):
arc_normal = (0, 1, 0)
continue
elif cmd == ('G', 19):
arc_normal = (1, 0, 0)
continue
elif cmd == ('G', 20):
unit = 25.4
continue
elif cmd == ('G', 21):
unit = 1.
continue
elif cmd == ('G', 90):
rel = False
erel = False
continue
elif cmd == ('G', 91):
rel = True
erel = True
continue
elif cmd == ('M', 82):
erel = False
continue
elif cmd == ('M', 83):
erel = True
continue
elif cmd == ('M', 84):
for e in range(len(pos[1])):
pos[1][e] = 0.
elif cmd == ('G', 92):
if 'E' not in args:
continue
args['E'] *= unit
pos[1][current_extruder] = args['E']
elif cmd[0] == 'M' and cmd[1] in (104, 109, 116):
args['E'] = int(args['T']) if 'T' in args else current_extruder
if cmd == ('M', 140):
cmd = ('M', 104)
args['E'] = -1
elif cmd == ('M', 190):
cmd = ('M', 109)
args['E'] = -1
elif cmd == ('M', 6):
# Tool change: park and remember to probe.
cmd = ('G', 28)
tool_changed = True
if cmd == ('G', 28):
nums = [current_extruder]
if len(self.spaces) > 1 and len(self.spaces[1].axis) > current_extruder:
pos[1][current_extruder] = 0.
add_record(protocol.parsed['PARK'])
for a in range(len(pos[0])):
if len(self.spaces[0].axis) > a and not math.isnan(self.spaces[0].axis[a]['park']):
pos[0][a] = float('nan')
elif cmd[0] == 'G' and cmd[1] in (0, 1, 81):
if cmd[1] != 0:
mode = cmd
components = {'X': None, 'Y': None, 'Z': None, 'A': None, 'B': None, 'C': None, 'E': None, 'F': None, 'R': None}
for c in args:
if c not in components:
errors.append('%d:invalid component %s' % (lineno, c))
continue
assert components[c] is None
components[c] = args[c]
f0 = pos[2]
if components['F'] is not None:
pos[2] = components['F'] * unit / 60
oldpos = pos[0][:], pos[1][:]
if cmd[1] != 81:
if components['E'] is not None:
if erel:
estep = components['E'] * unit
else:
estep = components['E'] * unit - pos[1][current_extruder]
pos[1][current_extruder] += estep
else:
estep = 0
else:
estep = 0
if components['R'] is not None:
if rel:
r = pos[0][2] + components['R'] * unit
else:
r = components['R'] * unit
for axis in range(6):
value = components['XYZABC'[axis]]
if value is not None:
if rel:
pos[0][axis] += value * unit
else:
pos[0][axis] = value * unit
if axis == 2:
z = pos[0][2]
if cmd[1] != 81:
dist = sum([0] + [(pos[0][x] - oldpos[0][x]) ** 2 for x in range(3) if not math.isnan(pos[0][x] - oldpos[0][x])]) ** .5
if dist > 0:
#if f0 is None:
# f0 = pos[1][current_extruder]
f0 = pos[2] # Always use new value.
if f0 == 0:
f0 = float('inf')
if math.isnan(dist):
dist = 0
if all((math.isnan(pos[0][i]) and math.isnan(oldpos[0][i])) or pos[0][i] == oldpos[0][i] for i in range(3, 6)):
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': f0 / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'F': pos[2] / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'T': current_extruder})
else:
add_record(protocol.parsed['PRE_LINE'], {'X': pos[0][3], 'Y': pos[0][4], 'Z': pos[0][5], 'E': float('NaN'), 'f': float('NaN'), 'F': float('NaN'), 'T': current_extruder})
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': f0 / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'F': pos[2] / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'T': current_extruder})
else:
# If old pos is unknown, use safe distance.
if math.isnan(oldpos[0][2]):
oldpos[0][2] = r
# Drill cycle.
# Only support OLD_Z (G90) retract mode; don't support repeats(L).
# goto x,y
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# goto r
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': r, 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# goto z; this is always straight down, because the move before and after it are also vertical.
if z != r:
f0 = pos[2] / abs(z - r)
if math.isnan(f0):
f0 = float('inf')
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': z, 'E': 0, 'f': f0, 'F': f0, 'T': current_extruder})
# retract; this is always straight up, because the move before and after it are also non-horizontal.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# empty move; this makes sure the previous move is entirely vertical.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# Set up current z position so next G81 will work.
pos[0][2] = oldpos[0][2]
elif cmd[0] == 'G' and cmd[1] in (2, 3):
# Arc.
mode = cmd
components = {'X': None, 'Y': None, 'Z': None, 'E': None, 'F': None, 'I': None, 'J': None, 'K': None}
for c in args:
if c not in components:
errors.append('%d:invalid arc component %s' % (lineno, c))
continue
assert components[c] is None
components[c] = args[c]
f0 = pos[2]
if components['F'] is not None:
pos[2] = components['F'] * unit / 60
oldpos = pos[0][:], pos[1][:]
if components['E'] is not None:
if erel:
estep = components['E'] * unit - pos[1][current_extruder]
else:
estep = components['E'] * unit
pos[1][current_extruder] += estep
else:
estep = 0
center = [None] * 3
for axis in range(3):
value = components[chr(b'X'[0] + axis)]
if value is not None:
if rel:
pos[0][axis] += value * unit
else:
pos[0][axis] = value * unit
if axis == 2:
z = pos[0][2]
value = components[chr(b'I'[0] + axis)]
if value is not None:
center[axis] = oldpos[0][axis] + value
else:
center[axis] = oldpos[0][axis]
s = -1 if cmd[1] == 2 else 1
add_record(protocol.parsed['PRE_ARC'], {'X': center[0], 'Y': center[1], 'Z': center[2], 'E': s * arc_normal[0], 'f': s * arc_normal[1], 'F': s * arc_normal[2], 'T': 0})
add_record(protocol.parsed['ARC'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': -f0, 'F': -pos[2], 'T': current_extruder})
elif cmd == ('G', 4):
add_record(protocol.parsed['WAIT'], [0, float(args['S']) if 'S' in args else float(args['P']) / 1000 if 'P' in args else 0])
elif cmd == ('G', 92):
add_record(protocol.parsed['SETPOS'], [current_extruder, args['E']])
elif cmd == ('G', 94):
# Set feedrate to units per minute; this is always used, and it shouldn't raise an error.
pass
elif cmd == ('M', 0):
add_record(protocol.parsed['CONFIRM'], [add_string(message), 1 if tool_changed else 0])
tool_changed = False
elif cmd == ('M', 3):
# Spindle on, clockwise.
add_record(protocol.parsed['GPIO'], [-3, 1])
elif cmd == ('M', 4):
# Spindle on, counterclockwise.
add_record(protocol.parsed['GPIO'], [-3, 1])
elif cmd == ('M', 5):
add_record(protocol.parsed['GPIO'], [-3, 0])
elif cmd == ('M', 9):
# Coolant off: ignore.
pass
elif cmd == ('M', 42):
if 'P' in args and 'S' in args:
add_record(protocol.parsed['GPIO'], [int(args['P']), args.get('S')])
else:
errors.append('%d:invalid M42 request (needs P and S)' % lineno)
elif cmd == ('M', 84):
# Don't sleep, but set all extruder positions to 0.
for e in range(len(pos[1])):
add_record(protocol.parsed['SETPOS'], [e, 0])
elif cmd == ('M', 104):
if args['E'] >= len(self.temps):
errors.append('%d:ignoring M104 for invalid temp %d' % (lineno, args['E']))
elif 'S' not in args:
errors.append('%d:ignoring M104 without S' % lineno)
else:
add_record(protocol.parsed['SETTEMP'], [int(args['E']), args['S'] + C0])
elif cmd == ('M', 106):
add_record(protocol.parsed['GPIO'], [-2, 1])
elif cmd == ('M', 107):
add_record(protocol.parsed['GPIO'], [-2, 0])
elif cmd == ('M', 109):
if 'S' in args:
add_record(protocol.parsed['SETTEMP'], [int(args['E']), args['S'] + C0])
add_record(protocol.parsed['WAITTEMP'], [int(args['E'])])
elif cmd == ('M', 116):
add_record(protocol.parsed['WAITTEMP'], [-2])
elif cmd[0] == 'S':
# Spindle speed; not supported, but shouldn't error.
pass
else:
errors.append('%d:invalid gcode command %s' % (lineno, repr((cmd, args))))
message = None
flush_pending()
stringmap = []
size = 0
for s in strings:
us = s.encode('utf-8')
stringmap.append(len(us))
dst.write(us)
size += len(us)
for s in stringmap:
dst.write(struct.pack('=L', s))
ret = bbox
if any(x is None for x in bbox[:4]):
bbox = bbox_last
ret = bbox
if any(x is None for x in bbox[:4]):
bbox = [0] * 6
ret = None
if any(x is None for x in bbox):
for t, b in enumerate(bbox):
if b is None:
bbox[t] = 0;
dst.write(struct.pack('=L' + 'd' * 8, len(strings), *(bbox + time_dist)))
self._broadcast(None, 'blocked', None)
return ret and ret + time_dist, '\n'.join(errors)
# }}}
def _reset_extruders(self, axes): # {{{
for i, sp in enumerate(axes):
for a, pos in enumerate(sp):
# Assume motor[a] corresponds to axis[a] if it exists.
if len(self.spaces[i].motor) > a and not self._pin_valid(self.spaces[i].motor[a]['limit_max_pin']) and not self._pin_valid(self.spaces[i].motor[a]['limit_min_pin']):
self.set_axis_pos(i, a, pos)
# }}}
def _pin_valid(self, pin): # {{{
return (pin & 0x100) != 0
# }}}
def _spi_send(self, data): # {{{
for bits, p in data:
shift = (8 - bits % 8) % 8
if shift > 0:
p = [(p[b] << shift | p[b + 1] >> (8 - shift)) & 0xff for b in range(len(p) - 1)] + [(p[-1] << shift) & 0xff]
self._send_packet(struct.pack('=BB', protocol.command['SPI'], bits) + b''.join(struct.pack('=B', b) for b in p))
# }}}
def admin_connect(self, port, run_id): # {{{
self._send_packet(struct.pack('=B', protocol.command['CONNECT']) + bytes([ord(x) for x in run_id]) + port.encode('utf-8') + b'\0')
# The rest happens in response to the CONNECTED reply.
# }}}
def admin_reconnect(self, port): # {{{
pass
# }}}
# Subclasses. {{{
class Space: # {{{
def __init__(self, machine, id):
self.name = ['position', 'extruders', 'followers'][id]
self.type = [TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER][id]
self.machine = machine
self.id = id
self.axis = []
self.motor = []
self.delta = [{'axis_min': 0., 'axis_max': 0., 'rodlength': 0., 'radius': 0.} for t in range(3)]
self.delta_angle = 0
self.polar_max_r = float('inf')
self.extruder = []
self.follower = []
def read(self, data):
axes, motors = data
if self.id == 1:
self.machine.multipliers = (self.machine.multipliers + [1.] * len(axes))[:len(axes)]
if len(axes) > len(self.axis):
def nm(i):
if self.id == 0:
if i < 3:
return chr(ord('x') + i)
elif i < 6:
return chr(ord('a') + i - 3)
else:
return 'Axis %d' % i
elif self.id == 1:
return 'extruder %d' % i
else:
return 'follower %d' % i
self.axis += [{'name': nm(i), 'home_pos2': float('nan')} for i in range(len(self.axis), len(axes))]
else:
self.axis[len(axes):] = []
for a in range(len(axes)):
self.axis[a]['park'], self.axis[a]['park_order'], self.axis[a]['min'], self.axis[a]['max'] = struct.unpack('=dBdd', axes[a])
if len(motors) > len(self.motor):
self.motor += [{} for i in range(len(self.motor), len(motors))]
else:
self.motor[len(motors):] = []
for m in range(len(motors)):
self.motor[m]['step_pin'], self.motor[m]['dir_pin'], self.motor[m]['enable_pin'], self.motor[m]['limit_min_pin'], self.motor[m]['limit_max_pin'], self.motor[m]['steps_per_unit'], self.motor[m]['home_pos'], self.motor[m]['limit_v'], self.motor[m]['limit_a'], self.motor[m]['home_order'] = struct.unpack('=HHHHHddddB', motors[m])
if self.id == 1 and m < len(self.machine.multipliers):
self.motor[m]['steps_per_unit'] /= self.machine.multipliers[m]
def write_info(self, num_axes = None):
data = struct.pack('=B', self.type)
if self.type == TYPE_CARTESIAN:
data += struct.pack('=B', num_axes if num_axes is not None else len(self.axis))
elif self.type == TYPE_DELTA:
for a in range(3):
data += struct.pack('=dddd', self.delta[a]['axis_min'], self.delta[a]['axis_max'], self.delta[a]['rodlength'], self.delta[a]['radius'])
data += struct.pack('=d', self.delta_angle)
elif self.type == TYPE_POLAR:
data += struct.pack('=d', self.polar_max_r)
elif self.type == TYPE_EXTRUDER:
num = num_axes if num_axes is not None else len(self.axis)
data += struct.pack('=B', num)
for a in range(num):
if a < len(self.extruder):
data += struct.pack('=ddd', self.extruder[a]['dx'], self.extruder[a]['dy'], self.extruder[a]['dz'])
else:
data += struct.pack('=ddd', 0, 0, 0)
elif self.type == TYPE_FOLLOWER:
num = num_axes if num_axes is not None else len(self.axis)
data += struct.pack('=B', num)
for a in range(num):
if a < len(self.follower):
data += struct.pack('=BB', self.follower[a]['space'], self.follower[a]['motor'])
else:
data += struct.pack('=BB', 0xff, 0xff)
else:
log('invalid type')
raise AssertionError('invalid space type')
return data
def write_axis(self, axis):
if self.id == 0:
return struct.pack('=dBdd', self.axis[axis]['park'], int(self.axis[axis]['park_order']), self.axis[axis]['min'], self.axis[axis]['max'])
else:
return struct.pack('=dBdd', float('nan'), 0, float('-inf'), float('inf'))
def write_motor(self, motor):
if self.id == 2:
if self.follower[motor]['space'] >= len(self.machine.spaces) or self.follower[motor]['motor'] >= len(self.machine.spaces[self.follower[motor]['space']].motor):
#log('write motor for follower %d with fake base' % motor)
base = {'steps_per_unit': 1, 'limit_v': float('inf'), 'limit_a': float('inf')}
else:
#log('write motor for follower %d with base %s' % (motor, self.machine.spaces[0].motor))
base = self.machine.spaces[self.follower[motor]['space']].motor[self.follower[motor]['motor']]
else:
base = self.motor[motor]
return struct.pack('=HHHHHddddB', self.motor[motor]['step_pin'], self.motor[motor]['dir_pin'], self.motor[motor]['enable_pin'], self.motor[motor]['limit_min_pin'], self.motor[motor]['limit_max_pin'], base['steps_per_unit'] * (1. if self.id != 1 or motor >= len(self.machine.multipliers) else self.machine.multipliers[motor]), self.motor[motor]['home_pos'], base['limit_v'], base['limit_a'], int(self.motor[motor]['home_order']))
def set_current_pos(self, axis, pos):
#log('setting pos of %d %d to %f' % (self.id, axis, pos))
self.machine._send_packet(struct.pack('=BBBd', protocol.command['SETPOS'], self.id, axis, pos))
def get_current_pos(self, axis):
#log('getting current pos %d %d' % (self.id, axis))
self.machine._send_packet(struct.pack('=BBB', protocol.command['GETPOS'], self.id, axis))
cmd, s, m, f, e, data = self.machine._get_reply()
assert cmd == protocol.rcommand['POS']
#log('get current pos %d %d: %f' % (self.id, axis, f))
return f
def motor_name(self, i):
if self.type in (TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER):
return self.axis[i]['name']
elif self.type == TYPE_DELTA:
return chr(ord('u') + i)
elif self.type == TYPE_POLAR:
return ['r', 'θ', 'z'][i]
else:
log('invalid type')
raise AssertionError('invalid space type')
def export(self):
std = [self.name, self.type, [[a['name'], a['park'], a['park_order'], a['min'], a['max'], a['home_pos2']] for a in self.axis], [[self.motor_name(i), m['step_pin'], m['dir_pin'], m['enable_pin'], m['limit_min_pin'], m['limit_max_pin'], m['steps_per_unit'], m['home_pos'], m['limit_v'], m['limit_a'], m['home_order']] for i, m in enumerate(self.motor)], None if self.id != 1 else self.machine.multipliers]
if self.type == TYPE_CARTESIAN:
return std
elif self.type == TYPE_DELTA:
return std + [[[a['axis_min'], a['axis_max'], a['rodlength'], a['radius']] for a in self.delta] + [self.delta_angle]]
elif self.type == TYPE_POLAR:
return std + [self.polar_max_r]
elif self.type == TYPE_EXTRUDER:
return std + [[[a['dx'], a['dy'], a['dz']] for a in self.extruder]]
elif self.type == TYPE_FOLLOWER:
return std + [[[a['space'], a['motor']] for a in self.follower]]
else:
log('invalid type')
raise AssertionError('invalid space type')
def export_settings(self):
# Things to handle specially while homing:
# * self.home_limits = [(a['min'], a['max']) for a in self.spaces[0].axis]
# * self.home_orig_type = self.spaces[0].type
ret = '[space %d]\r\n' % self.id
type = self.type if self.id != 0 or self.machine.home_phase is None else self.machine.home_orig_type
if self.id == 0:
ret += 'type = %d\r\n' % type
if type == TYPE_CARTESIAN:
ret += 'num_axes = %d\r\n' % len(self.axis)
elif type == TYPE_DELTA:
ret += 'delta_angle = %f\r\n' % self.delta_angle
for i in range(3):
ret += '[delta %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %f\r\n' % (x, self.delta[i][x]) for x in ('rodlength', 'radius', 'axis_min', 'axis_max')])
elif type == TYPE_POLAR:
ret += 'polar_max_r = %f\r\n' % self.polar_max_r
elif type == TYPE_EXTRUDER:
ret += 'num_axes = %d\r\n' % len(self.axis)
for i in range(len(self.extruder)):
ret += '[extruder %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %f\r\n' % (x, self.extruder[i][x]) for x in ('dx', 'dy', 'dz')])
elif type == TYPE_FOLLOWER:
ret += 'num_axes = %d\r\n' % len(self.axis)
for i in range(len(self.follower)):
ret += '[follower %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %d\r\n' % (x, self.follower[i][x]) for x in ('space', 'motor')])
else:
log('invalid type')
raise AssertionError('invalid space type')
for i, a in enumerate(self.axis):
ret += '[axis %d %d]\r\n' % (self.id, i)
ret += 'name = %s\r\n' % a['name']
if self.id == 0:
ret += ''.join(['%s = %f\r\n' % (x, a[x]) for x in ('park', 'park_order', 'home_pos2')])
if self.machine.home_phase is None:
ret += ''.join(['%s = %f\r\n' % (x, a[x]) for x in ('min', 'max')])
else:
ret += ''.join(['%s = %f\r\n' % (x, y) for x, y in zip(('min', 'max'), self.machine.home_limits[self.id])])
for i, m in enumerate(self.motor):
ret += '[motor %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %s\r\n' % (x, write_pin(m[x])) for x in ('step_pin', 'dir_pin', 'enable_pin')])
if self.id != 1:
ret += ''.join(['%s = %s\r\n' % (x, write_pin(m[x])) for x in ('limit_min_pin', 'limit_max_pin')])
ret += ''.join(['%s = %f\r\n' % (x, m[x]) for x in ('home_pos',)])
ret += ''.join(['%s = %d\r\n' % (x, m[x]) for x in ('home_order',)])
if self.id != 2:
ret += ''.join(['%s = %f\r\n' % (x, m[x]) for x in ('steps_per_unit', 'limit_v', 'limit_a')])
return ret
# }}}
class Temp: # {{{
def __init__(self, id):
self.name = 'temp %d' % id
self.id = id
self.value = float('nan')
def read(self, data):
self.R0, self.R1, logRc, Tc, self.beta, self.heater_pin, self.fan_pin, self.thermistor_pin, fan_temp, self.fan_duty, heater_limit_l, heater_limit_h, fan_limit_l, fan_limit_h, self.hold_time = struct.unpack('=dddddHHHddddddd', data)
try:
self.Rc = math.exp(logRc)
except:
self.Rc = float('nan')
self.Tc = Tc - C0
self.heater_limit_l = heater_limit_l - C0
self.heater_limit_h = heater_limit_h - C0
self.fan_limit_l = fan_limit_l - C0
self.fan_limit_h = fan_limit_h - C0
self.fan_temp = fan_temp - C0
self.fan_pin ^= 0x200
def write(self):
try:
logRc = math.log(self.Rc)
except:
logRc = float('nan')
return struct.pack('=dddddHHHddddddd', self.R0, self.R1, logRc, self.Tc + C0, self.beta, self.heater_pin, self.fan_pin ^ 0x200, self.thermistor_pin, self.fan_temp + C0, self.fan_duty, self.heater_limit_l + C0, self.heater_limit_h + C0, self.fan_limit_l + C0, self.fan_limit_h + C0, self.hold_time)
def export(self):
return [self.name, self.R0, self.R1, self.Rc, self.Tc, self.beta, self.heater_pin, self.fan_pin, self.thermistor_pin, self.fan_temp, self.fan_duty, self.heater_limit_l, self.heater_limit_h, self.fan_limit_l, self.fan_limit_h, self.hold_time, self.value]
def export_settings(self):
ret = '[temp %d]\r\n' % self.id
ret += 'name = %s\r\n' % self.name
ret += ''.join(['%s = %s\r\n' % (x, write_pin(getattr(self, x))) for x in ('heater_pin', 'fan_pin', 'thermistor_pin')])
ret += ''.join(['%s = %f\r\n' % (x, getattr(self, x)) for x in ('fan_temp', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time')])
return ret
# }}}
class Gpio: # {{{
def __init__(self, id):
self.name = 'gpio %d' % id
self.id = id
self.state = 3
self.reset = 3
self.value = False
self.duty = 1.
def read(self, data):
self.pin, state, self.duty = struct.unpack('=HBd', data)
self.state = state & 0x3
self.reset = (state >> 2) & 0x3
def write(self):
return struct.pack('=HBd', self.pin, self.state | (self.reset << 2), self.duty)
def export(self):
return [self.name, self.pin, self.state, self.reset, self.duty, self.value if self.state >= 2 else self.state == 1]
def export_settings(self):
ret = '[gpio %d]\r\n' % self.id
ret += 'name = %s\r\n' % self.name
ret += 'pin = %s\r\n' % write_pin(self.pin)
ret += 'reset = %d\r\n' % self.reset
ret += 'duty = %f\r\n' % self.duty
return ret
# }}}
# }}}
# }}}
# Useful commands. {{{
def admin_reset_uuid(self): # {{{
uuid = protocol.new_uuid(string = False)
self._send_packet(struct.pack('=B', protocol.command['SET_UUID']) + bytes(uuid))
self.uuid = protocol.new_uuid(uuid = uuid, string = True)
if not self.name:
self.name = self.uuid
return self.uuid
# }}}
def expert_die(self, reason): # {{{
'''Kill this machine, including all files on disk.
'''
log('%s dying as requested by host (%s).' % (self.uuid, reason))
# Clean up spool.
dirname = fhs.write_spool(self.uuid, dir = True, opened = False)
if os.path.isdir(dirname):
try:
shutil.rmtree(dirname, ignore_errors = False)
except:
log('Could not remove %d' % dirname)
# Clean up profiles.
for dirname in fhs.read_data(self.uuid, dir = True, multiple = True, opened = False):
try:
shutil.rmtree(dirname, ignore_errors = False)
except:
log('Could not remove %d' % dirname)
return (WAIT, WAIT)
# }}}
@delayed
def flush(self, id): # {{{
'''Wait for currently scheduled moves to finish.
'''
#log('flush start')
def cb(w):
#log('flush done')
if id is not None:
self._send(id, 'return', w)
self.movecb.append((False, cb))
if self.flushing is not True:
self.line()
#log('end flush preparation')
# }}}
@delayed
def probe(self, id, area, speed = 3.): # {{{
'''Run a probing routine.
This moves over the given area and probes a grid of points less
than max_probe_distance apart.
If the probe pin is valid, it will be used for the probe.
If it is invalid, a confirmation is required for every point.
'''
if area is None:
try:
fhs.remove_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'))
except:
log('Failed to remove probe file.')
traceback.print_exc()
self.probemap = None
self._globals_update()
if id is not None:
self._send(id, 'return', None)
return
if len(self.spaces[0].axis) < 3 or not self.probe_safe_dist > 0:
if id is not None:
self._send(id, 'return', None)
return
log(repr(area))
density = [int(area[t + 4] / self.probe_dist) + 1 for t in range(2)] + [self.targetangle]
self.probemap = [area, density, [[[] for x in range(density[0] + 1)] for y in range(density[1] + 1)]]
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
self.probe_speed = speed
self._do_probe(id, 0, 0, self.get_axis_pos(0, 2))
# }}}
def line(self, moves = (), f0 = None, f1 = None, v0 = None, v1 = None, relative = False, probe = False, single = False, force = False): # {{{
'''Move the tool in a straight line.
'''
#log('line %s %s %s %d %d' % (repr(moves), f0, f1, probe))
#log('speed %s' % f0)
#traceback.print_stack()
if not force and self.home_phase is not None and not self.paused:
log('ignoring line during home')
return
self.queue.append((moves, f0, f1, v0, v1, probe, single, relative))
if not self.wait:
self._do_queue()
# }}}
@delayed
def line_cb(self, id, moves = (), f0 = None, f1 = None, v0 = None, v1 = None, relative = False, probe = False, single = False): # {{{
'''Move the tool in a straight line; return when done.
'''
if self.home_phase is not None and not self.paused:
log('ignoring linecb during home')
if id is not None:
self._send(id, 'return', None)
return
self.line(moves, f0, f1, v0, v1, relative, probe, single)
self.wait_for_cb()[1](id)
# }}}
def move_target(self, dx, dy): # {{{
'''Move the target position.
Using this function avoids a round trip to the driver.
'''
self.set_globals(targetx = self.targetx + dx, targety = self.targety + dy)
# }}}
def sleep(self, sleeping = True, update = True, force = False): # {{{
'''Put motors to sleep, or wake them up.
'''
if sleeping:
if self.home_phase is not None or (not force and not self.paused and (self.gcode_map is not None or self.gcode_file)):
return
self.position_valid = False
if update:
self._globals_update()
self._send_packet(struct.pack('=BB', protocol.command['SLEEP'], sleeping))
# }}}
def settemp(self, channel, temp, update = True): # {{{
'''Set target temperature.
'''
channel = int(channel)
self.temps[channel].value = temp
if update:
self._temp_update(channel)
self._send_packet(struct.pack('=BBd', protocol.command['SETTEMP'], channel, temp + C0 if not math.isnan(self.temps[channel].beta) else temp))
if self.gcode_waiting > 0 and any(channel == x[0] for x in self.tempcb):
self.waittemp(channel, temp)
# }}}
def waittemp(self, channel, min, max = None): # {{{
'''Set temperature alarm values.
Note that this function returns immediately; it does not wait
for the temperature to be reached.
'''
channel = int(channel)
if min is None:
min = float('nan')
if max is None:
max = float('nan')
self._send_packet(struct.pack('=BBdd', protocol.command['WAITTEMP'], channel, min + C0 if not math.isnan(self.temps[channel].beta) else min, max + C0 if not math.isnan(self.temps[channel].beta) else max))
# }}}
def readtemp(self, channel): # {{{
'''Read current temperature.
'''
channel = int(channel)
if channel >= len(self.temps):
log('Trying to read invalid temp %d' % channel)
return float('nan')
self._send_packet(struct.pack('=BB', protocol.command['READTEMP'], channel))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TEMP']
return f - (C0 if not math.isnan(self.temps[channel].beta) else 0)
# }}}
def readpower(self, channel): # {{{
'''Read power recordings.
The return value is a tuple of the time it has been on since
this function was last called, and the current time, both in
milliseconds.
To use, this function must be called at least twice; the first
call only the time is recorded. The second call the new time
is recorded and the elapsed time is computed and used in
combination with the time it was on.
'''
channel = int(channel)
if channel >= len(self.temps):
log('Trying to read invalid power %d' % channel)
return float('nan')
self._send_packet(struct.pack('=BB', protocol.command['READPOWER'], channel))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['POWER']
return s, m
# }}}
def readpin(self, pin): # {{{
'''Read current value of a gpio pin.
'''
self._send_packet(struct.pack('=BB', protocol.command['READPIN'], pin))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['PIN']
return bool(s)
# }}}
def load(self, profile = None, update = True): # {{{
'''Load a profile.
'''
filenames = fhs.read_data(os.path.join(self.uuid, 'profiles', ((profile and profile.strip()) or self.profile) + os.extsep + 'ini'), opened = False, multiple = True)
if profile and self.profile != profile.strip():
#log('setting profile to %s' % profile.strip())
self.profile = profile.strip()
if update:
self._globals_update()
if len(filenames) > 0:
with open(filenames[0]) as f:
log('loading profile {}'.format(filenames[0]))
self.expert_import_settings(f.read(), update = update)
else:
log('not loading nonexistent profile')
# }}}
def admin_save(self, profile = None): # {{{
'''Save a profile.
If the profile name is not given, it saves the current profile.
'''
if profile and self.profile != profile.strip():
log('setting profile to %s' % profile.strip())
self.profile = profile.strip()
self._globals_update()
with fhs.write_data(os.path.join(self.uuid, 'profiles', (profile.strip() or self.profile) + os.extsep + 'ini')) as f:
f.write(self.export_settings())
# }}}
def list_profiles(self): # {{{
'''Get a list of all available profiles.
'''
dirnames = fhs.read_data(os.path.join(self.uuid, 'profiles'), dir = True, multiple = True, opened = False)
ret = []
for d in dirnames:
for f in os.listdir(d):
name = os.path.splitext(f)[0].strip()
if name not in ret:
ret.append(name)
ret.sort()
return ret
# }}}
def admin_remove_profile(self, profile): # {{{
'''Remove a profile.
'''
filename = fhs.write_data(os.path.join(self.uuid, 'profiles', (profile.strip() or self.profile) + os.extsep + 'ini'), opened = False)
if os.path.exists(filename):
os.unlink(filename)
return True
return False
# }}}
def admin_set_default_profile(self, profile): # {{{
'''Set a profile as default.
'''
self.default_profile = profile
with fhs.write_data(os.path.join(self.uuid, 'info' + os.extsep + 'txt')) as f:
f.write(self.name + '\n')
f.write(profile + '\n')
# }}}
def abort(self): # {{{
'''Abort the current job.
'''
for t, temp in enumerate(self.temps):
self.settemp(t, float('nan'))
self.pause(store = False)
for g, gpio in enumerate(self.gpios):
self.set_gpio(g, state = gpio.reset)
self._job_done(False, 'aborted by user')
# Sleep doesn't work as long as home_phase is non-None, so do it after _job_done.
self.sleep(force = True);
# }}}
def pause(self, pausing = True, store = True, update = True): # {{{
'''Pause or resume the machine.
'''
was_paused = self.paused
if pausing:
self._send_packet(struct.pack('=BB', protocol.command['QUEUED'], True))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['QUEUE']:
log('invalid reply to queued command')
return
self.movewait = 0
self.wait = False
self.paused = pausing
if not self.paused:
if was_paused:
# Go back to pausing position.
# First reset all axes that don't have a limit switch.
if self.queue_info is not None:
self._reset_extruders(self.queue_info[1])
self.line(self.queue_info[1])
# TODO: adjust extrusion of current segment to shorter path length.
#log('resuming')
self.resuming = True
#log('sending resume')
self._send_packet(bytes((protocol.command['RESUME'],)))
self._do_queue()
else:
#log('pausing')
if not was_paused:
#log('pausing %d %d %d %d %d' % (store, self.queue_info is None, len(self.queue), self.queue_pos, s))
if store and self.queue_info is None and ((len(self.queue) > 0 and self.queue_pos - s >= 0) or self.gcode_file):
if self.home_phase is not None:
#log('killing homer')
self.home_phase = None
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
if self.home_cb in self.movecb:
self.movecb.remove(self.home_cb)
if self.home_id is not None:
self._send(self.home_id, 'return', None)
store = False
if self.probe_cb in self.movecb:
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
store = False
#log('pausing gcode %d/%d/%d' % (self.queue_pos, s, len(self.queue)))
if self.flushing is None:
self.flushing = False
if store:
self.queue_info = [len(self.queue) if self.gcode_file else self.queue_pos - s, [[s.get_current_pos(a) for a in range(len(s.axis))] for s in self.spaces], self.queue, self.movecb, self.flushing]
else:
#log('stopping')
self.paused = False
if self.probe_cb in self.movecb:
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
if len(self.movecb) > 0:
call_queue.extend([(x[1], [False]) for x in self.movecb])
self.queue = []
self.movecb = []
self.flushing = False
self.queue_pos = 0
if update:
self._globals_update()
# }}}
def queued(self): # {{{
'''Get the number of currently queued segments.
'''
self._send_packet(struct.pack('=BB', protocol.command['QUEUED'], False))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['QUEUE']:
log('invalid reply to queued command')
return None
return s
# }}}
@delayed
def home(self, id, speed = 5, cb = None, abort = True): # {{{
'''Recalibrate the position with its limit switches.
'''
if self.home_phase is not None and not self.paused:
log("ignoring request to home because we're already homing")
if id is not None:
self._send(id, 'return', None)
return
# Abort only if it is requested, and the job is not paused.
if abort and self.queue_info is None:
self._job_done(False, 'aborted by homing')
self.home_phase = 0
self.home_id = id
self.home_return = None
self.home_speed = speed
self.home_done_cb = cb
for i, e in enumerate(self.spaces[1].axis):
self.set_axis_pos(1, i, 0)
self._do_home()
# }}}
@delayed
def park(self, id, cb = None, abort = True, order = 0, aborted = False): # {{{
'''Go to the park position.
Home first if the position is unknown.
'''
if aborted:
if id is not None:
self._send(id, 'error', 'aborted')
return
#log('parking with cb %s' % repr(cb))
if abort and self.queue_info is None:
self._job_done(False, 'aborted by parking')
self.parking = True
if not self.position_valid:
#log('homing')
self.home(cb = lambda: self.park(cb, abort = False)[1](id), abort = False)[1](None)
return
next_order = None
for s in self.spaces:
topark = [a['park_order'] for a in s.axis if not math.isnan(a['park']) and a['park_order'] >= order]
if len(topark) > 0 and (next_order is None or min(topark) > next_order):
next_order = min(topark)
if next_order is None:
#log('done parking; cb = %s' % repr(cb))
self.parking = False
if cb:
def wrap_cb(done):
call_queue.append((cb, []))
if id is not None:
self._send(id, 'return', None)
self.movecb.append((False, wrap_cb))
self.line()
else:
if id is not None:
self._send(id, 'return', None)
return
self.movecb.append((False, lambda done: self.park(cb, False, next_order + 1, not done)[1](id)))
self.line([[a['park'] - (0 if si != 0 or ai != 2 else self.zoffset) if a['park_order'] == next_order else float('nan') for ai, a in enumerate(s.axis)] for si, s in enumerate(self.spaces)])
# }}}
@delayed
def benjamin_audio_play(self, id, name, motor = 2): # {{{
self.audio_id = id
self.sleep(False)
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 1, 0, 0, motor, 0) + filename.encode('utf8'))
# }}}
def benjamin_audio_add_POST(self, filename, name): # {{{
with open(filename, 'rb') as f:
self._audio_add(f, name)
# }}}
def benjamin_audio_del(self, name): # {{{
assert name in self.audioqueue
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
os.unlink(filename)
del self.audioqueue[name]
self._broadcast(None, 'audioqueue', tuple(self.audioqueue.keys()))
# }}}
def audio_list(self): # {{{
return self.audioqueue
# }}}
@delayed
def wait_for_cb(self, id): # {{{
'''Block until the move queue is empty.
'''
ret = lambda w: id is None or self._send(id, 'return', w)
if self.movewait == 0:
#log('not delaying with wait_for_cb, because there is no cb waiting')
ret(self.movewait == 0)
else:
#log('waiting for cb')
self.movecb.append((True, ret))
# }}}
def waiting_for_cb(self): # {{{
'''Check if any process is waiting for the move queue to be empty.
'''
return self.movewait > 0
# }}}
@delayed
def wait_for_temp(self, id, which = None): # {{{
'''Wait for a temp to trigger its alarm.
'''
def cb():
if id is not None:
self._send(id, 'return', None)
return
self.gcode_waiting -= 1
if(which is None and len(self.alarms) > 0) or which in self.alarms:
cb()
else:
self.tempcb.append((which, cb))
# }}}
def clear_alarm(self, which = None): # {{{
'''Disable a temp alarm.
If which is None, disable all temp alarms.
'''
if which is None:
self.alarms.clear()
else:
self.alarms.discard(which)
# }}}
def get_limits(self, space, motor = None): # {{{
'''Return all limits that were hit since they were cleared.
'''
if motor is None:
return self.limits[space]
if motor in self.limits[space]:
return self.limits[space][motor]
return None
# }}}
def clear_limits(self): # {{{
'''Clear all recorded limits.
'''
for s in range(len(self.spaces)):
self.limits[s].clear()
# }}}
def valid(self): # {{{
'''Return whether the position of the motors is known.
'''
return self.position_valid
# }}}
def export_settings(self): # {{{
'''Export the current settings.
The resulting string can be imported back.
'''
message = '[general]\r\n'
for t in ('temps', 'gpios'):
message += 'num_%s = %d\r\n' % (t, len(getattr(self, t)))
message += 'pin_names = %s\r\n' % ','.join(('%d' % p[0]) + p[1] for p in self.pin_names)
message += 'unit_name = %s\r\n' % self.unit_name
message += 'spi_setup = %s\r\n' % self._mangle_spi()
message += ''.join(['%s = %s\r\n' % (x, write_pin(getattr(self, x))) for x in ('led_pin', 'stop_pin', 'probe_pin', 'spiss_pin')])
message += ''.join(['%s = %d\r\n' % (x, getattr(self, x)) for x in ('bed_id', 'fan_id', 'spindle_id', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'timeout')])
message += ''.join(['%s = %f\r\n' % (x, getattr(self, x)) for x in ('probe_dist', 'probe_offset', 'probe_safe_dist', 'temp_scale_min', 'temp_scale_max', 'max_deviation', 'max_v')])
message += 'user_interface = %s\r\n' % self.user_interface
for i, s in enumerate(self.spaces):
message += s.export_settings()
for i, t in enumerate(self.temps):
message += t.export_settings()
for i, g in enumerate(self.gpios):
message += g.export_settings()
return message
# }}}
def expert_import_settings(self, settings, filename = None, update = True): # {{{
'''Import new settings.
settings is a string as created by export_settings.
The filename is ignored.
'''
self._broadcast(None, 'blocked', 'importing settings')
self.sleep(update = update)
section = 'general'
index = None
obj = None
regexp = re.compile('\s*\[(general|(space|temp|gpio|(extruder|axis|motor|delta|follower)\s+(\d+))\s+(\d+))\]\s*$|\s*(\w+)\s*=\s*(.*?)\s*$|\s*(?:#.*)?$')
#1: (general|(space|temp|gpio|(axis|motor|delta)\s+(\d+))\s+(\d+)) 1 section
#2: (space|temp|gpio|(extruder|axis|motor|delta)\s+(\d+)) 2 section with index
#3: (extruder|axis|motor|delta) 3 sectionname with two indices
#4: (\d+) 4 index of space
#5: (\d+) 5 only or component index
#6: (\w+) 6 identifier
#7: (.*?) 7 value
errors = []
globals_changed = True
changed = {'space': set(), 'temp': set(), 'gpio': set(), 'axis': set(), 'motor': set(), 'extruder': set(), 'delta': set(), 'follower': set()}
keys = {
'general': {'num_temps', 'num_gpios', 'user_interface', 'pin_names', 'led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'probe_dist', 'probe_offset', 'probe_safe_dist', 'bed_id', 'fan_id', 'spindle_id', 'unit_name', 'timeout', 'temp_scale_min', 'temp_scale_max', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'spi_setup', 'max_deviation', 'max_v'},
'space': {'type', 'num_axes', 'delta_angle', 'polar_max_r'},
'temp': {'name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'},
'gpio': {'name', 'pin', 'state', 'reset', 'duty'},
'axis': {'name', 'park', 'park_order', 'min', 'max', 'home_pos2'},
'motor': {'step_pin', 'dir_pin', 'enable_pin', 'limit_min_pin', 'limit_max_pin', 'steps_per_unit', 'home_pos', 'limit_v', 'limit_a', 'home_order'},
'extruder': {'dx', 'dy', 'dz'},
'delta': {'axis_min', 'axis_max', 'rodlength', 'radius'},
'follower': {'space', 'motor'}
}
for l in settings.split('\n'):
r = regexp.match(l)
if not r:
errors.append((l, 'syntax error'))
continue
if r.group(1) is not None:
# New section.
if r.group(2) is not None:
# At least one index.
#log("At least one index")
if r.group(3) is not None:
# Two indices: axis, motor, extruder, delta, follower.
#log("Two indices")
index = (int(r.group(4)), int(r.group(5)))
section = r.group(3)
if index[0] >= len(self.spaces) or index[1] >= len(getattr(self.spaces[index[0]], section)):
log('index out of range for %s; %s %s' % (index, len(self.spaces), len(getattr(self.spaces[index[0]], section)) if index[0] < len(self.spaces) else 'x'))
errors.append((l, 'index out of range'))
obj = None
continue
obj = getattr(self.spaces[index[0]], section)[index[1]]
else:
#log("One index")
# One index: space, temp, gpio.
index = int(r.group(5))
section = r.group(2)
if index >= len(getattr(self, section + 's')):
errors.append((l, 'index out of range'))
obj = None
continue
obj = getattr(self, section + 's')[index]
changed[section].add(index)
else:
#log("No index")
# No indices: general.
section = r.group(1)
index = None
obj = self
globals_changed = True
continue
elif obj is None:
# Ignore settings for incorrect section.
continue
if not r.group(6):
# Comment or empty line.
continue
key = r.group(6)
value = r.group(7)
try:
if key == 'pin_names':
if len(self.pin_names) > 0:
# Don't override hardware-provided names.
continue
value = [[int(x[0]), x[1:]] for x in value.split(',')]
elif 'name' in key or key == 'user_interface':
pass # Keep strings as they are.
elif key == 'spi_setup':
value = self._unmangle_spi(value)
elif key.endswith('pin'):
value = read_pin(self, value)
#log('pin imported as {} for {}'.format(value, key))
elif key.startswith('num') or section == 'follower' or key.endswith('_id'):
value = int(value)
else:
value = float(value)
except ValueError:
errors.append((l, 'invalid value for %s' % key))
continue
if key not in keys[section] or (section == 'motor' and ((key in ('home_pos', 'home_order') and index[0] == 1) or (key in ('steps_per_unit', 'limit_v', 'limit_a') and index[0] == 2))):
errors.append((l, 'invalid key for section %s' % section))
continue
# If something critical is changed, update instantly.
if key.startswith('num') or key == 'type':
#log('setting now for %s:%s=%s' % (section, key, value))
if index is None:
self.expert_set_globals(**{key: value})
else:
if section == 'space':
for i in changed['motor']:
if i[0] == index:
self.expert_set_motor(i, readback = False)
for i in changed['axis']:
if i[0] == index:
self.expert_set_axis(i, readback = False)
for i in changed['delta']:
if i[0] == index:
self.expert_set_axis(i, readback = False)
getattr(self, 'expert_set_' + section)(index, **{key: value})
else:
if isinstance(index, tuple):
#log('setting later %s' % repr((section, key, value)))
obj[key] = value
else:
#log('setting later other %s' % repr((section, key, value)))
if section == 'extruder':
obj[ord[key[1]] - ord['x']] = value
else:
setattr(obj, key, value)
# Update values in the machine by calling the expert_set_* functions with no new settings.
if globals_changed:
#log('setting globals')
self.expert_set_globals()
for index in changed['extruder']:
changed['space'].add(index[0])
for index in changed['follower']:
changed['space'].add(index[0])
for index in changed['delta']:
changed['space'].add(index[0])
for section in changed:
for index in changed[section]:
if not isinstance(index, tuple):
continue
if section not in ('follower', 'delta', 'extruder'):
#log('setting non-{delta,follower} %s %s' % (section, index))
getattr(self, 'expert_set_' + section)(index, readback = False)
changed['space'].add(index[0])
for section in changed:
for index in changed[section]:
if isinstance(index, tuple):
continue
#log('setting %s' % repr((section, index)))
getattr(self, 'expert_set_' + section)(index)
self._broadcast(None, 'blocked', None)
return errors
# }}}
def expert_import_POST(self, filename, name): # {{{
'''Import settings using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
return ', '.join('%s (%s)' % (msg, ln) for ln, msg in self.expert_import_settings(open(filename).read(), name))
# }}}
@delayed
def gcode_run(self, id, code, paused = False): # {{{
'''Run a string of g-code.
'''
with fhs.write_temp(text = False) as f:
f.write(code)
f.seek(0)
self.gcode_id = id
# Break this in two, otherwise tail recursion may destroy f before call is done?
ret = self._gcode_run(f.filename, paused = paused)
return ret
# }}}
@delayed
def request_confirmation(self, id, message): # {{{
'''Wait for confirmation.
The return value is True if confirmation is given, False if
not.
'''
# Abort pending confirmation, if any.
if self.confirmer not in (False, None):
self._send(self.confirmer, 'return', False)
self.confirmer = id
self.confirm_id += 1
self.confirm_axes = [[s.get_current_pos(a) for a in range(len(s.axis))] for s in self.spaces]
self.confirm_message = message
self._broadcast(None, 'confirm', self.confirm_id, self.confirm_message)
for c in self.confirm_waits:
self._send(c, 'return', (self.confirm_id, self.confirm_message))
self.confirm_waits.clear()
# }}}
def get_confirm_id(self): # {{{
'''Return id of current confirmation request, if any.
'''
return self.confirm_id, self.confirm_message
# }}}
@delayed
def wait_confirm(self, id, pending = True): # {{{
'''Block until confirmation is requested.
If pending is False, ignore the current request, if any.
'''
if pending and self.confirmer is not None:
self._send(id, 'return', (self.confirm_id, self.confirm_message))
return
self.confirm_waits.add(id)
# }}}
def confirm(self, confirm_id, success = True): # {{{
'''Respond to a confirmation request.
If confirm_id is not None, it must be equal to the current id
or the confirmation is ignored.
Success is passed to the requester. If it is requested by
g-code, passing False will abort the job.
'''
if confirm_id not in (self.confirm_id, None) or self.confirm_axes is None:
# Confirmation was already sent, or never reguested.
#log('no confirm %s' % repr((confirm_id, self.confirm_id)))
return False
id = self.confirmer
self.confirmer = None
self.confirm_message = None
self._broadcast(None, 'confirm', None)
self._reset_extruders(self.confirm_axes)
self.confirm_axes = None
if id not in (False, None):
self._send(id, 'return', success)
else:
if self.probing:
call_queue.append((self.probe_cb[1], [False if success else None]))
else:
if not success:
self.probe_pending = False
self._job_done(False, 'aborted by failed confirmation')
else:
if self.probe_pending and self._pin_valid(self.probe_pin):
self.probe_pending = False
call_queue.append((self._one_probe, []))
else:
self.probe_pending = False
self._send_packet(bytes((protocol.command['RESUME'],)))
return True
# }}}
def queue_add(self, data, name): # {{{
'''Add code to the queue as a string.
'''
with fhs.write_temp() as f:
f.write(data)
f.seek(0)
return self._queue_add(f.filename, name)
# }}}
def queue_add_POST(self, filename, name): # {{{
'''Add g-code to queue using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
return self._queue_add(filename, name)
# }}}
def probe_add_POST(self, filename, name): # {{{
'''Set probe map using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
with open(filename) as f:
self.probemap = json.loads(f.read().strip())
return '' if self._check_probemap() else 'Invalid probemap'
# }}}
def queue_remove(self, name, audio = False): # {{{
'''Remove an entry from the queue.
'''
assert name in self.jobqueue
#log('removing %s' % name)
if audio:
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
del self.audioqueue[name]
self._broadcast(None, 'audioqueue', tuple(self.audioqueue.keys()))
else:
filename = fhs.read_spool(os.path.join(self.uuid, 'gcode', name + os.extsep + 'bin'), opened = False)
del self.jobqueue[name]
self._broadcast(None, 'queue', [(q, self.jobqueue[q]) for q in self.jobqueue])
try:
os.unlink(filename)
except:
log('unable to unlink %s' % filename)
# }}}
@delayed
def queue_run(self, id, name, paused = False): # {{{
'''Start a new job.
'''
if self.probing:
log('ignoring run request while probe is in progress')
if id is not None:
self._send(id, 'return', None)
return
if self.job_current is not None and not self.paused:
log('ignoring run request while job is in progress: %s ' % repr(self.job_current) + str(self.paused))
if id is not None:
self._send(id, 'return', None)
return
#log('set active jobs to %s' % names)
self.job_current = name
self.job_id = id
self._start_job(paused)
# }}}
def get_machine_state(self): # {{{
'''Return current machine state.
Return value is a tuple of a human readable string describing
the state, NaN or the elapsed time, NaN or the total time for
the current job.
Note that the times are computed from the requested speeds.
These are generally too low, because they don't account for
acceleration and velocity limits.
'''
pos = self.tp_get_position()
context = self.tp_get_context(position = pos[0])
if self.paused:
state = 'Paused'
elif self.gcode_map is not None or self.gcode_file:
state = 'Running'
else:
return 'Idle', float('nan'), float('nan'), pos[0], pos[1], context
self._send_packet(struct.pack('=B', protocol.command['GETTIME']))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['TIME']:
log('invalid reply to gettime command')
return 'Error', float('nan'), float('nan'), pos[0], pos[1], context
return state, f, (self.total_time[0] + (0 if len(self.spaces) < 1 else self.total_time[1] / self.max_v)) / self.feedrate, pos[0], pos[1], context
# }}}
def send_machine(self, target): # {{{
'''Return all settings about a machine.
'''
self.initialized = True
self._broadcast(target, 'new_machine', self.uuid, [self.queue_length])
self._globals_update(target)
for i, s in enumerate(self.spaces):
self._space_update(i, target)
for i, t in enumerate(self.temps):
self._temp_update(i, target)
for i, g in enumerate(self.gpios):
self._gpio_update(i, target)
self._broadcast(target, 'queue', [(q, self.jobqueue[q]) for q in self.jobqueue])
self._broadcast(target, 'audioqueue', tuple(self.audioqueue.keys()))
if self.confirmer is not None:
self._broadcast(target, 'confirm', self.confirm_id, self.confirm_message)
# }}}
def admin_disconnect(self, reason = None): # {{{
self._send_packet(struct.pack('=B', protocol.command['FORCE_DISCONNECT']))
self._close(False)
# }}}
# Commands for handling the toolpath.
def tp_get_position(self): # {{{
'''Get current toolpath position.
@return position, total toolpath length.'''
if self.gcode_map is None:
return 0, 0
self._send_packet(struct.pack('=B', protocol.command['TP_GETPOS']))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TP_POS']
return f, self.gcode_num_records
# }}}
def tp_set_position(self, position): # {{{
'''Set current toolpath position.
It is an error to call this function while not paused.
@param position: new toolpath position.
@return None.'''
assert self.gcode_map is not None
assert 0 <= position < self.gcode_num_records
assert self.paused
if self.queue_info is not None:
self.queue_info[1] = [] # Don't restore extruder position on resume.
self._send_packet(struct.pack('=Bd', protocol.command['TP_SETPOS'], position))
# }}}
def tp_get_context(self, num = None, position = None): # {{{
'''Get context around a position.
@param num: number of lines context on each side.
@param position: center of the returned region, or None for current position.
@return first position of returned region (normally position - num), list of lines+arcs+specials'''
if self.gcode_map is None:
return 0, []
if num is None:
num = 100; # TODO: make configurable.
if position is None:
position = self.tp_get_position()[0]
position = int(position)
def parse_record(num):
s = struct.calcsize(record_format)
type, tool, X, Y, Z, E, f, F, time, dist = struct.unpack(record_format, self.gcode_map[num * s:(num + 1) * s])
return tuple(protocol.parsed.keys())[tuple(protocol.parsed.values()).index(type)], tool, X, Y, Z, E, f, F, time, dist
return max(0, position - num), [parse_record(x) for x in range(position - num, position + num + 1) if 0 <= x < self.gcode_num_records]
# }}}
def tp_get_string(self, num): # {{{
'''Get string from toolpath.
@param num: index of the string.
@return the string.'''
return self.gcode_strings[num]
# }}}
def tp_find_position(self, x = None, y = None, z = None): # {{{
'''Find toolpath position closest to coordinate.
Inputs may be None, in that case that coordinate is ignored.
@param x: X coordinate of target or None.
@param y: Y coordinate of target or None.
@param z: Z coordinate of target or None.
@return toolpath position.'''
assert self.gcode_map is not None
self._send_packet(struct.pack('=Bddd', protocol.command['TP_FINDPOS'], *(a if a is not None else float('nan') for a in (x, y, z))))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TP_POS']
return f
# }}}
# }}}
# Accessor functions. {{{
# Globals. {{{
def get_globals(self): # {{{
#log('getting globals')
ret = {'num_temps': len(self.temps), 'num_gpios': len(self.gpios)}
for key in ('name', 'user_interface', 'pin_names', 'uuid', 'queue_length', 'num_pins', 'led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'probe_dist', 'probe_offset', 'probe_safe_dist', 'bed_id', 'fan_id', 'spindle_id', 'unit_name', 'timeout', 'feedrate', 'targetx', 'targety', 'targetangle', 'zoffset', 'store_adc', 'temp_scale_min', 'temp_scale_max', 'probemap', 'paused', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'spi_setup', 'max_deviation', 'max_v'):
ret[key] = getattr(self, key)
return ret
# }}}
def expert_set_globals(self, update = True, **ka): # {{{
#log('setting variables with %s' % repr(ka))
nt = ka.pop('num_temps') if 'num_temps' in ka else None
ng = ka.pop('num_gpios') if 'num_gpios' in ka else None
if 'store_adc' in ka:
self.store_adc = bool(ka.pop('store_adc'))
if 'name' in ka:
name = ka.pop('name')
if name != self.name:
self.name = name
self.admin_set_default_profile(self.default_profile)
if 'probemap' in ka:
self.probemap = ka.pop('probemap')
self._check_probemap()
for key in ('unit_name', 'user_interface', 'pin_names'):
if key in ka:
setattr(self, key, ka.pop(key))
if 'spi_setup' in ka:
self.spi_setup = self._unmangle_spi(ka.pop('spi_setup'))
if self.spi_setup:
self._spi_send(self.spi_setup)
for key in ('led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'bed_id', 'fan_id', 'spindle_id', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'timeout'):
if key in ka:
setattr(self, key, int(ka.pop(key)))
for key in ('probe_dist', 'probe_offset', 'probe_safe_dist', 'feedrate', 'targetx', 'targety', 'targetangle', 'zoffset', 'temp_scale_min', 'temp_scale_max', 'max_deviation', 'max_v'):
if key in ka:
setattr(self, key, float(ka.pop(key)))
self._write_globals(nt, ng, update = update)
assert len(ka) == 0
# }}}
def set_globals(self, update = True, **ka): # {{{
real_ka = {}
for key in ('feedrate', 'targetx', 'targety', 'targetangle', 'zoffset'):
if key in ka:
real_ka[key] = ka.pop(key)
assert len(ka) == 0
return self.expert_set_globals(update = update, **real_ka)
# }}}
# }}}
# Space {{{
def get_axis_pos(self, space, axis = None): # {{{
if space >= len(self.spaces) or (axis is not None and axis >= len(self.spaces[space].axis)):
log('request for invalid axis position %d %d' % (space, axis))
return float('nan')
if axis is None:
return [self.spaces[space].get_current_pos(a) for a in range(len(self.spaces[space].axis))]
else:
return self.spaces[space].get_current_pos(axis)
# }}}
def set_axis_pos(self, space, axis, pos): # {{{
if space >= len(self.spaces) or (axis is not None and axis >= len(self.spaces[space].axis)):
log('request to set invalid axis position %d %d' % (space, axis))
return False
return self.spaces[space].set_current_pos(axis, pos)
# }}}
def get_space(self, space): # {{{
ret = {'name': self.spaces[space].name, 'num_axes': len(self.spaces[space].axis), 'num_motors': len(self.spaces[space].motor)}
if self.spaces[space].type == TYPE_CARTESIAN:
pass
elif self.spaces[space].type == TYPE_DELTA:
delta = []
for i in range(3):
d = {}
for key in ('axis_min', 'axis_max', 'rodlength', 'radius'):
d[key] = self.spaces[space].delta[i][key]
delta.append(d)
delta.append(self.spaces[space].delta_angle)
ret['delta'] = delta
elif self.spaces[space].type == TYPE_POLAR:
ret['polar_max_r'] = self.spaces[space].polar_max_r
elif self.spaces[space].type == TYPE_EXTRUDER:
ret['extruder'] = []
for a in range(len(self.spaces[space].axis)):
ret['extruder'].append({})
for key in ('dx', 'dy', 'dz'):
ret['extruder'][-1][key] = self.spaces[space].extruder[a][key]
elif self.spaces[space].type == TYPE_FOLLOWER:
ret['follower'] = []
for a in range(len(self.spaces[space].axis)):
ret['follower'].append({})
for key in ('space', 'motor'):
ret['follower'][-1][key] = self.spaces[space].follower[a][key]
else:
log('invalid type')
return ret
# }}}
def get_axis(self, space, axis): # {{{
ret = {'name': self.spaces[space].axis[axis]['name']}
if space == 1:
ret['multiplier'] = self.multipliers[axis]
if space == 0:
for key in ('park', 'park_order', 'min', 'max', 'home_pos2'):
ret[key] = self.spaces[space].axis[axis][key]
return ret
# }}}
def get_motor(self, space, motor): # {{{
ret = {'name': self.spaces[space].motor_name(motor)}
for key in ('step_pin', 'dir_pin', 'enable_pin'):
ret[key] = self.spaces[space].motor[motor][key]
if space != 1:
for key in ('limit_min_pin', 'limit_max_pin', 'home_pos', 'home_order'):
ret[key] = self.spaces[space].motor[motor][key]
if space != 2:
for key in ('steps_per_unit', 'limit_v', 'limit_a'):
ret[key] = self.spaces[space].motor[motor][key]
return ret
# }}}
def expert_set_space(self, space, readback = True, update = True, **ka): # {{{
old_type = self.spaces[space].type
if space == 0 and 'type' in ka:
self.spaces[space].type = int(ka.pop('type'))
current_pos = None if self.spaces[space].type != old_type else self.get_axis_pos(space)
if self.spaces[space].type == TYPE_EXTRUDER:
if 'extruder' in ka:
e = ka.pop('extruder')
for ei, ee in e.items():
i = int(ei)
for key in ('dx', 'dy', 'dz'):
if key in ee:
self.spaces[space].extruder[i][key] = ee.pop(key)
assert len(ee) == 0
if self.spaces[space].type == TYPE_FOLLOWER:
if 'follower' in ka:
f = ka.pop('follower')
for fi, ff in f.items():
fi = int(fi)
for key in ('space', 'motor'):
if key in ff:
self.spaces[space].follower[fi][key] = int(ff.pop(key))
assert len(ff) == 0
if self.spaces[space].type in (TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER):
if 'num_axes' in ka:
num_axes = int(ka.pop('num_axes'))
else:
num_axes = len(self.spaces[space].axis)
num_motors = num_axes
elif self.spaces[space].type == TYPE_DELTA:
num_axes = 3;
num_motors = 3;
if 'delta' in ka:
d = ka.pop('delta')
for di, dd in d.items():
i = int(di)
assert 0 <= i < 3
for key in ('axis_min', 'axis_max', 'rodlength', 'radius'):
if key in dd:
self.spaces[space].delta[i][key] = dd.pop(key)
assert len(dd) == 0
if 'delta_angle' in ka:
self.spaces[space].delta_angle = ka.pop('delta_angle')
elif self.spaces[space].type == TYPE_POLAR:
num_axes = 3;
num_motors = 3;
if 'polar_max_r' in ka:
self.spaces[space].polar_max_r = ka.pop('polar_max_r')
self._send_packet(struct.pack('=BB', protocol.command['WRITE_SPACE_INFO'], space) + self.spaces[space].write_info(num_axes))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
if len(ka) != 0:
log('invalid input ignored: %s' % repr(ka))
if current_pos is not None and not all(math.isnan(x) for x in current_pos) and (self.paused or (self.home_phase is None and not self.gcode_file and self.gcode_map is None)):
self.line({space: current_pos})
#else:
# log(repr(('not going to pos:', current_pos, self.paused, self.home_phase, self.gcode_file, self.gcode_map)))
# }}}
def expert_set_axis(self, spaceaxis, readback = True, update = True, **ka): # {{{
space, axis = spaceaxis
if 'name' in ka:
self.spaces[space].axis[axis]['name'] = ka.pop('name')
if space == 0:
for key in ('park', 'park_order', 'min', 'max', 'home_pos2'):
if key in ka:
self.spaces[space].axis[axis][key] = ka.pop(key)
if space == 1 and 'multiplier' in ka and axis < len(self.spaces[space].motor):
assert(ka['multiplier'] > 0)
self.multipliers[axis] = ka.pop('multiplier')
self.expert_set_motor((space, axis), readback, update)
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_AXIS'], space, axis) + self.spaces[space].write_axis(axis))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
assert len(ka) == 0
# }}}
def expert_set_motor(self, spacemotor, readback = True, update = True, **ka): # {{{
space, motor = spacemotor
current_pos = self.get_axis_pos(space)
for key in ('step_pin', 'dir_pin', 'enable_pin'):
if key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
for key in ('home_pos', 'limit_min_pin', 'limit_max_pin'):
if space != 1 and key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
if space != 1 and 'home_order' in ka:
self.spaces[space].motor[motor]['home_order'] = ka.pop('home_order')
for key in ('steps_per_unit', 'limit_v', 'limit_a'):
if space != 2 and key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], space, motor) + self.spaces[space].write_motor(motor))
followers = False
for m, mt in enumerate(self.spaces[2].motor):
fs = self.spaces[2].follower[m]['space']
fm = self.spaces[2].follower[m]['motor']
if fs == space and fm == motor:
followers = True
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], 2, m) + self.spaces[2].write_motor(m))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
if followers:
self.spaces[2].read(self._read('SPACE', 2))
if update:
self._space_update(2)
assert len(ka) == 0
if not all(math.isnan(x) for x in current_pos) and (self.paused or (self.home_phase is None and not self.gcode_file and self.gcode_map is None)):
self.line({space: current_pos})
# }}}
# }}}
# Temp {{{
def get_temp(self, temp): # {{{
ret = {}
for key in ('name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'):
ret[key] = getattr(self.temps[temp], key)
return ret
# }}}
def expert_set_temp(self, temp, update = True, **ka): # {{{
ret = {}
for key in ('name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'):
if key in ka:
setattr(self.temps[temp], key, ka.pop(key))
self._send_packet(struct.pack('=BB', protocol.command['WRITE_TEMP'], temp) + self.temps[temp].write())
self.temps[temp].read(self._read('TEMP', temp))
if update:
self._temp_update(temp)
if len(ka) != 0:
log('problem: %s' % repr(ka))
assert len(ka) == 0
# }}}
def set_temp(self, temp, update = True, **ka): # {{{
real_ka = {}
if 'fan_duty' in ka:
real_ka['fan_duty'] = ka.pop('fan_duty')
assert len(ka) == 0
return self.expert_set_temp(temp, update = update, **real_ka)
# }}}
# }}}
# Gpio {{{
@delayed
def wait_gpio(self, id, gpio, value = 1): # {{{
assert gpio < len(self.gpios)
if int(value) == int(self.gpios[gpio].value):
self._send(id, 'return', None)
return
if gpio not in self.gpio_waits:
self.gpio_waits[gpio] = []
self.gpio_waits[gpio].append(id)
# }}}
def get_gpio(self, gpio): # {{{
ret = {}
for key in ('name', 'pin', 'state', 'reset', 'duty', 'value'):
ret[key] = getattr(self.gpios[gpio], key)
return ret
# }}}
def expert_set_gpio(self, gpio, update = True, **ka): # {{{
for key in ('name', 'pin', 'state', 'reset', 'duty'):
if key in ka:
setattr(self.gpios[gpio], key, ka.pop(key))
self.gpios[gpio].state = int(self.gpios[gpio].state)
self.gpios[gpio].reset = int(self.gpios[gpio].reset)
if self.gpios[gpio].reset >= 2 or (self.gpios[gpio].reset < 2 and self.gpios[gpio].state >= 2):
self.gpios[gpio].state = self.gpios[gpio].reset
#log('gpio %d reset %d' % (gpio, self.gpios[gpio].reset))
self._send_packet(struct.pack('=BB', protocol.command['WRITE_GPIO'], gpio) + self.gpios[gpio].write())
self.gpios[gpio].read(self._read('GPIO', gpio))
if update:
self._gpio_update(gpio)
assert len(ka) == 0
# }}}
def set_gpio(self, gpio, update = True, **ka): # {{{
real_ka = {}
if 'state' in ka:
real_ka['state'] = ka.pop('state')
assert len(ka) == 0
return self.expert_set_gpio(gpio, update = update, **real_ka)
# }}}
# }}}
# }}}
# }}}
call_queue = []
machine = Machine(config['allow-system'])
if machine.machine is None:
sys.exit(0)
while True: # {{{
while len(call_queue) > 0:
f, a = call_queue.pop(0)
#log('calling %s' % repr((f, a)))
f(*a)
while machine.machine.available():
machine._machine_input()
if len(call_queue) > 0:
continue # Handle this first.
fds = [sys.stdin, machine.machine]
#log('waiting; movewait = %d' % machine.movewait)
found = select.select(fds, [], fds, None)
if sys.stdin in found[0] or sys.stdin in found[2]:
#log('command')
machine._command_input()
if machine.machine in found[0] or machine.machine in found[2]:
#log('machine')
machine._machine_input()
# }}}
|
In the past, stripping was used to treat varicose veins. When a vein is stripped away, the body doesn’t know it has been removed. It thinks the vein has been damaged, so your body tries to grow the vein back. Unlike organs, such as the gallbladder, which you can remove permanently, veins are part of the body’s connective tissue. This means that they are involved in the healing process.
If you are considering treatment for varicose veins, there is no reason to have vein stripping performed. The alternatives such as RadioFrequency Closure and Ambulatory Phlebectomy are more effective and have lasting results. If you had a vein stripping procedure performed in the past, and the veins have reappeared, make an appointment with the doctors at Reno Vein Clinic by visiting www.RenoVeinClinic.com or call (775) 329-3100.
Stay Spider Vein Free This Fall!
Just because colder weather is upon us and it’s almost time to start digging out pants and jeans, covering up doesn’t mean spider veins will disappear. In addition to looking unsightly, spider and varicose veins can pose health problems, so never let them go unnoticed or untreated.
Foam Sclerotherapy: Treating unwanted spider veins, this non-operative treatment uses micro-injection technology through small needles using an FDA approved solution. Compression hoses are required to be worn after the treatment and normal activities can resume immediately. To complete the sclerotherapy effectively, it usually takes two to four treatments, six weeks apart. What’s great is this approach treats hand, facial, leg and chest veins.
Having concerns about high cholesterol? A major risk factor leading to heart conditions, disease and strokes, cholesterol can often run in families. On the bright side, we can help manage cholesterol, starting with the foods we put in our bodies. Here’s a quick look at some tasty and functional foods recommended by the Mayo Clinic to help manage cholesterol numbers and keep us healthy and feeling our best.
Oatmeal, Bran and Fiber-Rich Foods: Containing soluble fiber, these foods help lower “bad” cholesterol. Soluble fiber is also found in kidney beans, apples, pears, barley and prunes.
Fish & Omega-3 Fatty Acids: Heart healthy because of Omega 3 Fatty Acids, fish can help lower blood pressure and reduce risks of blood clots. High levels are found in mackerel, trout, salmon, halibut, tuna, trout and more.
Walnuts, Almonds and Other Nuts: Helping reduce cholesterol, nuts are rich in good, fatty acids which also help keep blood vessels healthy. Doctors recommend eating a handful of nuts per day such as almonds, hazelnuts, peanuts, pecans, pine nuts, pistachios and walnuts – all helping reduce your risk of heart disease.
Olive Oil: Containing a blend of antioxidants, olive oil helps lower cholesterol, while leaving “good” cholesterol untouched. The Food and Drug Administration recommends using about 2 tablespoons of olive oil a day in place of other fats in your diet to receive its heart-healthy benefits.
For details and more information, check out the full article at: http://www.mayoclinic.com/health/cholesterol/CL00002.
Avoid wearing heels for a long period of time. High-heeled shoes can put stress on your veins. If you want to wear heels, try to find shoes, like wedges, that offer more support. Even with shoes that offer more support, you should give your legs a break periodically.
Uncross your legs while sitting. When you sit with your legs crossed you are limiting the blood circulation in your legs. The more often this happens, the harder it gets for your legs to recover. Try crossing your ankles when you sit instead.
Get your Vitamin C and Bioflavonoids (a kind of antioxidant). Both will help with circulation and reduce pain. Bioflavonoids also strengthen weak blood-vessel walls. Foods high in Vitamin C and Bioflavonoids include: sweet peppers, broccoli, cauliflower, kale, asparagus, spinach, tomatoes, strawberries, apricots, cantaloupe, and dark colored berries like blueberries.
Limit high-sodium foods. Too much sodium will cause your body and veins to retain more water. This can be hard on your veins because too much water retention can limit blood circulation.
Exfoliate your legs in the shower. By using a simple loofah or one of our skin scrubs on your legs daily, you can help improve blood circulation and lymphatic stimulation.
Get your Vitamin E. This vitamin is an excellent blood thinner that helps improve circulation and also helps to relieve pain. Foods high in Vitamin E are: wheat germ, egg yolks, butter, nuts, whole wheat flour, liver, and dark leafy greens.
Work out your legs. The more you move your leg muscles, the stronger you blood circulation will become. Working your legs will get you blood moving and help keep you blood moving.
Stretch your legs. Stretching helps the blood flow increase and opens up the fascia so that blood can get to the tissue more easily. Stretching is also important to help your legs recover from a long day.
Get your fiber. Constipation can be a factor in spider or varicose veins, because straining in an effort to force a bowel movement puts added pressure on veins. This pressure can lead to poor circulation.
Avoid fried foods. Fats add to circulation problems by causing plaque to deposit in you vein and arteries. By eating less fat, you can keep you blood flowing and keep plaque out of your veins.
|
# Copyright 2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""IPRange form."""
from django import forms
from django.contrib.auth.models import User
from maasserver.forms import MAASModelForm
from maasserver.models import Subnet
from maasserver.models.iprange import IPRange
class IPRangeForm(MAASModelForm):
"""IPRange creation/edition form."""
user = forms.ModelChoiceField(
required=False, queryset=User.objects, to_field_name="username"
)
class Meta:
model = IPRange
fields = ("subnet", "type", "start_ip", "end_ip", "user", "comment")
def __init__(
self, data=None, instance=None, request=None, *args, **kwargs
):
if data is None:
data = {}
else:
data = data.copy()
# If this is a new IPRange, fill in the 'user' and 'subnet' fields
# automatically, if necessary.
if instance is None:
start_ip = data.get("start_ip")
subnet = data.get("subnet")
if subnet is None and start_ip is not None:
subnet = Subnet.objects.get_best_subnet_for_ip(start_ip)
if subnet is not None:
data["subnet"] = subnet.id
if request is not None:
data["user"] = request.user.username
elif instance.user and "user" not in data:
data["user"] = instance.user.username
super().__init__(data=data, instance=instance, *args, **kwargs)
|
Nestled in the heart of Melbourne’s Mornington Peninsula, Rye is one the state’s most popular beachside towns. Those looking to relax head to the world-famous Peninsula Hot Springs; those looking for adventure travel to the Gunnamatta Trail Rides facility, where you can experience the sun going down at St Andrews Beach while horseback; and those looking for beachside fun have a range of options to choose from, the most popular of which is the Rye Ocean Beach, a magnificent four-kilometre stretch of exposed sandy coast.
Rye is a 15-minute drive to Sorrento, which is abuzz during the summer months as beach-loving tourists spend their nights in the many restaurants and drinking spots you find on Ocean Beach Road. Once in Sorrento, the art-deco Peninsula Cinemas Sorrento is a welcome retreat for those looking to put their feet up after a day of swimming, kitesurfing, or fishing.
The Mornington Peninsula is one of the most desirable destinations in the nation, and if you want to move to Rye then our team of moving professionals can make that dream a reality. Rye is 98 kilometres from the Melbourne CBD, but don’t let the stress of moving all your furniture to the seaside town distract you. Move On Removals will do the heavy lifting for you.
Our removalists have many years of experience in relocating equipment and heavy loads, making them the number one choice in Rye for moving to another part of Melbourne. The moving process will be made much easier through us, meaning you will be able to settle into your new home in a quick and timely manner.
Our qualified removalists use specialised moving equipment to securely store your furniture, boxes, and other belongings in a reliable moving truck that will ensure that your items will not move around by themselves while the truck is driving towards your new location. In the unlikely event anything is damaged, your items will be replaced by our insurance policy at no extra coast to you.
Is your business relocating to a new building in Rye? Our company is available to help various commercial enterprises to transfer everything they need to work to their new building. Our removalists are experienced with shifting heavy duty furniture for businesses, hauling big items such as desks, chairs, shelves, and much more. Our team’s efficient furniture removals service will ensure that your business will be operating again in no time.
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for boot methods used by iLO modules."""
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import boot as ilo_boot
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
if six.PY3:
import io
file = io.BytesIO
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloBootCommonMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloBootCommonMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
def test_parse_driver_info(self):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
expected_driver_info = {'ilo_deploy_iso': 'deploy-iso'}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
self.assertEqual(expected_driver_info, actual_driver_info)
def test_parse_driver_info_exc(self):
self.assertRaises(exception.MissingParameterValue,
ilo_boot.parse_driver_info, self.node)
class IloBootPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloBootPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_boot._get_boot_iso_object_name(self.node)
boot_iso_expected = "boot-%s" % self.node.uuid
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_http_url(self, service_mock):
url = 'http://abc.org/image/qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
service_mock.assert_called_once_with(mock.ANY, url)
self.assertEqual(url, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_unsupported_url(self, validate_href_mock):
validate_href_mock.side_effect = exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')
url = 'file://img.qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
ilo_boot._get_boot_iso, task, 'root-uuid')
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_props_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': u'glance://uui\u0111',
'kernel_id': None,
'ramdisk_id': None}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = False
task.node.driver_internal_info = driver_internal_info
task.node.save()
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_iso_expected = u'glance://uui\u0111'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot.LOG, 'error', spec_set=True, autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_uefi_no_glance_image(self,
deploy_info_mock,
image_props_mock,
log_mock,
boot_mode_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': None,
'ramdisk_id': None}
properties = {'capabilities': 'boot_mode:uefi'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
boot_iso_result = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
self.assertTrue(log_mock.called)
self.assertFalse(boot_mode_mock.called)
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create(self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
swift_api_mock,
create_boot_iso_mock, tempfile_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.pxe.pxe_append_params = 'kernel-params'
swift_obj_mock = swift_api_mock.return_value
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': 'kernel_uuid',
'ramdisk_id': 'ramdisk_uuid'}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
'kernel_uuid',
'ramdisk_uuid',
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
swift_obj_mock.create_object.assert_called_once_with('ilo-cont',
'abcdef',
'tmpfile')
boot_iso_expected = 'swift:abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(ilo_common, 'copy_image_to_web_server', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_recreate_boot_iso_use_webserver(
self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
create_boot_iso_mock, tempfile_mock,
copy_file_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_url = "http://10.10.1.30/httpboot"
CONF.deploy.http_root = "/httpboot"
CONF.pxe.pxe_append_params = 'kernel-params'
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
ramdisk_href = "http://10.10.1.30/httpboot/ramdisk"
kernel_href = "http://10.10.1.30/httpboot/kernel"
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': kernel_href,
'ramdisk_id': ramdisk_href}
boot_object_name_mock.return_value = 'new_boot_iso'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
copy_file_mock.return_value = "http://10.10.1.30/httpboot/new_boot_iso"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = True
instance_info = task.node.instance_info
old_boot_iso = 'http://10.10.1.30/httpboot/old_boot_iso'
instance_info['ilo_boot_iso'] = old_boot_iso
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
kernel_href,
ramdisk_href,
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
boot_iso_expected = 'http://10.10.1.30/httpboot/new_boot_iso'
self.assertEqual(boot_iso_expected, boot_iso_actual)
copy_file_mock.assert_called_once_with(fileobj_mock.name,
'new_boot_iso')
@mock.patch.object(ilo_common, 'copy_image_to_web_server', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create_use_webserver_true_ramdisk_webserver(
self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
create_boot_iso_mock, tempfile_mock,
copy_file_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_url = "http://10.10.1.30/httpboot"
CONF.deploy.http_root = "/httpboot"
CONF.pxe.pxe_append_params = 'kernel-params'
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
ramdisk_href = "http://10.10.1.30/httpboot/ramdisk"
kernel_href = "http://10.10.1.30/httpboot/kernel"
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': kernel_href,
'ramdisk_id': ramdisk_href}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
copy_file_mock.return_value = "http://10.10.1.30/httpboot/abcdef"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
kernel_href,
ramdisk_href,
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
boot_iso_expected = 'http://10.10.1.30/httpboot/abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
copy_file_mock.assert_called_once_with(fileobj_mock.name,
'abcdef')
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
def test__clean_up_boot_iso_for_instance(self, swift_mock,
boot_object_name_mock):
swift_obj_mock = swift_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_boot._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
@mock.patch.object(ilo_boot.LOG, 'exception', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
def test__clean_up_boot_iso_for_instance_exc(self, swift_mock,
boot_object_name_mock,
log_mock):
swift_obj_mock = swift_mock.return_value
exc = exception.SwiftObjectNotFoundError('error')
swift_obj_mock.delete_object.side_effect = exc
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_boot._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
self.assertTrue(log_mock.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_on_webserver(self, unlink_mock):
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_root = "/webserver"
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'http://x.y.z.a/webserver/boot-object'
self.node.instance_info = i_info
self.node.save()
boot_iso_path = "/webserver/boot-object"
ilo_boot._clean_up_boot_iso_for_instance(self.node)
unlink_mock.assert_called_once_with(boot_iso_path)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_no_boot_iso(
self, boot_object_name_mock):
ilo_boot._clean_up_boot_iso_for_instance(self.node)
self.assertFalse(boot_object_name_mock.called)
@mock.patch.object(ilo_boot, 'parse_driver_info', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'get_image_instance_info',
spec_set=True, autospec=True)
def test__parse_deploy_info(self, instance_info_mock, driver_info_mock):
instance_info_mock.return_value = {'a': 'b'}
driver_info_mock.return_value = {'c': 'd'}
expected_info = {'a': 'b', 'c': 'd'}
actual_info = ilo_boot._parse_deploy_info(self.node)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_MissingParam(self, mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(exception.MissingParameterValue,
"Missing 'ilo_deploy_iso'",
ilo_boot._validate_driver_info, task)
mock_parse_driver_info.assert_called_once_with(task.node)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_valid_uuid(self, mock_parse_driver_info,
mock_is_glance_image):
mock_is_glance_image.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
deploy_iso = '8a81759a-f29b-454b-8ab3-161c6ca1882c'
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
ilo_boot._validate_driver_info(task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_is_glance_image.assert_called_once_with(deploy_iso)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_InvalidParam(self, mock_parse_driver_info,
mock_is_glance_image,
mock_validate_href):
deploy_iso = 'http://abc.org/image/qcow2'
mock_validate_href.side_effect = exception.ImageRefValidationFailed(
image_href='http://abc.org/image/qcow2', reason='fail')
mock_is_glance_image.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
self.assertRaisesRegex(exception.InvalidParameterValue,
"Virtual media boot accepts",
ilo_boot._validate_driver_info, task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_valid_url(self, mock_parse_driver_info,
mock_is_glance_image,
mock_validate_href):
deploy_iso = 'http://abc.org/image/deploy.iso'
mock_is_glance_image.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
ilo_boot._validate_driver_info(task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso)
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def _test__validate_instance_image_info(self,
deploy_info_mock,
validate_prop_mock,
props_expected):
d_info = {'image_source': 'uuid'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_boot._validate_instance_image_info(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, props_expected)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test__validate_glance_partition_image(self,
is_glance_image_mock):
is_glance_image_mock.return_value = True
self._test__validate_instance_image_info(props_expected=['kernel_id',
'ramdisk_id'])
def test__validate_whole_disk_image(self):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
self._test__validate_instance_image_info(props_expected=[])
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test__validate_non_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = False
self._test__validate_instance_image_info(props_expected=['kernel',
'ramdisk'])
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_false(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(func_set_secure_boot_mode.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_true(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
func_set_secure_boot_mode.assert_called_once_with(task, False)
self.assertTrue(returned_state)
@mock.patch.object(ilo_boot, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_exception(self,
func_get_secure_boot_mode,
exception_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
func_get_secure_boot_mode.side_effect = Exception
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_sec_boot_on(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
ret_boot_mode = task.node.instance_info['deploy_boot_mode']
self.assertEqual('uefi', ret_boot_mode)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
task.node.instance_info = instance_info
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
self.assertNotIn('deploy_boot_mode', task.node.instance_info)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_sec_boot_on_inst_info(
self, func_node_power_action, func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
task.node.instance_info = instance_info
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
self.assertNotIn('deploy_boot_mode', task.node.instance_info)
class IloVirtualMediaBootTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaBootTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_boot, '_validate_driver_info',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_validate_instance_image_info',
spec_set=True, autospec=True)
def test_validate(self, mock_val_instance_image_info,
mock_val_driver_info):
instance_info = self.node.instance_info
instance_info['ilo_boot_iso'] = 'deploy-iso'
instance_info['image_source'] = '6b2f0c0c-79e8-4db6-842e-43c9764204af'
self.node.instance_info = instance_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
task.driver.boot.validate(task)
mock_val_instance_image_info.assert_called_once_with(task)
mock_val_driver_info.assert_called_once_with(task)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy',
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_power_action',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id',
spec_set=True, autospec=True)
def _test_prepare_ramdisk(self, get_nic_mock, setup_vmedia_mock,
eject_mock, node_power_mock,
prepare_node_for_deploy_mock,
ilo_boot_iso, image_source,
ramdisk_params={'a': 'b'}):
instance_info = self.node.instance_info
instance_info['ilo_boot_iso'] = ilo_boot_iso
instance_info['image_source'] = image_source
self.node.instance_info = instance_info
self.node.save()
get_nic_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_info = task.node.driver_info
driver_info['ilo_deploy_iso'] = 'deploy-iso'
task.node.driver_info = driver_info
task.driver.boot.prepare_ramdisk(task, ramdisk_params)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
if task.node.provision_state == states.DEPLOYING:
prepare_node_for_deploy_mock.assert_called_once_with(task)
eject_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab'}
get_nic_mock.assert_called_once_with(task)
setup_vmedia_mock.assert_called_once_with(task, 'deploy-iso',
expected_ramdisk_opts)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image):
"""Ensure deploy ops are blocked when not deploying and not cleaning"""
for state in states.STABLE_STATES:
mock_is_image.reset_mock()
self.node.provision_state = state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
self.assertFalse(mock_is_image.called)
def test_prepare_ramdisk_glance_image(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='swift:abcdef',
image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af')
self.node.refresh()
self.assertNotIn('ilo_boot_iso', self.node.instance_info)
def test_prepare_ramdisk_not_a_glance_image(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='http://mybootiso',
image_source='http://myimage')
self.node.refresh()
self.assertEqual('http://mybootiso',
self.node.instance_info['ilo_boot_iso'])
def test_prepare_ramdisk_glance_image_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='swift:abcdef',
image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af')
self.node.refresh()
self.assertNotIn('ilo_boot_iso', self.node.instance_info)
def test_prepare_ramdisk_not_a_glance_image_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='http://mybootiso',
image_source='http://myimage')
self.node.refresh()
self.assertEqual('http://mybootiso',
self.node.instance_info['ilo_boot_iso'])
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_with_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = 'boot.iso'
task.driver.boot._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
setup_vmedia_mock.assert_called_once_with(
task, 'boot.iso')
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM, persistent=True)
self.assertEqual('boot.iso',
task.node.instance_info['ilo_boot_iso'])
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_without_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = None
task.driver.boot._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_clean_up_boot_iso_for_instance',
spec_set=True, autospec=True)
def test_clean_up_instance(self, cleanup_iso_mock,
cleanup_vmedia_mock, node_power_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = False
driver_internal_info['root_uuid_or_disk_id'] = (
"12312642-09d3-467f-8e09-12385826a123")
task.node.driver_internal_info = driver_internal_info
task.node.save()
task.driver.boot.clean_up_instance(task)
cleanup_iso_mock.assert_called_once_with(task.node)
cleanup_vmedia_mock.assert_called_once_with(task)
driver_internal_info = task.node.driver_internal_info
self.assertNotIn('boot_iso_created_in_web_server',
driver_internal_info)
self.assertNotIn('root_uuid_or_disk_id', driver_internal_info)
node_power_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_ramdisk(self, cleanup_vmedia_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_ramdisk(task)
cleanup_vmedia_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def _test_prepare_instance_whole_disk_image(
self, cleanup_vmedia_boot_mock, set_boot_device_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
def test_prepare_instance_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_prepare_instance_whole_disk_image()
def test_prepare_instance_whole_disk_image(self):
self._test_prepare_instance_whole_disk_image()
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot.IloVirtualMediaBoot,
'_configure_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
self, cleanup_vmedia_boot_mock, configure_vmedia_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': (
"12312642-09d3-467f-8e09-12385826a123")}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
configure_vmedia_mock.assert_called_once_with(
mock.ANY, task, "12312642-09d3-467f-8e09-12385826a123")
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
class IloPXEBootTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEBootTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
def test_prepare_ramdisk_not_deploying_not_cleaning(
self, pxe_prepare_instance_mock, prepare_node_mock):
self.node.provision_state = states.CLEANING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
self.assertFalse(prepare_node_mock.called)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY,
task, None)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
def test_prepare_ramdisk_in_deploying(self, pxe_prepare_instance_mock,
prepare_node_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
prepare_node_mock.assert_called_once_with(task)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY,
task, None)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_instance', spec_set=True,
autospec=True)
def test_clean_up_instance(self, pxe_cleanup_mock, node_power_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_instance(task)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
pxe_cleanup_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', spec_set=True,
autospec=True)
def test_prepare_instance(self, pxe_prepare_instance_mock,
update_boot_mode_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
|
The summer war of 2014 lasted two and a half months. The media, in all its diverse forms, exposed its viewers and readers to gut-wrenching images: the three mothers praying for the safety of their abducted sons, three weeks of searching, the communal funeral for the teens, the revenge in the name of which Jews murdered a teenager from Shu’afat, the following riots in East Jerusalem, thousands of missiles fired from Gaza, parents lying on their children in the middle of the street to shield them from the rockets, aerial photographs of bombed targets, and ground forces entering the Gaza Strip, marching, inwards, to the front line.
And at the heels of this mass of images, photographs of the military funerals started to appear, accompanied by the pages of newspapers, which like the Command-Order #8 (order for wartime reserve duty), were filled with a fixed and familiar pattern: a headshot of a young man flashing a white toothed smile, and a hundred words trying to summarize his world.
Sixty seven military funerals and five civil funerals. We gathered together in grief.
The gut-wrenching pictures from Gaza came in late, parsimoniously mediated by the foreign press. The IDF did not grant the Israeli media access to the front line, as the media, on its part, followed the national "public diplomacy".
Thus, we have become numb to the images of targeted killings. They flickered on the screen in black and white clips as if they were computer games, and our hearts turned to stone in the face of the pictures of the overcrowded morgues in Gaza, the devastation of wiped out neighborhoods and the hundred thousand of displaced Palestinians.
The media does not possess the stamina of the cultural critic. The critic will examine the chain of events through the perspective of time, and will attempt to mark the mechanisms of power and the generators of perception intertwined into these mechanisms. The press is instantaneous by its very definition. Our contemporary culture with its myriad channels, networks and news, which spread information, have completely changed the rules of the game, fashioning an even shorter response time to events.
The media has lost its balance in favor of instant gratification; thus leaving itself exposed, in its weakness, to the manipulations of the territory’s governing speaker, be it the speaker of the IDF or the speaker of the Hamas.
The media consumer, if he is not naïve, will doubt it all.
|
# ST2/ST3 compat
from __future__ import print_function
import sublime
if sublime.version() < '3000':
# we are on ST2 and Python 2.X
_ST3 = False
import getTeXRoot
else:
_ST3 = True
from . import getTeXRoot
import sublime_plugin, os, os.path, platform
from subprocess import Popen
# View PDF file corresonding to TEX file in current buffer
# Assumes that the SumatraPDF viewer is used (great for inverse search!)
# and its executable is on the %PATH%
# Warning: we do not do "deep" safety checks (e.g. see if PDF file is old)
class View_pdfCommand(sublime_plugin.WindowCommand):
def run(self):
s = sublime.load_settings("LaTeXTools Preferences.sublime-settings")
prefs_keep_focus = s.get("keep_focus", True)
prefs_lin = s.get("linux")
view = self.window.active_view()
texFile, texExt = os.path.splitext(view.file_name())
if texExt.upper() != ".TEX":
sublime.error_message("%s is not a TeX source file: cannot view." % (os.path.basename(view.file_name()),))
return
quotes = ""# \"" MUST CHECK WHETHER WE NEED QUOTES ON WINDOWS!!!
root = getTeXRoot.get_tex_root(view)
rootFile, rootExt = os.path.splitext(root)
pdfFile = quotes + rootFile + '.pdf' + quotes
s = platform.system()
script_path = None
if s == "Darwin":
# for inverse search, set up a "Custom" sync profile, using
# "subl" as command and "%file:%line" as argument
# you also have to put a symlink to subl somewhere on your path
# Also check the box "check for file changes"
viewercmd = ["open", "-a", "Skim"]
elif s == "Windows":
# with new version of SumatraPDF, can set up Inverse
# Search in the GUI: under Settings|Options...
# Under "Set inverse search command-line", set:
# sublime_text "%f":%l
viewercmd = ["SumatraPDF", "-reuse-instance"]
elif s == "Linux":
# the required scripts are in the 'evince' subdir
script_path = os.path.join(sublime.packages_path(), 'LaTeXTools', 'evince')
ev_sync_exec = os.path.join(script_path, 'evince_sync') # so we get inverse search
# Get python binary if set in preferences:
py_binary = prefs_lin["python2"] or 'python'
sb_binary = prefs_lin["sublime"] or 'sublime-text'
viewercmd = ['sh', ev_sync_exec, py_binary, sb_binary]
else:
sublime.error_message("Platform as yet unsupported. Sorry!")
return
print (viewercmd + [pdfFile])
try:
Popen(viewercmd + [pdfFile], cwd=script_path)
except OSError:
sublime.error_message("Cannot launch Viewer. Make sure it is on your PATH.")
|
Upto 25% Off on Luxury brands Porsche design, Prada, Tom ford, Tag Heuer. The most luxurious eye wear brands of the world. Buy sunglasses and eyeglasses from top luxury brands including Ray-ban, Prada, Vogue, Fastrack, Linda farrow and more.
|
#!/usr/bin/python -u
"""Program for monitoring serial messages from the Teensy.
This program waits for the device to be connected and when the teensy is
disconnected, then it will go back to waiting for the teensy to once again
be connected.
This program also looks for lines that start with a single letter followed
by a colon, and will colorize the lines based on the letter.
"""
import select
import pyudev
import serial
import sys
import tty
import termios
import traceback
import syslog
import argparse
(LT_BLACK, LT_RED, LT_GREEN, LT_YELLOW,
LT_BLUE, LT_MAGENTA, LT_CYAN, LT_WHITE) = [
("\033[1;%dm" % (30 + i)) for i in range(8)]
(DK_BLACK, DK_RED, DK_GREEN, DK_YELLOW,
DK_BLUE, DK_MAGENTA, DK_CYAN, DK_WHITE) = [
("\033[2;%dm" % (30 + i)) for i in range(8)]
NO_COLOR = "\033[0m"
COLORS = {
'W': LT_YELLOW,
'I': "",
'D': LT_BLUE,
'C': LT_RED,
'E': LT_RED
}
class OutputWriter(object):
"""Class for dealing with the output from the teensy."""
def __init__(self):
self.buffered_output = ""
self.column = 0
self.colored = False
def write(self, string):
"""Writes characters to output. Lines will be delimited by
newline characters.
This routine breaks the output into lines and writes each line
individually, colorizing as appropriate.
"""
if len(self.buffered_output) > 0:
string = self.buffered_output + string
self.buffered_output = ""
while True:
nl_index = string.find('\n')
if self.column == 0 and nl_index < 0 and len(string) < 2:
self.buffered_output = string
return
if nl_index < 0:
line_string = string
else:
line_string = string[0:nl_index + 1]
prefix = ""
suffix = ""
if (self.column == 0 and len(string) >= 2 and
string[1] == ':' and string[0] in COLORS):
prefix = COLORS[string[0]]
self.colored = True
if nl_index >= 0 and self.colored:
suffix = NO_COLOR
sys.stdout.write(prefix + line_string + suffix)
sys.stdout.flush()
self.column += len(line_string)
if nl_index < 0:
return
string = string[nl_index + 1:]
self.column = 0
def is_teensy(device, serial_num=None):
"""Checks device to see if its a teensy device.
If serial is provided, then it will further check to see if the
serial number of the teensy device also matches.
"""
if 'ID_VENDOR' not in device:
return False
if not device['ID_VENDOR'].startswith('Teensy'):
return False
if serial_num is None:
return True
return device['ID_SERIAL_SHORT'] == serial_num
def teensy_mon(monitor, device):
"""Monitors the serial port from a given teensy device.
This function open the USDB serial port associated with device, and
will read characters from it and send to stdout. It will also read
characters from stdin and send them to the device.
This function returns when the teensy deivce disconnects (or is
disconnected).
"""
port_name = device.device_node
serial_num = device['ID_SERIAL_SHORT']
print 'Teensy device connected @%s (serial %s)\r' % (port_name, serial_num)
epoll = select.epoll()
epoll.register(monitor.fileno(), select.POLLIN)
output = OutputWriter()
try:
serial_port = serial.Serial(port=port_name,
timeout=0.001,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
xonxoff=False,
rtscts=False,
dsrdtr=False)
except serial.serialutil.SerialException:
print "Unable to open port '%s'" % port_name
return
serial_fd = serial_port.fileno()
tty.setraw(serial_fd)
new_settings = termios.tcgetattr(serial_fd)
new_settings[6][termios.VTIME] = 0
new_settings[6][termios.VMIN] = 1
termios.tcsetattr(serial_fd, termios.TCSANOW, new_settings)
epoll.register(serial_port.fileno(), select.POLLIN)
epoll.register(sys.stdin.fileno(), select.POLLIN)
while True:
events = epoll.poll()
for fileno, _ in events:
if fileno == monitor.fileno():
dev = monitor.poll()
if (dev.device_node != port_name or
dev.action != 'remove'):
continue
print 'Teensy device @', port_name, ' disconnected.\r'
print
serial_port.close()
return
if fileno == serial_port.fileno():
try:
data = serial_port.read(256)
except serial.serialutil.SerialException:
print 'Teensy device @', port_name, ' disconnected.\r'
print
serial_port.close()
return
#for x in data:
# print "Serial.Read '%c' 0x%02x" % (x, ord(x))
# For now, we'll not support color, and let the target do it.
# That also means that we work better if the target is doing
# something like readline
#output.write(data)
sys.stdout.write(data)
sys.stdout.flush()
if fileno == sys.stdin.fileno():
data = sys.stdin.read(1)
#for x in data:
# print "stdin.Read '%c' 0x%02x" % (x, ord(x))
if data[0] == chr(3):
raise KeyboardInterrupt
if data[0] == '\n':
serial_port.write('\r')
else:
serial_port.write(data)
def main():
"""The main program."""
parser = argparse.ArgumentParser(
prog="teensy_mon",
usage="%(prog)s [options] [command]",
description="Monitor serial output from teensy devices",
epilog="Press Control-C to quit"
)
parser.add_argument(
"-l", "--list",
dest="list",
action="store_true",
help="List Teensy devices currently connected"
)
parser.add_argument(
"-s", "--serial",
dest="serial",
help="Connect to Teeny device with a given serial number"
)
parser.add_argument(
"-v", "--verbose",
dest="verbose",
action="store_true",
help="Turn on verbose messages",
default=False
)
args = parser.parse_args(sys.argv[1:])
if args.verbose:
print 'pyudev version =', pyudev.__version__
context = pyudev.Context()
context.log_priority = syslog.LOG_NOTICE
if args.list:
detected = False
for device in context.list_devices(subsystem='tty'):
if is_teensy(device):
print 'Teensy device serial %-5s found @%s' % (
device['ID_SERIAL_SHORT'], device.device_node)
detected = True
if not detected:
print 'No Teensy devices detected.'
return
stdin_fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(stdin_fd)
try:
# Make some changes to stdin. We want to turn off canonical
# processing (so that ^H gets sent to the teensy), turn off echo,
# and make it unbuffered.
tty.setraw(stdin_fd)
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] &= ~(termios.ICANON | termios.ECHO)
new_settings[6][termios.VTIME] = 0
new_settings[6][termios.VMIN] = 1
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
monitor = pyudev.Monitor.from_netlink(context)
monitor.start()
monitor.filter_by('tty')
# Check to see if the teensy device is already present.
for device in context.list_devices(subsystem='tty'):
if is_teensy(device, args.serial):
teensy_mon(monitor, device)
# Otherwise wait for the teensy device to connect
while True:
if args.serial:
print 'Waiting for Teensy with serial %s ...' % args.serial
else:
print 'Waiting for Teensy...'
for device in iter(monitor.poll, None):
if device.action != 'add':
continue
if is_teensy(device, args.serial):
teensy_mon(monitor, device)
except KeyboardInterrupt:
print '\r\n'
except Exception:
traceback.print_exc()
# Restore stdin back to its old settings
termios.tcsetattr(stdin_fd, termios.TCSANOW, old_settings)
main()
|
The NES Classic Edition is no more, though another console is nearing shortly to get fans of a Nintendo Entertainment System vehement once again.
Say hello to a RetroN HD, a microconsole that plays strange NES cartridges, both PAL and NTSC.
The console also upscales a video outlay to 720p, providing an HD-resolution picture though compliance a game’s strange software.
The RetroN HD is a reconstitute of Hyperkin’s prior effort, a RetroN, that didn’t underline high-definition output.
A NES-like connected controller called a “Cadet” – that is a spitting picture of a NES gamepad – comes bundled with a console. Pre-orders have non-stop in a UK for £49.99, with a designed recover date of May 25.
Nintendo’s possess reconstitute of a console, a Nintendo Classic Edition, has sole out and is no longer in production. The NCE doesn’t take cartridges, though instead facilities 30 in-built games.
Reports advise that Nintendo is also operative on a reconstitute of a SNES, that could be expelled before Christmas 2017.
Want up-to-the-minute party news and features? Just strike ‘Like’ on the Digital Spy Facebook page and ‘Follow’ on the @digitalspy Twitter account and you’re all set.
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_spamfilter_options
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_spamfilter_options.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_spamfilter_options_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_spamfilter_options_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_spamfilter_options_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_spamfilter_options_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'random_attribute_not_valid': 'tag',
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
Remember the make-and-take tag album from the Deflecto booth at Creativation? I added photos and a few other touches. I love it.
All the photos I used were ones I already had printed. There are ten photos on the three tags. I just trimmed them to size and added them to the fronts and backs of the tags. You can see the tags here. I love that rabbit.
This is ADORABLE! LOVING those photos!!!!!!!!!
What a cutie, and such a cute project!
|
# Global Forest Watch API
# Copyright (C) 2013 World Resource Institute
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module supports pubsub."""
import logging
import copy
import json
from appengine_config import runtime_config
from google.appengine.ext import ndb
from google.appengine.api import users
from google.appengine.api import taskqueue
from gfw.user.gfw_user import GFWUser
from gfw.models.topic import Topic
from gfw.mailers.subscription_confirmation import SubscriptionConfirmationMailer
class Subscription(ndb.Model):
name = ndb.StringProperty()
topic = ndb.StringProperty()
email = ndb.StringProperty()
url = ndb.StringProperty()
user_id = ndb.KeyProperty()
pa = ndb.StringProperty()
use = ndb.StringProperty()
useid = ndb.IntegerProperty()
iso = ndb.StringProperty()
id1 = ndb.StringProperty()
ifl = ndb.StringProperty()
fl_id1 = ndb.StringProperty()
wdpaid = ndb.IntegerProperty()
has_geom = ndb.BooleanProperty(default=False)
confirmed = ndb.BooleanProperty(default=False)
geom = ndb.JsonProperty()
params = ndb.JsonProperty()
updates = ndb.JsonProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
new = ndb.BooleanProperty(default=True)
geostore = ndb.StringProperty()
language = ndb.StringProperty(choices=['EN', 'ES', 'FR', 'ID', 'PT', 'ZH'], default='EN')
overview_image = ndb.BlobProperty()
kind = 'Subscription'
@classmethod
def create(cls, params, user=None):
"""Create subscription if email and, iso or geom is present"""
subscription = Subscription()
subscription.populate(**params)
subscription.params = params
subscription.has_geom = bool(params.get('geom'))
user_id = user.key if user is not None else ndb.Key('User', None)
subscription.user_id = user_id
subscription.put()
return subscription
@classmethod
def subscribe(cls, params, user):
subscription = Subscription.create(params, user)
if subscription:
subscription.send_confirmation_email()
return subscription
else:
return False
@classmethod
def confirm_by_id(cls, id):
subscription = cls.get_by_id(int(id))
if subscription:
return subscription.confirm()
else:
return False
def send_confirmation_email(self):
taskqueue.add(url='/v2/subscriptions/tasks/confirmation',
queue_name='pubsub-confirmation',
params=dict(subscription=self.key.urlsafe()))
def to_dict(self):
result = super(Subscription,self).to_dict()
result['key'] = self.key.id()
result.pop('overview_image', None)
return result
def formatted_name(self):
if (not self.name) or (len(self.name) == 0):
return "Unnamed Subscription"
else:
return self.name
def confirm(self):
self.confirmed = True
return self.put()
def unconfirm(self):
self.confirmed = False
self.send_confirmation_email()
return self.put()
def unsubscribe(self):
return self.key.delete()
def run_analysis(self, begin, end):
params = copy.copy(self.params)
params['begin'] = begin
params['end'] = end
if 'geom' in params:
geom = params['geom']
if 'geometry' in geom:
geom = geom['geometry']
params['geojson'] = json.dumps(geom)
topic = Topic.get_by_id(self.topic)
return topic.execute(params)
|
AJ readers are invited to a summer party on Tuesday 21 August at the Daniel Libeskind-designed Serpentine Gallery Pavilion (above) in Kensington Gardens, London, hosted by Hydro Aluminium Extrusion UK and The Architects' Journal.This will be one of the last opportunities to visit the pavilion before it is dismantled and moved to a new site. And one lucky guest will win a Lotus Elise sports car for a weekend.
Admission is free, but by invitation only. For invitations call Karen Hutt on 0121 622 6868.
|
#!/usr/bin/python
# Package imports
import requests
import csv
# Stock information
symbol = input('Enter a stock symbol: ').upper()
stock = dict( [
( 'name' , 'Excellent Co.' ),
( 'symbol' , symbol ),
( 'phone' , '800-YOU-GAIN' ),
( 'currentPrice' , '150' ),
( 'yearHigh' , '12' ),
( 'yearLow' , '5' ),
( 'marketCap' , '$100 Million' ),
( 'dayDollarVolume' , '$1 Million' ),
( 'sales' , '$50 Million' ),
( 'netProfitMargin' , '25%' ),
( 'cash' , '$10 Million' ),
( 'totalDebt' , '$2.5 Million' ),
( 'salesPerShare' , '$15' ),
( 'cashFloatingPerShare' , '$0.45' ),
( 'earningsPerShare' , '$4' ),
( 'dividendYield' , 'N/A' ),
( 'returnOnEquity' , '20%' ),
( 'insiderBuy' , '10' ),
( 'insiderOwn' , '30%' ),
( 'stockBuyback' , 'Yes' ),
( 'epsRank' , '95' ),
( 'rpsRank' , '95' ),
( '5yrSales' , '20%' ),
( '5yrPrice' , '900%' ),
( 'projSales' , '25%' ),
( 'projHi' , '100' ),
( 'projLow' , '60' ),
( 'time' , '1' ),
( 'safety' , '2' ),
( 'stars' , '5' ),
( 'fairValue' , '5' ),
( 'currentPE' , '2.5' ),
( 'averagePE' , '5' ),
( 'ps' , '0.67' ),
( 'pb' , '0.5' ),
( 'currentRatio' , '10' ),
( 'quickRatio' , '2' ),
( 'smaMacdRsi' , 'strongUp' )
] )
'''
# Search the website for the quote and grab the information
# This gives me of the statistics that we need for the
# table but not everything. May need to explore multiple sites.
r = requests.get( 'http://www.google.com/finance?q=' + symbol )
print( r.text )
'''
# Output the statistics to a CSV formatted file
with open('worksheet.csv','w',newline='') as csvfile:
writer = csv.writer( csvfile, delimiter=' ', quoting=csv.QUOTE_MINIMAL )
writer.writerow( [
'Company Name, Symbol, and Phone',
'Current Price',
'52 wk Hi/Lo',
'Market Cap',
'Day Dollar Volume',
'Sales',
'Net Profit Margin',
'Cash',
'Total Debt',
'Sales / Share',
'Cash Flow / Share',
'Earnings / Share',
'Dividend Yield',
'ROE',
'Insider Buy/Own',
'Stock Buyback',
'EPS Rank',
'RPS Rank',
'5 yr Sales',
'5 yr Price',
'Proj Sales',
'Proj Hi/Lo',
'Time Safe',
'STARS Fair Val',
'Current P/E',
'Average P/E',
'P/S',
'P/B',
'Current Ratio',
'Quick Ratio',
'SMA MACD RSI' ] )
writer.writerow( [
str( stock['name'] + ', ' + stock['symbol'] + ', ' + stock['phone'] ),
str( stock['currentPrice'] ),
str( stock['yearHigh'] + '/' + stock['yearLow'] ),
str( stock['marketCap'] ),
str( stock['dayDollarVolume'] ),
str( stock['sales'] ),
str( stock['netProfitMargin'] ),
str( stock['cash'] ),
str( stock['totalDebt'] ),
str( stock['salesPerShare'] ),
str( stock['cashFloatingPerShare'] ),
str( stock['earningsPerShare'] ),
str( stock['dividendYield'] ),
str( stock['returnOnEquity'] ),
str( stock['insiderBuy'] + '/' + stock['insiderOwn'] ),
str( stock['stockBuyback'] ),
str( stock['epsRank'] ),
str( stock['rpsRank'] ),
str( stock['5yrSales'] ),
str( stock['5yrPrice'] ),
str( stock['projSales'] ),
str( stock['projHi'] + '/' + stock['projLow'] ),
str( stock['time'] + '/' + stock['safety'] ),
str( stock['stars'] + '/' + stock['fairValue'] ),
str( stock['currentPE'] ),
str( stock['averagePE'] ),
str( stock['ps'] ),
str( stock['pb'] ),
str( stock['currentRatio'] ),
str( stock['quickRatio'] ),
str( stock['smaMacdRsi'] ) ] )
|
In early 2009, the Cambodian railway system was in shambles. A French company was brought in to repair the network of bridges, d eemed unsafe for travel. In the country’s second-largest city, Battambang, bright red panels were affixed to either end of an iron train track, preventing passage over the Sangker river. Inconvenienced locals took matters into their own hands, re-opening the panels to lay rotting planks of wood onto the track, facilitating unstable and unsafe passage to the other side. For locals, it was a godsend. For anyone caught unaware, their lives hung in the balance. Locals had an explanation for unsuspecting foreigners who tumbled off the bridge towards injury or death; the river spirits were calling them.
Ten years ago tomorrow, one day before Chinese New Year (of the Ox), I fell. That mighty spill dented my body—and stirred my spirit and soul—in ways I couldn’t then have foreseen.
But, like a frail phoenix, I would rise again. I’d be lowered into the pool. I’d hobble into yoga class. I would learn to walk again. Then, I would set my aching-and-waking self on an odyssey of nearly 1000 klicks. I walked the Camino de Santiago; and regardless of the unseen bumps and messy spots that I encountered along the Way, every day I inched one step closer to the sun.
Oh, how we imagine ourselves to be mighty. Infallible. Invincible. Immune from danger. In our hubris, we sleepwalk through a timeless fallacy; believing that we can blindly, unthinkingly, cross every bridge that appears in our lives, safely, untainted.
But if we fail to notice signs hidden in plain sight; and if we don’t honor our submerged stories, we are doomed, one way or another, sooner or later, to ‘fall.’ It’s what we do with our respective tumbles and setbacks that matters.
Find the seeds of your strength, nourish them well, and go on.
My stories (submerged and otherwise) come to life in my upcoming memoir, about walking the Camino de Santiago, titled (Un)Bound, Together: A Journey to the End of the Earth (and Beyond). Stay tuned for news about a release, launch and other tidbits. Follow me also on Facebook, at: My Book: Unbound Together and Healing Pilgrim.
You are amazing. I am looking forward to reading your memoir.
Thanks, I’m looking forward to getting it into print and shipped out!!
Wow, that was quite a fall. It’s funny, I am getting more aware of how the simple act of movement can become dangerous if we get caught up in the past or future, and lost to what’s happening right in front of us. I’m so glad you healed, walked again, and walked far!
Thanks for intuiting what was going on…and for the reminder! Yes, I do love long walks! Another long walk is planned for later this year – in Italy, to raise funds for EMERGENCY, the NGO that saved my life. Blessings..
|
#coding:utf-8
import codecs
class SpiderDataOutput(object):
def __init__(self):
self.filepath='kuwo.html'
self.output_head(self.filepath)
def output_head(self,path):
'''
将HTML头写进去
:return:
'''
fout=codecs.open(path,'w',encoding='utf-8')
fout.write("<html>")
fout.write("<body>")
fout.write("<table>")
fout.close()
def output_html(self,path,datas):
'''
将数据写入HTML文件中
:param path: 文件路径
:return:
'''
if datas==None:
return
fout=codecs.open(path,'a',encoding='utf-8')
for data in datas:
fout.write("<tr>")
fout.write("<td>%s</td>"%data['file_id'])
fout.write("<td>%s</td>"%data['name'])
fout.write("<td>%s</td>"%data['file_path'])
fout.write("</tr>")
fout.close()
def ouput_end(self,path):
'''
输出HTML结束
:param path: 文件存储路径
:return:
'''
fout=codecs.open(path,'a',encoding='utf-8')
fout.write("</table>")
fout.write("</body>")
fout.write("</html>")
fout.close()
|
Read stories from people like you, who have found money for their charity.
Where can you Find Money?
No one likes wasting money by accident - particularly not cash-strapped charities - so here you’ll find a few simple checks to make sure you’re not overpaying for overheads. If you are - and there are ways to claw back cash - then we’ll either give you the right tools or just do it for you.
For charities, there are pitfalls to avoid and windfalls to be had where reductions apply on overheads - but usually only for those that ask. We can show you how and weigh-up alternative suppliers who’ll offer you a better deal for your utilities - or even the best price for simply staying put with your existing one.
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo.tests.common import users
from odoo.addons.test_mass_mailing.tests import common
from odoo.addons.test_mass_mailing.models.mass_mail_test import MassMailTestBlacklist
from odoo.exceptions import AccessError, UserError
class TestBLMixin(common.MassMailingCase):
@classmethod
def setUpClass(cls):
super(TestBLMixin, cls).setUpClass()
cls.env['mail.blacklist'].create([{
'email': '[email protected]',
'active': True,
}, {
'email': '[email protected]',
'active': False,
}])
@users('emp')
def test_bl_mixin_primary_field_consistency(self):
MassMailTestBlacklist._primary_email = ['not_a_field']
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = 'not_a_list'
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = 'email_from'
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = ['email_from', 'name']
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = ['email_from']
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
@users('emp')
def test_bl_mixin_is_blacklisted(self):
""" Test is_blacklisted field computation """
record = self.env['mass.mail.test.bl'].create({'email_from': '[email protected]'})
self.assertTrue(record.is_blacklisted)
record = self.env['mass.mail.test.bl'].create({'email_from': '[email protected]'})
self.assertFalse(record.is_blacklisted)
@users('emp')
def test_bl_mixin_search_blacklisted(self):
""" Test is_blacklisted field search implementation """
record1 = self.env['mass.mail.test.bl'].create({'email_from': '[email protected]'})
record2 = self.env['mass.mail.test.bl'].create({'email_from': '[email protected]'})
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
self.assertEqual(search_res, record2)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', True)])
self.assertEqual(search_res, record2)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)])
self.assertEqual(search_res, record1)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', False)])
self.assertEqual(search_res, record1)
@users('emp')
def test_bl_mixin_search_blacklisted_format(self):
""" Test is_blacklisted field search using email parsing """
record1 = self.env['mass.mail.test.bl'].create({'email_from': 'Arya Stark <[email protected]>'})
self.assertTrue(record1.is_blacklisted)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)])
self.assertEqual(search_res, record1)
|
These Happi Coats, Short Kimono Robes, and Sushi Chef Coats are pretty much perfect for the festivals, sports events, and for all the happy occasions, which are lined up for you this season. They are also the chicest for a smart look as you switch into laid-back mood and for an Asian restaurant ensemble that goes from the kitchen to the tables with ease, respectively! And, whether you're going for the breathable comfort of pure cotton, the lush look and strength of Chinese silk, or the versatility of all the other Asian fabrics in-between, there is the perfect happi coat, short kimono robe, and sushi chef coat for you here on KimonoRobeStore.com! Of course, there is also their Asian details and designs to shower good vibes to your days and let you nail an enchantingly modern yet still traditional Asian look.
Shou Silk Happi Coat100% Silk, + More Colors!
Japanese Floral Happi Coat+ More Colors!
Bamboo Happi Coat+ More Colors!
Mon and Sayagata Japanese Lattice Sushi Chef Coat+ More Colors!
White with Red Bamboo Print Sushi Chef CoatWith Front Pockets!
Happi coats, or the short, straight-sleeved, and unlined Japanese kimono robes, which are worn over the clothes or as a shirt and tied at the waist with a belt or a rope for a traditional look, serve many purposes. In Japanese festivals and public events, happi coats are gold in that groups, teams, and organizations wear them proud with their logo. Guests and distinguished personalities wear them, too, on promotional company events, like the opening of a new store, as well as on private parties with the name of their brand, company, or product, often as an outerwear to a Western-style outfit, like a dress or pants ensemble, to give the latter a Japanese-style celebratory twist.
Happi coats have also been of great use among the working class, like the chefs and the shopkeepers, who look to their convenience and breezy comfort as protective working jacket. Wealthy individuals wear them, too, on celebrations so that their servants can easily identify them when needed. Of course, happi coats also gained popularity as bath robes, lounge robes, short dressing robes, nursing robes, and night robes. In fact, you can find them these days in an exciting plethora of designs for use as the latter. And in Japan, they are one of the top picks, just like the Japanese yukata or cotton kimono, for souvenirs, Asian gifts, Japanese gifts for foreigners, and tokens for home-stay guests.
Now, for your need of a happi coat, look only here on KimonoRobeStore.com for the best selection of designs, colors, and prices. Whether you need your happi coat as street-smart wear for the Japanese festival, as short kimono robe for your spring, summer, or fall ensemble, as traditional Japanese short robe to complete your look for a Japanese party or event, as bath robe, lounge robe, sports coat, sushi chef coat, Asian restaurant or Japanese restaurant uniform, or as an all-occasion-perfect gift that will be of great use and that will also deliver a new experience to the very important guest or gift recipient, there is the perfect happi coat here for you and for everyone in your gift list.
When it comes to happi coats, pick out the pattern or design that best reflects your style and the best fabric for its intended purpose. For the festivals, as work clothes, sports coat, and for everyday use at home, e.g., as bath robe, lounge robe, and protective clothing while you do the household chores, happi coats, which are made of 100% cotton are the best because they are breathable, easy to care for, and durable. For a smart look on parties and events, silken happi coats, such as those which are made of polyester, rayon, and poly-blend, are a fine choice because they have the classy look of silk, a lot of them are wrinkle-resistant, as such they can go without ironing, and will last for many years without showing as much wear and tear as silk does. And, if you love luxury, there are the silk happi coats in your favorite colors and designs.
Happi coats are also tailored to fit a wide range of sizes, so shopping for your crew, for your best friends, and for the whole family is an easy feat. Lovers of unique and investment-worthy clothing will also be so spoiled for choices, especially with our highly-curated selection of happi coats with auspicious and classic Asian themes and designs. And, for you with a keen eye for contemporary details, there are, of course, the happi coats in bright colors and with geometric patterns, also prized auspicious and distinctly Japanese, to feed your love for modern style.
So, go beyond just having a happi coat, a short kimono robe, lounge robe, bath robe, sushi chef coat, sports coat, and more with the happi coats of your choice here on KimonoRobeStore.com! Collect them in all their colors and designs because you can hardly to never find any to all of them around. And, shop all you want and get your order worth $150 and above with FREE Shipping across the 48 contiguous states of the USA. KimonoRobeStore.com is home to authentic traditional Asian clothing, thus every happi coat you buy is made even more special by the way it makes you feel closer to its maker�s home in East Asia. So shop now and be sure to browse our site for all the other traditional Asian clothing, especially Japanese clothing, and Asian implements that you, your crew, and your best pals need. Happy Shopping!
|
from __future__ import print_function
import sys
sys.path.append('/usr/lib/freecad/lib')
print(sys.path)
import FreeCAD# as App
import ImportGui
import FreeCADGui# as Gui
import os
import Draft#,Sketch,Part
# lets assume for now that we have all the information in a filename
# lets also assume that they are only full ball arrays no missing ball in the center)
# all distances in mm
# FIXME doesnt handle different x and y pitch
# FIXME size of balls
# NOTE incomplete bsall matrices not handled
# one should remove them by hand because impossible to handle all the fishy cases automatically
MMTOMIL = 0.3937
directory = sys.argv[2]; name = sys.argv[3]; pitch = float(sys.argv[4])
nBallx = int(sys.argv[5]); nBally = int(sys.argv[6])
length = float(sys.argv[7]); width = float(sys.argv[8])
height = float(sys.argv[9]); ballradius = pitch/4.
# go in sketch mode
Gui.activateWorkbench("SketcherWorkbench")
# create doc
App.newDocument()
App.setActiveDocument("Unnamed")
App.ActiveDocument=App.getDocument("Unnamed")
Gui.ActiveDocument=Gui.getDocument("Unnamed")
print("document created")
# create sketch
App.activeDocument().addObject('Sketcher::SketchObject','Sketch')
print("sketch added")
App.activeDocument().Sketch.Placement = App.Placement(App.Vector(0.0,0.0,0.0),App.Rotation(0.0,0.0,0.0,1.0))
Gui.activeDocument().setEdit('Sketch')
print("edit sketch")
# trace rectangle
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(width/2.0,-length/2.0,0),App.Vector(-width/2.0,-length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(-width/2.0,-length/2.0,0),App.Vector(-width/2.0,length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(-width/2.0,length/2.0,0),App.Vector(width/2.0,length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(width/2.0,length/2.0,0),App.Vector(width/2.0,-length/2.0,0)))
print("place lines")
# add circular cutout
App.ActiveDocument.Sketch.addGeometry(Part.Circle(App.Vector(-width/2.0+1,length/2.0-1,0),App.Vector(0,0,1),0.5))
App.ActiveDocument.recompute()
Gui.getDocument('Unnamed').resetEdit()
App.getDocument('Unnamed').recompute()
# create pad from sketch
Gui.activateWorkbench("PartDesignWorkbench")
App.activeDocument().addObject("PartDesign::Pad","Pad")
App.activeDocument().Pad.Sketch = App.activeDocument().Sketch
App.activeDocument().Pad.Length = height
App.ActiveDocument.recompute()
Gui.activeDocument().hide("Sketch")
# change pad color to black
Gui.getDocument("Unnamed").getObject("Pad").ShapeColor = (0.00,0.00,0.00)
Gui.getDocument("Unnamed").getObject("Pad").Visibility=False #Hide pad
# Add Cylinder
Gui.activateWorkbench("PartWorkbench")
App.ActiveDocument.addObject("Part::Cylinder","Cylinder")
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Radius = 0.5
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Height = height
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Placement = App.Placement(App.Vector(-width/2.0+1,length/2.0-1,ballradius),App.Rotation(0,0,0,1))
App.ActiveDocument.recompute()
# Ball creation
App.ActiveDocument.addObject("Part::Sphere","Sphere")
App.ActiveDocument.recompute()
FreeCAD.getDocument("Unnamed").getObject("Sphere").Radius = ballradius
App.ActiveDocument.recompute()
# Ball Array creation
Gui.activateWorkbench("ArchWorkbench")
Draft.array(App.getDocument("Unnamed").getObject("Sphere"),App.Vector(pitch,0,0),App.Vector(0,pitch,0),nBallx,nBally)
## Merge all the spheres into a single object
Gui.activateWorkbench("PartWorkbench")
shapesToFuse=[]
for obj in FreeCAD.ActiveDocument.Objects:
if obj.Name.find("Sphere") != -1:
Gui.Selection.addSelection(obj)
shapesToFuse.append(obj)
App.activeDocument().addObject("Part::MultiFuse","Fusion")
App.activeDocument().Fusion.Shapes = shapesToFuse
App.ActiveDocument.recompute()
fuse = FreeCAD.ActiveDocument.getObject("Fusion")
fuse.Placement = App.Placement(App.Vector(-(nBallx-1)*pitch/2.0,-(nBally-1)*pitch/2.0,ballradius),App.Rotation(0,0,0,1))
App.ActiveDocument.getObject("Pad").Placement = App.Placement(App.Vector(0,0,ballradius),App.Rotation(0,0,0,1))
Gui.ActiveDocument.getObject("Pad").Visibility=True
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewBottom()
## Export as a step model
exp_objects = []
for obj in FreeCAD.ActiveDocument.Objects:
# select all but indivudial Spheres and Sketch
if (obj.Name.find("Sphere") == -1) and (obj.Name.find("Sketch") == -1):
Gui.Selection.addSelection(obj)
exp_objects.append(obj)
else:
FreeCAD.ActiveDocument.removeObject(obj.Name)
App.activeDocument().addObject("Part::MultiFuse","Fusion2")
App.activeDocument().Fusion2.Shapes = exp_objects
App.ActiveDocument.recompute()
for obj in exp_objects:
FreeCAD.ActiveDocument.removeObject(obj.Name)
exp_objects= []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Fusion2"))
ImportGui.export(exp_objects,os.path.join(directory, name + '.step'))
del exp_objects
# Scale to mil before export to VRML for KiCAD use
Draft.scale(FreeCAD.ActiveDocument.ActiveObject, FreeCAD.Vector(MMTOMIL,MMTOMIL,MMTOMIL))
FreeCAD.ActiveDocument.removeObject("Fusion2")
### Export as a VRML model
exp_objects = []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Scale"))
FreeCADGui.export(exp_objects,os.path.join(directory, name + '.wrl'))
del exp_objects
exit(1)
|
Kim Kardashian celebrated husband Kanye West’s 41st birthday in style this weekend. The Keeping Up With the Kardashians star threw the rapper a huge birthday bash on Saturday.
According to a June 10 report by Us Weekly Magazine, Kim Kardashian and Kanye West partied with their closest friends and family members on Saturday night to celebrate the rapper’s birthday. Kim reportedly went all out for the bash, which took place in L.A.
Some of Kim and Kanye’s guests included the rapper’s father, Ray West, and his close friends, Pusha T, 2 Chainz, and Kid Cudi, as well as Kim’s famous sisters, Kourtney Kardashian and Kendall Jenner. Kardashian even hired professional mentalist, Lior Suchard, to provide the “supernatural entertainment” for the night. Lior, whom Kim flew in from Tel Aviv, reportedly had the crowd gathered around him as they watched him perform his many tricks.
Kim and Kanye’s friend, Teyana Taylor, and her husband, Iman Shumpert, attended the bash, and the mentalist performed a trick where he made Taylor’s body drop backward without even touching her, setting the crowd into a frenzy.
Kim Kardashian is said to have blasted Kanye West’s newest music during the party, and even had special desserts made with the rapper’s face on them. The mother-of-three seemingly thought of everything as she had Kanye’s birthday cake modeled after the cover of his most recent album, Ye. The cake looked like the mountain landscape on the cover, and even said “Happy birthday Kanye” in the same green font that was used on the album.
Kim Kardashian and guests showed off all of the lavish party moments via social media, including special lattes that she had designed for the event. Guests could choose from a latte with Kanye West’s face, or his nickname, Ye, drawn on the foam.
As previously reported by the Inquisitr, Kardashian couldn’t help but gush over West on his birthday earlier this week. The KKW Beauty founder took to Instagram to write her husband a sweet message, which included an adorable photo of him holding their youngest child, daughter Chicago.
It seems fun was had by all who attended Kanye West’s big 41st birthday party, and Kim Kardashian pulled off an epic celebration for her beloved husband.
|
import jwt
import warnings
from calendar import timegm
from datetime import datetime
from rest_framework_jwt.compat import get_username, get_username_field
from rest_framework_jwt.settings import api_settings
def jwt_payload_handler(user):
username_field = get_username_field()
username = get_username(user)
warnings.warn(
'The following fields will be removed in the future: '
'`email` and `user_id`. ',
DeprecationWarning
)
payload = {
'user_id': user.pk,
'email': user.email,
'username': username,
'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA
}
payload[username_field] = username
# Include original issued at time for a brand new token,
# to allow token refresh
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
return payload
def jwt_get_user_id_from_payload_handler(payload):
"""
Override this function if user_id is formatted differently in payload
"""
warnings.warn(
'The following will be removed in the future. '
'Use `JWT_PAYLOAD_GET_USERNAME_HANDLER` instead.',
DeprecationWarning
)
return payload.get('user_id')
def jwt_get_username_from_payload_handler(payload):
"""
Override this function if username is formatted differently in payload
"""
return payload.get('username')
def jwt_encode_handler(payload):
return jwt.encode(
payload,
api_settings.JWT_SECRET_KEY,
api_settings.JWT_ALGORITHM
).decode('utf-8')
def jwt_decode_handler(token):
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
return jwt.decode(
token,
api_settings.JWT_SECRET_KEY,
api_settings.JWT_VERIFY,
options=options,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER,
algorithms=[api_settings.JWT_ALGORITHM]
)
def jwt_response_payload_handler(token, user=None, request=None):
"""
Returns the response data for both the login and refresh views.
Override to return a custom response such as including the
serialized representation of the User.
Example:
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user).data
}
"""
return {
'token': token
}
|
Fans greatly play a very big role in the sports and there are many different kinds of fans who can be seen in the different sports team. Recently there is an aggressive action which is been performed by a deep fan of football as he is found to be a very aggressive fan of the England football team. This is the very hot news which is moving on in the present Euro cup 2016 tournament. The news is that the great England fan who is been put to prison for two years and he is also been banned to enter into the England and to watch the football match. This is the great punishment which is been given to this particular person who is the fan of the football match. He has been given the order of about five years of football banning and this is the punishment which is been given by the British government after facing a very harsh troubles in the city of Marseilles.
It is a well known incident which happened in the place of Marseilles and it is described as the most aggressive incident which is been committed by the fans since ever in the football tournament.
The fans that are named as Joe Pizarro, belonging to the Clayton Street of Kennington in the South London was greatly summoned to appear in the place of Uxbridge court and it is the most famous Magistrates court found in the city. This person is also a great fan of football and he has been summoned because he has been spotted on the great film footage. The officers of the Met football intelligence has identified him in the video which is been taken by the great French police when the match took place and it was considered to be a very big disorder also. This is the great disorder which occurred before the tournament of the great two teams which is the England and the Russia in the month of June 10.
It was yesterday all the important chronicles in England has published about this great person and the five year banning order is been imposed on this person. This is the order which is found to be the great order which affects him deeply and this person has also been summoned to pay a huge amount as fine. This order will not only make the player to stop attending or viewing the matches of the games in the abroad but also in the United Kingdom. This order and punishments are found to be the very big warning for the other fans and it is also announced that if the match is interrupted by the fans then the team which is represented on the behalf of the fans will also be summoned to severe punishments and so all the fans of the football game are now totally shocked with this incident and the news which happened in this Euro cup 2016. This will also be unforgettable news in this season of Euro cup.
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
import cirq.testing as ct
import cirq.contrib.acquaintance as cca
def test_remove_redundant_acquaintance_opportunities():
device = cca.UnconstrainedAcquaintanceDevice
a, b, c, d, e = cirq.LineQubit.range(5)
swap = cca.SwapPermutationGate()
with pytest.raises(TypeError):
ops = [cca.acquaint(a, b)]
strategy = cirq.Circuit(ops)
cca.remove_redundant_acquaintance_opportunities(strategy)
ops = [cca.acquaint(a, b), cca.acquaint(a, b)]
strategy = cirq.Circuit(ops, device=device)
diagram_before = """
0: ───█───█───
│ │
1: ───█───█───
"""
ct.assert_has_diagram(strategy, diagram_before)
cca.remove_redundant_acquaintance_opportunities(strategy)
cca.remove_redundant_acquaintance_opportunities(strategy)
diagram_after = """
0: ───█───────
│
1: ───█───────
"""
ct.assert_has_diagram(strategy, diagram_after)
ops = [cca.acquaint(a, b), cca.acquaint(c, d), swap(d, e), swap(c, d), cca.acquaint(d, e)]
strategy = cirq.Circuit(ops, device=device)
diagram_before = """
0: ───█───────────────────
│
1: ───█───────────────────
2: ───█─────────0↦1───────
│ │
3: ───█───0↦1───1↦0───█───
│ │
4: ───────1↦0─────────█───
"""
ct.assert_has_diagram(strategy, diagram_before)
cca.remove_redundant_acquaintance_opportunities(strategy)
diagram_after = """
0: ───█───────────────────
│
1: ───█───────────────────
2: ───█─────────0↦1───────
│ │
3: ───█───0↦1───1↦0───────
│
4: ───────1↦0─────────────
"""
ct.assert_has_diagram(strategy, diagram_after)
|
HeartOfDear2 . jasminnlive. GregoryBrown. AhinaraANDStev.
SARAREIMONKINKYROCHELLEFNastySubsexLionessmay .SinningAngelSonechkaaVIPkovMissMalika .FireAliceHitomiGreatmatureElenaOldHotMilf .MagnusLoveJulietRosseAemiliaSweetCandiceWons .MilkyTITS4you21jasminnliveSinningAngelPa0LiiTa .brianaaTenderTiffanyHotSweetKarenTwentyThreeCenti .JulietRosseCurvyAlycea0ExcitingEarla0ExcitingEarl .PapiJordy1HotDiamondSun0001SexyQUEENGODDES3 .KittycicusSonechkaaHovieSweetJoily .RebeccaCoxxSonechkaaTwentyThreeCentiEroticJessie .BeatriceBeautyJuicyMEAT4saleElleTaylorKINKYROCHELLEF .
SRQ7201TenderTiffanyAndyXDannyXdollebonyx .AlexaMorrenoSun0001JuicyMEAT4saleBlondieSlaveforU .Lionessmay69deboraSamantaGraySweetJoily .MissMalikaChloyaBrownCurvyAlyce4SexFantasticLat .HannaMillSRQ7201Sun0001Sun0001 .RebeccaCoxxCurvyAlyceBlondieSlaveforUSARAREIMON .BlondieSlaveforUAndrew97jasminnliveAndyXDannyX .JoshWadeVIPkovShrisKimAlexaMorreno .JoshWade19Blonde19CurvyAlyceDevilsAngelsCock .jasminnliveKotoGirlAmyMooneBlondieSlaveforU .AmyMooneSamantaGrayEdwijesyMissMalika .
|
# mycroft-skill-obitcoin-enhanced
#
# A skill for MycroftAI that querys various bitcoin statistics.
#
# Adapted from a MycroftAI skill by Red5d
#
# Licensed under the GNU General Public License v3
# (see LICENSE for more details
from os.path import dirname, join
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
import requests
__author__ = 'Red5d', 'chrison999'
class BitcoinSkill(MycroftSkill):
def __init__(self):
super(BitcoinSkill, self).__init__(name="BitcoinSkill")
def initialize(self):
intent = IntentBuilder("BitcoinAvgIntent").require("BitcoinAvgKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_avg)
intent = IntentBuilder("BitcoinHighIntent").require("BitcoinHighKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_high)
intent = IntentBuilder("BitcoinLowIntent").require("BitcoinLowKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_low)
intent = IntentBuilder("BitcoinLastIntent").require("BitcoinLastKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_last)
intent = IntentBuilder("BitcoinVolIntent").require("BitcoinVolKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_volume)
def handle_avg(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['24h_avg']
self.speak("The 24 hour average bitcoin price is "+str(price)+" "+currency+".")
def handle_high(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['ask']
self.speak("The current asking price for bitcoin is "+str(price)+" "+currency+".")
def handle_low(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['bid']
self.speak("The current bid price for bitcoin is "+str(price)+" "+currency+".")
def handle_last(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['last']
self.speak("The current price for bitcoin is "+str(price)+" "+currency+".")
def handle_volume(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['total_vol']
self.speak("The 24 hour volume for "+currency+" bitcoin is "+str(price)+" btc.")
def fiat_get(self, currency):
if currency == 'None':
currency = 'U S dollars'
result = 'USD'
return result
else:
choices = {
'reals': 'BRL',
'canadian dollars': 'CAD',
'euros': 'EUR',
'yuans': 'CNY',
'koruna': 'CZK',
'rupiahs': 'IDR',
'shekels': 'ILS',
'rupees': 'INR',
'yens': 'JPY',
'won': 'KRW',
'pesos': 'MXN',
'ringgit': 'MYR',
'nairas': 'NGN',
'zlotys': 'PLN',
'roubles': 'RUB',
'kronas': 'SEK',
'singapore dollars': 'SGD',
'lira': 'TRY',
'u s a dollars': 'USD',
'american dollars': 'USD',
'rands': 'ZAR',
'pounds': "GBP"}
result = choices.get(str(currency), 'USD')
return result
def stop(self):
pass
def create_skill():
return BitcoinSkill()
|
This is a genuine c.1920s Shops Tin Advertising Sign for PALETHORPES PORK PIES. Wonderful original condition including its original hanging chain.
You do see Palethorpes Sausages advertising items around but it is much harder to find advertising relating to their Pork Pies.
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugin import run_hook
from electrum.base_wizard import GoBack
from electrum.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config, daemon, plugins):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.nd = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_network_dialog(self, parent):
if not self.daemon.network:
parent.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
if self.nd:
self.nd.on_update()
self.nd.show()
self.nd.raise_()
return
self.nd = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.nd.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + str(e))
# if app is starting, still let wizard to appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + str(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + str(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
self.config.open_last_wallet()
path = self.config.get_wallet_path()
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
|
Does Sprint Drug Test for Pre-employment?
Does Sprint Do Random and Pre-Promotional Drug Testing?
What Kind of Drug Test Does Sprint Use?
Can You Fail Your Drug Test if you’re on Prescription Drugs?
How to Pass Urine Drug Test at Sprint?
“Does Sprint drug test for pre-employment?” sounds like a question a person who uses recreational drugs would ask before going for an interview with Sprint. This is not such a bad idea to ask. Actually, it’s a good question to ask ahead of any interview in order not to be surprised on the spot and drug tested.
Showing up for an interview and not being prepared for a potential a drug test could cost you the job. Knowing a few days ahead of time that you may have to undergo a potential drug test will give you time to abstain in order to pass a test.
Therefore, I researched the question “does Sprint drug test for pre-employment” in order to give you as much information as possible on the subject before your interview. My findings are set forth below. But first let’s talk a bit about the company and various job openings available.
Sprint was founded in 1899 and operated as a telephone landline business. In 2006, the company shifted from being a landline company to providing wireless services and becoming an internet service provider. Sprint is currently considered the 4th largest mobile network company in the United States in terms of number of subscribers, with over 50 million customers.
The company employs over 30,000 people, which explains why people are so eager to work there. Sprint is a great place to work, whether short term or long term. There are also a variety of job options available at Sprint.
You can apply as a retail consultant, sails consultant, or as a business sales account executive. If you’re into technology and have the knowledge needed, you can also apply for a job as an application developer. Openings for branch manager are also sometimes available. You can apply for these also if you have the necessary experience.
Sprint has a pretty strict ‘no drug’ policy. When they test you, they do it using a urine drug test. Failing the drug test will cost you the job. It will also likely get you fired if you already work there.
Some of the drugs they test for include cocaine, opiates, methamphetamine, benzodiazepines, barbiturates, PCP, marijuana, methadone, and propoxyphene. A drug test that shows up positive for any of these substances will certainly result in adverse consequences as I already mentioned.
It usually takes about 2 weeks for a person to be hired at Sprint. First you have to submit an application; then wait for them to call you for an interview. The first interview will be with the hiring manager. This might be it, or you could be asked to go in for a 2nd interview with the general manager.
If both interviews go well, you’ll receive a call from HR to go in for a drug test and background check. Once all of these are clear, you’ll be hired. It’s a little more complicated if you’re applying for a job as manager though. Potential managers often undergo up to 4 interviews and might even be interview in front of a panel.
The answer is sadly ‘Yes’! Through my research, I found that Sprint does in fact drug test for pre-employment. After you pass your interviews, HR will certainly call asking you to come in for a drug test. Failing to pass the test will definitely rule you out from getting the job. This applies to all Sprint branches throughout the United States. It isn’t limited to a few states.
If you’re only using drugs recreationally, it would be best to stop them at least a couple of months before your interview to ensure you pass the drug test. Otherwise, you may fail the test and won’t even be able to apply for another job at Sprint as they’ll have your record.
Yes, Sprint reserves the right to drug test its employees randomly. Unlike most places, they don’t just test those who are involved in work related accidents. Sometimes they might even ask for drug tests for everyone working in a certain branch all the way from the branch manager down to the interns.
Failing to pass a random drug test is very likely to get you fired. So, if you got the job in the first place, don’t lose it by resuming the use drugs after being hired. Try to abstain as much as possible in anticipation of any surprise drug test.
Pre-promotional drug tests are also required before making the jump to manager. Sprint’s drug policy is very strict. They’re very serious when it comes to making sure their employees aren’t using drugs.
Sprint relies on a urine drug test. You have to go to the clinic and urinate in a cup, which is then submitted for testing in a company contracted lab for the aforementioned substances.
No, you can’t fail a drug test at Sprint if you’re on prescription drugs. Just let the company know that you’re on a prescription and give them the contact info of your doctor. They might need to call your doctor to confirm that you’re on the prescription.
Nonetheless, they have no right to ask your doctor what you’re taking or why you’re taking it. That information is confidential. Just make sure to let them know that you’ve been prescribed medications that may show up positive on your drug screen.
If it is a drug test for pre-employment, not a random test, and you have enough time until your test, you detox your body naturally. This might be an effective approach if you are a light or moderate user. However, if you are heavy marijuana user, it might take from 35 to 90 days to completely cleanse your body.
If you are running out of time, and have 5-10 days until your test, you might consider using a whole body detox program like the Toxin Rid program.
If you don’t have enough time to complete a Toxin Rid program, on your drug test day you may consider using a detox drink like Mega Clean (50%/50% chance of working though).
We have seen in some emergency situations where the drug test is unsupervised some heavy users try using synthetic urine. However, there is always the risk of being caught and facing penalties for such action.
So to answer your question “does Sprint drug test for pre-employment?” the answer is yes. The company has a very strict ‘no drugs’ policy. They also conduct random drug tests for all employees, including managers. Sprint also conducts pre-promotional drug tests.
If you’re a recreational drug user who’s hoping to get hired at Sprint, then you’ll probably have to stay away from drugs – not just before the pre-employment test, but also throughout your period of work there.
We would love to hear about your experiences going through Sprint’s hiring process and drug testing procedure.
We believe others who are applying for jobs at Sprint will definitely appreciate and benefit from it.
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sickbeard
import generic
from sickbeard.common import *
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
import xml.etree.cElementTree as etree
class XBMCMetadata(generic.GenericMetadata):
def __init__(self):
generic.GenericMetadata.__init__(self)
self.name = 'XBMC'
def _show_data(self, show_obj):
"""
Creates an elementTree XML structure for an XBMC-style tvshow.nfo and
returns the resulting data object.
show_obj: a TVShow instance to create the NFO for
"""
show_ID = show_obj.tvdbid
t = tvdb_api.Tvdb(actors=True, **sickbeard.TVDB_API_PARMS)
tv_node = etree.Element("tvshow")
for ns in XML_NSMAP.keys():
tv_node.set(ns, XML_NSMAP[ns])
try:
myShow = t[int(show_ID)]
except tvdb_exceptions.tvdb_shownotfound:
logger.log(u"Unable to find show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
raise
except tvdb_exceptions.tvdb_error:
logger.log(u"TVDB is down, can't use its data to add this show", logger.ERROR)
raise
# check for title and id
try:
if myShow["seriesname"] == None or myShow["seriesname"] == "" or myShow["id"] == None or myShow["id"] == "":
logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
return False
except tvdb_exceptions.tvdb_attributenotfound:
logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
return False
title = etree.SubElement(tv_node, "title")
if myShow["seriesname"] != None:
title.text = myShow["seriesname"]
rating = etree.SubElement(tv_node, "rating")
if myShow["rating"] != None:
rating.text = myShow["rating"]
plot = etree.SubElement(tv_node, "plot")
if myShow["overview"] != None:
plot.text = myShow["overview"]
episodeguide = etree.SubElement(tv_node, "episodeguide")
episodeguideurl = etree.SubElement( episodeguide, "url")
episodeguideurl2 = etree.SubElement(tv_node, "episodeguideurl")
if myShow["id"] != None:
showurl = sickbeard.TVDB_BASE_URL + '/series/' + myShow["id"] + '/all/en.zip'
episodeguideurl.text = showurl
episodeguideurl2.text = showurl
mpaa = etree.SubElement(tv_node, "mpaa")
if myShow["contentrating"] != None:
mpaa.text = myShow["contentrating"]
tvdbid = etree.SubElement(tv_node, "tvdbid")
if myShow["id"] != None:
tvdbid.text = myShow["id"]
genre = etree.SubElement(tv_node, "genre")
if myShow["genre"] != None:
genre.text = " / ".join([x for x in myShow["genre"].split('|') if x])
premiered = etree.SubElement(tv_node, "premiered")
if myShow["firstaired"] != None:
premiered.text = myShow["firstaired"]
studio = etree.SubElement(tv_node, "studio")
if myShow["network"] != None:
studio.text = myShow["network"]
for actor in myShow['_actors']:
cur_actor = etree.SubElement(tv_node, "actor")
cur_actor_name = etree.SubElement( cur_actor, "name")
cur_actor_name.text = actor['name']
cur_actor_role = etree.SubElement( cur_actor, "role")
cur_actor_role_text = actor['role']
if cur_actor_role_text != None:
cur_actor_role.text = cur_actor_role_text
cur_actor_thumb = etree.SubElement( cur_actor, "thumb")
cur_actor_thumb_text = actor['image']
if cur_actor_thumb_text != None:
cur_actor_thumb.text = cur_actor_thumb_text
# Make it purdy
helpers.indentXML(tv_node)
data = etree.ElementTree(tv_node)
return data
def _ep_data(self, ep_obj):
"""
Creates an elementTree XML structure for an XBMC-style episode.nfo and
returns the resulting data object.
show_obj: a TVEpisode instance to create the NFO for
"""
eps_to_write = [ep_obj] + ep_obj.relatedEps
try:
t = tvdb_api.Tvdb(actors=True, **sickbeard.TVDB_API_PARMS)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(str(e))
except tvdb_exceptions.tvdb_error, e:
logger.log(u"Unable to connect to TVDB while creating meta files - skipping - "+str(e).decode('utf-8'), logger.ERROR)
return
if len(eps_to_write) > 1:
rootNode = etree.Element( "xbmcmultiepisode" )
else:
rootNode = etree.Element( "episodedetails" )
# Set our namespace correctly
for ns in XML_NSMAP.keys():
rootNode.set(ns, XML_NSMAP[ns])
# write an NFO containing info for all matching episodes
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
logger.log(u"Creating metadata for episode "+str(ep_obj.season)+"x"+str(ep_obj.episode), logger.DEBUG)
if len(eps_to_write) > 1:
episode = etree.SubElement( rootNode, "episodedetails" )
else:
episode = rootNode
title = etree.SubElement( episode, "title" )
if curEpToWrite.name != None:
title.text = curEpToWrite.name
season = etree.SubElement( episode, "season" )
season.text = str(curEpToWrite.season)
episodenum = etree.SubElement( episode, "episode" )
episodenum.text = str(curEpToWrite.episode)
aired = etree.SubElement( episode, "aired" )
if curEpToWrite.airdate != datetime.date.fromordinal(1):
aired.text = str(curEpToWrite.airdate)
else:
aired.text = ''
plot = etree.SubElement( episode, "plot" )
if curEpToWrite.description != None:
plot.text = curEpToWrite.description
displayseason = etree.SubElement( episode, "displayseason" )
if myEp.has_key('airsbefore_season'):
displayseason_text = myEp['airsbefore_season']
if displayseason_text != None:
displayseason.text = displayseason_text
displayepisode = etree.SubElement( episode, "displayepisode" )
if myEp.has_key('airsbefore_episode'):
displayepisode_text = myEp['airsbefore_episode']
if displayepisode_text != None:
displayepisode.text = displayepisode_text
thumb = etree.SubElement( episode, "thumb" )
thumb_text = myEp['filename']
if thumb_text != None:
thumb.text = thumb_text
watched = etree.SubElement( episode, "watched" )
watched.text = 'false'
credits = etree.SubElement( episode, "credits" )
credits_text = myEp['writer']
if credits_text != None:
credits.text = credits_text
director = etree.SubElement( episode, "director" )
director_text = myEp['director']
if director_text != None:
director.text = director_text
rating = etree.SubElement( episode, "rating" )
rating_text = myEp['rating']
if rating_text != None:
rating.text = rating_text
gueststar_text = myEp['gueststars']
if gueststar_text != None:
for actor in gueststar_text.split('|'):
cur_actor = etree.SubElement( episode, "actor" )
cur_actor_name = etree.SubElement(
cur_actor, "name"
)
cur_actor_name.text = actor
for actor in myShow['_actors']:
cur_actor = etree.SubElement( episode, "actor" )
cur_actor_name = etree.SubElement( cur_actor, "name" )
cur_actor_name.text = actor['name']
cur_actor_role = etree.SubElement( cur_actor, "role" )
cur_actor_role_text = actor['role']
if cur_actor_role_text != None:
cur_actor_role.text = cur_actor_role_text
cur_actor_thumb = etree.SubElement( cur_actor, "thumb" )
cur_actor_thumb_text = actor['image']
if cur_actor_thumb_text != None:
cur_actor_thumb.text = cur_actor_thumb_text
#
# Make it purdy
helpers.indentXML( rootNode )
data = etree.ElementTree( rootNode )
return data
# present a standard "interface" from the module
metadata_class = XBMCMetadata
|
Catskill Mountains (N.Y.)--Social life and customs.
Painter. Sawyier, an impressionist from Frankfort, Ky, studied briefly under both William Merrit Chase in New York, and Frank Duveneck in Cincinnati, OH. Sawyier lived mostly in Kentucky, painting watercolors and oils of scenes in the central part of the state, particularly in Frankfort. He spent the last few years of his life in the Catskill Mountains in New York, where he did some of his best work.
These are two sets of letters written by Paul Sawyier. The first set of letters (1910-1917) was written to John Wilson Townsend of Lexington, Kentucky (1VF52M48) and the other set (1911-1917) was written to J.J. King of Frankfort, Ky. (1VF64M11). The majority of the letters were written from High Bridge, Ky., where Sawyier had temporarily docked his houseboat. They concern his work, particularly his progress on a portrait of bishop Henry B, Bascom, an early president of Transylvania University. He also mentions his agent, C.F. Bower and Company (Lexington furniture store) and the Shaker Ferry Boat Company of High Bridge, Ky.
One letter to Townsend is from Highmount, New York, where Sawyier was living on the estate of Mrs. Marshall Emory in the Catskill Mountains. In the letter, Sawyier discusses his large studio, another guest of Mrs. Emory, Belgian artist Edward Buyck, and arrangements he is making for exhibitions in Albany, NY and New York, NY. Sawyier says that he enjoys being around other artists and that he, "can almost hold my own in the bunch" (February 3, 1916 letter).
The photocopied letters from Sawyier to his major patron, John J. King, are with two exceptions, written from Camp Nelson, Ky., another place that Sawyier had moored his houseboat. The letters concern Sawyier's work for Mr. King. Two letters are from Fleishmans, NY, the village next to Highmount, where Sawyier had moved into the home of Phillip F. Schaefer, an amateur artist. One of the letters to King mentions that Sawyier sent "a small bunch of watercolors of the old time things around Frankfort" to King, for which he was paid two hundred dollars.
|
"""This module generates the "bytes" module which contains various
byte munging C functions: copying, alignment, byteswapping, choosing,
putting, taking.
WARNING: This module exists solely as a mechanism to generate a
portion of numarray and is not intended to provide any
post-installation functionality.
"""
from basecode import CodeGenerator, template, _HEADER
BYTES_HEADER = _HEADER + \
'''
#include <assert.h>
#define NA_ACOPYN(i, o) memcpy(o, i, N)
/* The following is used to copy nbytes of data for each element. **
** As such it can be used to align any sort of data provided the **
** output pointers used are aligned */
static int copyNbytes(long dim, long nbytes, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i, j;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
for (j=0; j<nbytes; j++) {
*tout++ = *tin++;
}
tin = tin + inbstrides[dim] - nbytes;
tout = tout + outbstrides[dim]- nbytes;
}
}
else {
for (i=0; i<niters[dim]; i++) {
copyNbytes(dim-1, nbytes, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(copyNbytes, !CHECK_ALIGN, -1, -1);
/* Copy a data buffer to a new string
**
** Arguments:
**
** Tuple of iteration values for each dimension of input array.
** Input buffer object.
** Input byte offset.
** Tuple of input byte strides.
** Size of input data item in bytes.
**
** Returns Python string.
*/
static PyObject *copyToString(PyObject *self, PyObject *args) {
PyObject *inbuffObj;
PyObject *nitersObj, *inbstridesObj;
PyObject *otemp, *outstring;
long ltemp;
int nniters, ninbstrides, nargs;
long nbytes;
maybelong niters[MAXDIM], inbstrides[MAXDIM], outbstrides[MAXDIM];
void *inbuffer, *outbuffer;
long i, inbsize, outbsize, nelements=1, inboffset;
nargs = PyObject_Length(args);
if (!PyArg_ParseTuple(args, "OOlOl",
&nitersObj, &inbuffObj, &inboffset, &inbstridesObj, &nbytes))
return NULL;
if (!PySequence_Check(nitersObj))
return PyErr_Format(PyExc_TypeError,
"copyToString: invalid shape object");
if (!PySequence_Check(inbstridesObj))
return PyErr_Format(PyExc_TypeError,
"copyToString: invalid strides object");
nniters = PyObject_Length(nitersObj);
ninbstrides = PyObject_Length(inbstridesObj);
if (nniters != ninbstrides)
return PyErr_Format(PyExc_ValueError,
"copyToString: shape & strides don't match");
for (i=nniters-1; i>=0; i--) {
otemp = PySequence_GetItem(nitersObj, i);
if (PyInt_Check(otemp))
ltemp = PyInt_AsLong(otemp);
else if (PyLong_Check(otemp))
ltemp = PyLong_AsLong(otemp);
else
return PyErr_Format(PyExc_TypeError,
"copyToString: non-integer shape element");
nelements *= ltemp;
niters[nniters-i-1] = ltemp;
Py_DECREF(otemp);
otemp = PySequence_GetItem(inbstridesObj, i);
if (PyInt_Check(otemp))
inbstrides[nniters-i-1] = PyInt_AsLong(otemp);
else if (PyLong_Check(otemp))
inbstrides[nniters-i-1] = PyLong_AsLong(otemp);
else
return PyErr_Format(PyExc_TypeError,
"copyToString: non-integer stride element");
Py_DECREF(otemp);
}
if (!nelements)
return PyString_FromStringAndSize("", 0);
outbstrides[0] = nbytes;
for (i=1; i<nniters; i++) {
outbstrides[i] = outbstrides[i-1]*niters[i-1];
}
outbsize = outbstrides[nniters-1]*niters[nniters-1];
outstring = PyString_FromStringAndSize(NULL, outbsize);
if (!outstring)
return NULL;
outbuffer = (void *) PyString_AsString(outstring);
if ((inbsize = NA_getBufferPtrAndSize(inbuffObj, 1, &inbuffer)) < 0)
return PyErr_Format(PyExc_TypeError,
"copyToString: Problem with array buffer");
if (NA_checkOneStriding("copyToString", nniters, niters,
inboffset, inbstrides, inbsize, nbytes, 0) ||
NA_checkOneStriding("copyToString", nniters, niters,
0, outbstrides, outbsize, nbytes, 0))
return NULL;
BEGIN_THREADS
copyNbytes(nniters-1, nbytes, niters,
inbuffer, inboffset, inbstrides, outbuffer, 0, outbstrides);
END_THREADS
return outstring;
}
/* chooseXbytes functions are called as uFuncs... */
enum CLIP_MODE {
CLIPPED,
WRAPPED,
RAISE
};
#define wrap(i, max) \
while(i < 0) \
i += max; \
while(i >= max) \
i -= max;
static int takeNbytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, N;
maybelong *scatteredstrides, *scatteredshape, **indices;
char *gathered, *scattered;
maybelong nindices = ninargs-4, outi = ninargs+noutargs-1;
if (NA_checkIo("takeNbytes", 4, 1, MIN(ninargs, 4), noutargs))
return -1;
if (nindices == 0)
return 0;
if (NA_checkOneCBuffer("takeNbytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("takeNbytes", nindices, buffers[2], bsizes[2], sizeof(maybelong)))
return -1;
else {
scatteredstrides = (maybelong *) buffers[2];
}
if (NA_checkOneCBuffer("takeNbytes", nindices, buffers[3], bsizes[3], sizeof(maybelong)))
return -1;
else {
scatteredshape = (maybelong *) buffers[3];
}
if (NA_checkOneStriding("takeNBytes", nindices, scatteredshape, 0, scatteredstrides, bsizes[1], N, 0))
return -1;
else
scattered = (char *) buffers[1];
for(i=4; i<nindices; i++)
if (NA_checkOneCBuffer("takeNbytes", niter, buffers[i], bsizes[i], sizeof(maybelong)))
return -1;
indices = (maybelong **) &buffers[4];
if (NA_checkOneCBuffer("takeNbytes", niter*N, buffers[outi], bsizes[outi], 1))
return -1;
else
gathered = (char *) buffers[ninargs+noutargs-1];
switch( cMode )
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
wrap(k, scatteredshape[j]);
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
case CLIPPED:
default:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k = 0;
else if (k >= scatteredshape[j])
k = scatteredshape[j]-1;
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k += scatteredshape[j];
if (k >= scatteredshape[j]) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(takeNbytes, CFUNC_UFUNC);
static int putNbytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, N;
maybelong *scatteredstrides, *scatteredshape, **indices;
char *gathered, *scattered;
long nindices = ninargs-4, outi = ninargs+noutargs-1;
if (nindices == 0)
return 0;
if (NA_checkIo("putNbytes", 4, 1, MIN(ninargs, 4), noutargs))
return -1;
if (NA_checkOneCBuffer("putNbytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("putNbytes", niter*N, buffers[1], bsizes[1], 1))
return -1;
else
gathered = (char *) buffers[1];
if (NA_checkOneCBuffer("putNbytes", nindices, buffers[2], bsizes[2], sizeof(maybelong)))
return -1;
else {
scatteredstrides = (maybelong *) buffers[2];
}
if (NA_checkOneCBuffer("putNbytes", nindices, buffers[3], bsizes[3], sizeof(maybelong)))
return -1;
else {
scatteredshape = (maybelong *) buffers[3];
}
for(i=4; i<nindices; i++)
if (NA_checkOneCBuffer("putNbytes", niter, buffers[i], bsizes[i], sizeof(maybelong)))
return -1;
indices = (maybelong **) &buffers[4];
if (NA_checkOneStriding("putNBytes", nindices, scatteredshape, 0, scatteredstrides, bsizes[outi], N, 0))
return -1;
else
scattered = (char *) buffers[outi];
switch( cMode )
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
wrap(k, scatteredshape[j]);
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
case CLIPPED:
default:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k = 0;
else if (k >= scatteredshape[j])
k = scatteredshape[j]-1;
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k += scatteredshape[j];
if (k >= scatteredshape[j]) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(putNbytes, CFUNC_UFUNC);
'''
COPY_TEMPLATE = \
'''
/*******************************************
* *
* These copy data to a contiguous buffer. *
* They do not handle non-aligned data. *
* Offsets and Strides are in byte units *
* *
*******************************************/
static int copy<size>bytes(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
NA_ACOPY<size>(tin, tout);
tin += inbstrides[dim];
tout += outbstrides[dim];
}
}
else {
for (i=0; i<niters[dim]; i++) {
copy<size>bytes(dim-1, dummy, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(copy<size>bytes, CHECK_ALIGN, <size>, <size>);
'''
ALIGN_TEMPLATE = \
'''
static int align<size>bytes(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
return copyNbytes(dim, <size>, niters, input, inboffset, inbstrides,
output, outboffset, outbstrides);
}
STRIDING_DESCR2(align<size>bytes, !CHECK_ALIGN, <size>, <size>);
'''
BYTESWAP_TEMPLATE = \
'''
/******* byteswap *****/
static int byteswap<sizename>(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
char t[<size>];
NA_COPY<size>(tin, t);
<swapkind><size>(t, tout);
tin += inbstrides[dim];
tout += outbstrides[dim];
}
}
else {
for (i=0; i<niters[dim]; i++) {
byteswap<sizename>(dim-1, dummy, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(byteswap<sizename>, !CHECK_ALIGN, <size>, <size>);
'''
CHOOSE_TEMPLATE = \
'''
static int choose<size>bytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, maxP, N, *selector;
char **population, *output;
int outi = ninargs + noutargs - 1;
if (NA_checkIo("choose<size>bytes", 2, 1, MIN(ninargs,2), noutargs))
return -1;
if (NA_checkOneCBuffer("choose<size>bytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("choose<size>bytes", niter, buffers[1], bsizes[1],
sizeof(maybelong)))
return -1;
else
selector = (maybelong *) buffers[1];
if (ninargs-2 == 0)
return 0;
else
maxP = ninargs-2;
for(i=2; i<ninargs; i++)
if (NA_checkOneCBuffer("choose<size>bytes", niter,
buffers[i], bsizes[i], <size>))
return -1;
population = (char **) &buffers[2];
if (NA_checkOneCBuffer("choose<size>bytes", niter,
buffers[outi], bsizes[outi], <size>))
return -1;
else
output = (char *) buffers[outi];
if (maxP == 0)
return 0;
switch(cMode)
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
wrap(j, maxP);
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
default:
case CLIPPED:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
if (j < 0)
j = 0;
else if (j >= maxP)
j = maxP-1;
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
if ((j < 0) || (j >= maxP)) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(choose<size>bytes, CFUNC_UFUNC);
'''
BYTES_TEMPLATE = ( COPY_TEMPLATE +
ALIGN_TEMPLATE +
BYTESWAP_TEMPLATE +
CHOOSE_TEMPLATE )
# ============================================================================
# IMPORTANT: no <>-sugared strings below this point
# translate <var> --> %(var)s in templates seen *so far*
template.sugar_dict(globals())
# ============================================================================
bytesconfig = [
["1", "Int8"],
["2", "Int16"],
["4", "Int32"],
["8", "Float64"],
["16", "Complex64"],
];
class BytesParams:
def __init__(self, size, type):
self.size = size
self.sizename = str(size) + "bytes"
self.typename = type
self.swapkind = "NA_SWAP"
NBytesParams = BytesParams("N","AnyType")
class ComplexBytesParams:
def __init__(self, size, type):
self.size = size
self.sizename = type
self.typename = type
self.swapkind = "NA_COMPLEX_SWAP"
Complex32BytesCfg = ComplexBytesParams(8, "Complex32")
Complex64BytesCfg = ComplexBytesParams(16, "Complex64")
class BytesCodeGenerator(CodeGenerator):
def __init__(self, *components):
CodeGenerator.__init__(self, *components)
self.module = "_bytes"
self.qualified_module = "numarray._bytes"
def gen_body(self):
for cfg in bytesconfig:
t = apply(BytesParams, cfg)
self.codelist.append((self.separator + BYTES_TEMPLATE) %
t.__dict__)
self.addcfunc("copy"+ t.sizename)
self.addcfunc("byteswap"+t.sizename)
self.addcfunc("align"+t.sizename)
self.addcfunc("choose"+t.sizename)
self.codelist.append((self.separator + CHOOSE_TEMPLATE) %
NBytesParams.__dict__)
self.addcfunc("chooseNbytes")
self.addcfunc("copyNbytes")
self.addcfunc("putNbytes")
self.addcfunc("takeNbytes")
# Hack in the type based (not size based) methods for complex
self.codelist.append((self.separator + BYTESWAP_TEMPLATE) %
Complex32BytesCfg.__dict__)
self.addcfunc("byteswapComplex32" )
self.codelist.append((self.separator + BYTESWAP_TEMPLATE) %
Complex64BytesCfg.__dict__)
self.addcfunc("byteswapComplex64" )
self.addmethod("copyToString")
generate_bytes_code = BytesCodeGenerator(BYTES_HEADER)
|
My tiny urban garden—here along an alley between 46th/47th, Osage, and Larchwood—is producing lots of beautiful herbs and veggies. It’s south-facing and so gets tons of sun (and the worst heat) but it drains well and we’ve had a good deal of rain. Here are some mid-August wonders.
The day lilies are now out, and looking good despite the 90+-degree heat. Click on an image for a larger view.
|
import input_output
def launch_input_output_menu():
print("I/O Section - Enter the exercise number you want to run")
selection = 0
while selection != 7:
print("exercise #1")
print("exercise #2")
print("exercise #3")
print("exercise #4")
print("exercise #5")
print("exercise #6")
print("exit #7")
selection = int(input("Insert Selection = "))
if selection == 1:
print("Extract users form a file")
for key, value in sorted(input_output.extracted_users.items()):
print('{0} {1}'.format(key, value))
elif selection == 2:
print("Apply word count on a file")
file_info = input_output.wc('passwd')
print("{0} characters {1} words {2} lines and {3} "
"unique words in file".format(file_info[0], file_info[1],
file_info[2], file_info[3]))
elif selection == 3:
print("Output users to a file")
file_users = input_output.extract_users('passwd')
input_output.output_users_to_file(file_users, 'output.csv')
elif selection == 4:
print("Read text file")
print(input_output.read_text("text.txt"))
elif selection == 5:
some_words = input_output.read_text("text.txt")
words_statistics = input_output.word_count(some_words)
print(words_statistics)
elif selection == 6:
some_words = input_output.read_text("text.txt")
words_statistics = input_output.word_count(some_words)
word = input_output.word_with_max_occurence(words_statistics)
print("Word with most occurences = " + word)
elif selection == 7:
print("exit")
launch_input_output_menu()
|
There is a movie adaptation for the classic fairytale villain Rumpelstiltskin on the horizon and Peter Dinklage is set to star as the titular deal-making trickster.
"Rumpelstiltskin" movie will be based on the Brothers Grimm story. It is currently under development at Sony and Peter Dinklage is attached to star and to produce.
The fairytale is said to be originated around 4,000 years ago. It is about a girl making a deal with Rumpelstiltskin after her father lies to the king about her being able to spin straw into gold. The king shuts her in the tower and demands her to spin a room full of straw to gold by morning, if not, her head will be cut off. Then the "imp-like creature" appears and does the deed in return for her necklace. This goes on for a few nights until she has nothing to trade for, and of course, Rumpelstiltskin wants her firstborn child for a final favor. Suffice to say, this promise ultimately bites her in the rear end.
According to Variety, the project is still in deep development but it is a priority for the actor and the studio.
Plot details are kept under wraps, so it is still not clear whether or not the movie will be a modern adaptation of the classic tale.
"A Monster Calls" author Patrick Ness is set to write the screenplay for the movie.
You can also watch Dinklage in the upcoming end of the world movie "I Think We're Alone Now" and also the HBO movie “My Dinner With Herve”, before the final season of "Game of Thrones" airs.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" STL to SCAD converter.
This code is based on Riham javascript code.
See http://www.thingiverse.com/thing:62666
Ascii STL file:
solid _40x10
facet normal 0.000000e+000 0.000000e+000 1.000000e+000
outer loop
vertex 1.286803e+001 2.957990e+001 1.200000e+001
vertex 1.173648e+001 2.984808e+001 1.200000e+001
vertex 1.115715e+001 2.953001e+001 1.200000e+001
endloop
endfacet
facet normal 0.000000e+000 0.000000e+000 1.000000e+000
outer loop
vertex 1.115715e+001 2.953001e+001 1.200000e+001
vertex 1.173648e+001 2.984808e+001 1.200000e+001
vertex 1.058145e+001 2.998308e+001 1.200000e+001
endloop
endfacet
...
ensolid
Binary STL file:
"""
import re
import sys
import struct
import os.path
USE_FACES = True # set to False for OpenSCAD version < 2014.03
def parseAscii(inputFile):
"""
"""
inputFile.seek(0)
inputStr = inputFile.read()
modules = []
solidName = None
vertices = None
faces = None
face = None
for solidStr in re.findall(r"solid\s(.*?)endsolid", inputStr, re.S):
solidName = re.match(r"^(.*)$", solidStr, re.M).group(0)
print "Processing object %s..." % solidName
vertices = []
faces = []
for facetStr in re.findall(r"facet\s(.*?)endfacet", solidStr, re.S):
for outerLoopStr in re.findall(r"outer\sloop(.*?)endloop", facetStr, re.S):
face = []
for vertexStr in re.findall(r"vertex\s(.*)$", outerLoopStr, re.M):
vertex = [float(coord) for coord in vertexStr.split()]
try:
face.append(vertices.index(vertex))
except ValueError:
vertices.append(str(vertex))
face.append(len(vertices) - 1)
faces.append(str(face))
modules.append((solidName, vertices, faces))
return modules
def parseBinary(inputFile, solidName="stl2scad"):
"""
"""
# Skip header
inputFile.seek(80)
nbTriangles = struct.unpack("<I", inputFile.read(4))[0]
print "found %d faces" % nbTriangles
modules = []
vertices = []
faces = []
face = None
# Iterate over faces
for i in range(nbTriangles):
face = []
# Skip normal vector (3x uint32)
inputFile.seek(3*4, 1)
# Iterate over vertices
for j in range(3):
vertex = struct.unpack("<fff", inputFile.read(3*4))
#print repr(s), repr(vertex)
try:
face.append(vertices.index(vertex))
except ValueError:
vertices.append(str(list(vertex)))
face.append(len(vertices) - 1)
faces.append(str(face))
# Skip byte count
inputFile.seek(2, 1)
modules.append((solidName, vertices, faces))
return modules
def convert(outputFile, modules):
"""
"""
for solidName, vertices, faces in modules:
points_ = ",\n\t\t\t".join(vertices)
faces_ = ",\n\t\t\t".join(faces)
if USE_FACES:
module = "module %s() {\n\tpolyhedron(\n\t\tpoints=[\n\t\t\t%s\n\t\t],\n\t\tfaces=[\n\t\t\t%s\n\t\t]\n\t);\n}\n\n\n%s();\n" % (solidName, points_, faces_, solidName)
else:
module = "module %s() {\n\tpolyhedron(\n\t\tpoints=[\n\t\t\t%s\n\t\t],\n\t\ttriangles=[\n\t\t\t%s\n\t\t]\n\t);\n}\n\n\n%s();\n" % (solidName, points_, faces_, solidName)
outputFile.write(module)
outputFile.close()
def main():
inputFileName = sys.argv[1]
inputFile = file(inputFileName)
# Check if ascii or binary
if inputFile.read(5) == "solid":
print "ascii file"
modules = parseAscii(inputFile)
else:
print "binary file"
modules = parseBinary(inputFile)
outputFileName = "%s%s%s" % (os.path.splitext(inputFileName)[0], os.path.extsep, "scad")
outputFile = file(outputFileName, "w")
convert(outputFile, modules)
print "%s saved" % outputFileName
if __name__ == "__main__":
main()
|
Visionary and engaged, Charles of Gaulle was one of the first to think Europe like an entity intended to create a “solidarity in fact” without attacking national sovereignties.
Born in Lille on November 22, 1890 , it leaves graduate military Ecole of Saint-Cyr military school in 1912 and becomes officer the day before Première Guerre Mondiale, where it obtains Légion of honor.
During Seconde Guerre Mondiale, it is as an under-secretary of State to Guerre that it is opposed to Armistice and leaves France for England on June 17, 1940.
The call of June 18, 1940.
June 18, 1940 , General de Gaulle addresses to the French on the waves of the BBC and lance his call to Résistance. He directs then Forces French Libres and becomes the symbol of resistance to the German occupant. Chief of the provisional government as from October 1945, it resigns in January 1946 in opposition to Constitution of IVème République which it considers it unable to ensure the stability of the State. He melts Rassemblement for France (RPF) and writes the three volumes of his Mémoires between 1947 and 1959.
During its called political absence “crossed desert”, it criticizes, often and in a vehement way, the methods of European construction.
It rejects the supranational character of Communauté European of coal and steel (ECSC) and disapproves the project of Communauté European of defense (CED), which it contributes to ruin Assemblée Nationale in 1954. The principle of a European army including/understanding of the German quotas, moreover more placed under command of NATO (i.e. American), appears an unacceptable questioning to him of the sovereignty-main road.
It is recalled because of the developments in Algeria and makes approve by referendum the constitution of Vème République. December 21, 1958 , Charles of Gaulle is elected Président of République Frenchwoman by the indirect vote for all.
1959-1968 : France in Europe.
It adopts the idea of European integration at that time and ensures the entry of France in economic Communauté European (the EEC) following the signature of the Treaty of Rome in 1957. This one envisages the economic co-operation, progressive abolition of the tariff barriers between the Member States and the installation of common customs tariffs with-the outside.
Charles of Gaulle undertakes above all to consolidate the Franco-German agreement, because in its eyes the two people constitute the pivot of Europe . It is in the sense that it signs on January 22, 1963 with Konrad Adenauer Traité of the Elysium which marks the beginning of a new era of the Franco-German relations, marked by the cultural co-operation, the actions in favor of the bringing together of the youth of the two countries or the intergovernmental dialog.
Its attitude vis-a-vis the European Community and with its widenings remains careful. It twice opposes the veto of France to the accession of the United Kingdom at the Common Market, in 1963 and 1966-1967, because it strategically judges Britanniques too close to the United States .
If it rejects with force the federalistic ideas of which the Traité de Rome took care, Charles of Gaulle adheres resolutely to the prospect for European free trade. He proposes on September 5, 1960 a European co-operation in political matter and either only economic. This idea is taken up at the time of European Sommet of Bonn the following year.
the PAC and the policy of “the empty chair”.
The implementation of agricultural Politique common (PAC) illustrates well the way in which General de Gaulle considers the application of the Traité de Rome. According to him, the agricultural Common Market is indeed inseparable from the industrial Common Market provided for in the treaty. January 14 1962 are adopted the first payments on the PAC.
From July 1, 1965 , it wishes to let know in a spectacular way that France does not intend to let limit its choices and decisions by the Community authorities.
It thus practices the policy known as of “the empty chair” by suspending the participation of the French representation in the meetings of the Conseil des Ministres of the EEC, in order to express all at the same time its refusal of the application of the principle of the majority qualified in the Community decision-makings and its opposition to the insufficiency of the Commission proposals about the financing of the PAC.
After six months of blocking, in January 1966, the Luxembourg compromise on the work of the institutions, allows in a State to ask for the change of mode of vote when a Community draft decision port risk reached to a judge-vital national interest.
Charles of Gaulle remains very attached to a certain idea of Europe , that of a free association of Etats sovereigns. It is in the sense that it proposes the Fouchet plan, which envisages Conseil bringing together the chiefs of government, parliamentary Assemblée European and political Commission European. But, April 17, 1962 , the governments Belgian and Dutch oppose their veto to the French plan.
One month later, May 15, 1962, in a press conference in front of all his ministers, General de Gaulle opposes the supranationality categorically : “Dante, Goethe, Chateaubriant […] would not have been useful much if they had been stateless people and if they had thought, written in some Esperanto or volapük integrated”. Five ministers, whose Pierre Pflimlin, leave the government then, outraged abusive treatment inflicted with the European idea.
Charles of Gaulle designs Europe like a space of mutual aid and solidarity, where no member abdicates nor his national identity if the prerogatives of his sovereignty. A thought certainly founded on the nation, but which recognizes in pacified Europe and thrives the only possible future of the nations of Europe, vis-a-vis the dramatic wars of last and with the international stakes of the present.
Europe which it considers must affirm itself vis-a-vis the United States and refuse any subordination in their connection. If the diplomacy and defense remain national competences, it wishes to see him playing a part in the development aid, as it affirms it in Washington in 1960. This mission is more specifically reserved for Fond Européen de Développement than it strongly promoted near the Community authorities.
After having dominated the French political life during more than 30 years, it gave up the capacity in 1969, the shortly after the failure of the referendum.
He dies EPU after, November 9, 1970 , in his house of Colombey-les-deux-églises.
Speech made in front of the provisional Assemblée consultative in Algiers, March 18, 1944, in Discours and Messages, T. 1, pp. 387-388.
“No one more than me is not convinced of the need for building Europe … Between Europe that the Commonwealth requests and Germany which seeks itself, I always thought that France was intended by its geography to even promote Union European”.
Letter in Comte Richard NR. of Coudenhove-Kalergi, enthusiastic defender of a European political union, in 1948.
Speech made in Lille on June 29, 1947 , in Discours and Messages, T. 2, pp. 87-88.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
User controls for interactivity.
"""
import logging
import numpy as np
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from qplotutils import CONFIG
from .ui.playback import Ui_PlaybackControl
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "[email protected]"
__status__ = "Development"
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
class PlaybackWidget(QWidget):
""" Playback control widget with the following button
* toggle playback / pause
* advance one step forward
* advance one step back
The current timestamp is inidcated by slider and a line edit.
Models / Visualization that choose to be controlled through the playback widget should
connect to :meth:`qplotutils.player.PlaybackWidget.timestamp_changed`.
"""
#: emited whenever the timestamp is changed.
timestamp_changed = Signal(int, float)
def __init__(self, parent=None):
super(PlaybackWidget, self).__init__(parent)
self.ui = Ui_PlaybackControl()
self.ui.setupUi(self)
self.__is_playing = False
self.__timestamps = None
self.__last_index = None
self.ui.button_play_pause.clicked.connect(self.play_pause)
self.ui.button_back.clicked.connect(self.step_back)
self.ui.button_next.clicked.connect(self.step_forward)
self.ui.slider_index.valueChanged.connect(self._slider_value_changed)
self.ui.slider_index.sliderPressed.connect(self._slider_pressed)
self.ui.edit_timestamp.textEdited.connect(self.jump_to_timestamp)
if CONFIG.debug:
self.timestamp_changed.connect(self.debug_slider)
def jump_to_timestamp(self, text):
try:
_log.debug(text)
ts = float(text)
idx, = np.where(self.timestamps == ts)
self.ui.slider_index.setValue(idx[0])
except Exception as ex:
_log.info(
"Could not set timestamp. Format no recognized or out of interval."
)
_log.debug("Exception %s", ex)
def debug_slider(self, index, timestamp):
_log.debug("{}: {}".format(index, timestamp))
@property
def timestamps(self):
return self.__timestamps
@timestamps.setter
def timestamps(self, value):
self.__timestamps = value
self.__last_index = len(value)
self.ui.slider_index.setMinimum(0)
self.ui.slider_index.setMaximum(self.__last_index)
self.ui.slider_index.setValue(0)
def _slider_pressed(self):
self.pause()
def _slider_value_changed(self, value):
ts = self.timestamps[value]
self.ui.edit_timestamp.setText("{}".format(ts))
self.timestamp_changed.emit(value, ts)
def play_pause(self):
if self.__is_playing:
self.pause()
else:
self.play()
def pause(self):
if not self.__is_playing:
return
self.ui.button_play_pause.setIcon(
QIcon(":/player/icons/media-playback-start.svg")
)
self.__is_playing = False
def play(self):
if self.__is_playing:
return
self.ui.button_play_pause.setIcon(
QIcon(":/player/icons/media-playback-pause.svg")
)
self.__is_playing = True
self.advance()
def step_back(self):
self.pause()
self.advance(reverse=True)
def step_forward(self):
self.pause()
self.advance()
def advance(self, reverse=False):
if reverse:
next_index = self.ui.slider_index.value() - 1
else:
next_index = self.ui.slider_index.value() + 1
if not 0 < next_index < self.__last_index:
self.pause()
return
self.ui.slider_index.setValue(next_index)
if self.__is_playing:
QTimer.singleShot(10, self.advance)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
qapp = QApplication([])
CONFIG.debug = True
p = PlaybackWidget()
p.show()
p.timestamps = np.arange(0, 1000, 12) * 141000
qapp.exec_()
|
The ARTC will be holding staffed information displays about the Kagaru to Acacia Ridge and Bromelton (K2ARB) section of the Inland Rail project during December. These will be located at Calamvale, Browns Plains and Jimboomba.
This is a chance to talk to ARTC staff about what the Inland Rail project means for you, and communities such as Acacia Ridge, Algester, Pallara, Parkinson, Forestdale, Hillcrest, Greenbank, New Beith, MacLean, Boronia Heights, Flagstone, Kagaru and Bromelton. There will be maps of the existing rail alignment to view.
For more about the K2ARB project, click here.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kindergartens', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='Name', max_length=254)),
('description', models.CharField(verbose_name='Description', max_length=1000)),
('kindergarten', models.ForeignKey(to='kindergartens.Kindergarten')),
],
options={
'verbose_name': 'Group',
'verbose_name_plural': 'Groups',
},
),
migrations.CreateModel(
name='GroupType',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='Name', max_length=254)),
],
options={
'verbose_name': 'Group type',
'verbose_name_plural': 'Group types',
},
),
migrations.AddField(
model_name='group',
name='type',
field=models.ForeignKey(to='kindergartens.GroupType'),
),
]
|
Are you looking for water front property? If so, then this is the lot for you! Beautiful River Lot in Grandview Harbor on the Alabama River with approximately 100 ft of water frontage! Perfect spot for you to build and design your private dream home or weekend retreat where you will enjoy living with great fishing, boating and recreational activities! Perfect place to entertain your family and friends! It is also 15 - 20 minutes from Downtown Prattville...you get the best of both worlds!
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from tornado import gen
from tornado.concurrent import Future
def submit(fn, io_loop, *args, **kwargs):
"""Submit Tornado Coroutine to IOLoop.current().
:param fn: Tornado Coroutine to execute
:param io_loop: Tornado IOLoop where to schedule the coroutine
:param args: Args to pass to coroutine
:param kwargs: Kwargs to pass to coroutine
:returns tornado.concurrent.Future: future result of coroutine
"""
future = Future()
def execute():
"""Execute fn on the IOLoop."""
try:
result = gen.maybe_future(fn(*args, **kwargs))
except Exception:
# The function we ran didn't return a future and instead raised
# an exception. Let's pretend that it returned this dummy
# future with our stack trace.
f = gen.Future()
f.set_exc_info(sys.exc_info())
on_done(f)
else:
result.add_done_callback(on_done)
def on_done(tornado_future):
"""
Set tornado.Future results to the concurrent.Future.
:param tornado_future:
"""
exception = tornado_future.exception()
if not exception:
future.set_result(tornado_future.result())
else:
future.set_exception(exception)
io_loop.add_callback(execute)
return future
def future_result(result):
future = Future()
future.set_result(result)
return future
def future_exception(exception):
future = Future()
future.set_exception(exception)
return future
|
According to the Swedish tabloid/newspaper Expressen, the Finnish Head of Delegation and member of the EBU Reference Group, Kjell Ekholm, now confirms that in the past he has been approached by broadcaster officials from several other countries who wanted to exchange points with Finland.
In the interview, he refused to disclose the countries in question, and added that the only way to ensure complete accuracy and fairness was for the EBU to go in and monitor the televoting in selected countries during the final. This will actually happen this year.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import datetime
from udata.models import db
log = logging.getLogger(__name__)
class Message(db.EmbeddedDocument):
content = db.StringField(required=True)
posted_on = db.DateTimeField(default=datetime.now, required=True)
posted_by = db.ReferenceField('User')
class Discussion(db.Document):
user = db.ReferenceField('User')
subject = db.GenericReferenceField()
title = db.StringField(required=True)
discussion = db.ListField(db.EmbeddedDocumentField(Message))
created = db.DateTimeField(default=datetime.now, required=True)
closed = db.DateTimeField()
closed_by = db.ReferenceField('User')
meta = {
'indexes': [
'user',
'subject',
'created'
],
'ordering': ['created'],
}
def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion)
@property
def external_url(self):
return self.subject.url_for(
_anchor='discussion-{id}'.format(id=self.id),
_external=True)
|
OnePlus started its journey in mobile manufacturing in 2014 and no one would have ever thought that it would be one of the most popular smartphone brands in 2018. With the launch of OnePlus 6 and 6T, OnePlus became the top premium smartphone brand in India beating Samsung for the first time ever with a market share of 40%. Thanks to close to software, great hardware, and a relatively affordable price, OnePlus transformed from rags to riches.
OnePlus’s Oxygen OS which powers all the OnePlus phones since 2015, is the most popular Android skin among Android enthusiasts. Oxygen OS is a near to stock Android user interface with zero to less bloatware. This combo of decent hardware and fluid software helps OnePlus devices to be one of the fastest in the market. Now, not everyone can buy a OnePlus device as their price has been increasing steadily with each launch. If you are someone who wants to get a taste of Oxygen OS on your any non OnePlus phone, this is the guide for the same.
XDA senior member MarcAnt01 developed an open source Magisk module Oxy-ify which brings OxygenOS boot animation, custom media, several apps like the Camera, Gallery, Weather, and the Slate font to any Android phone. Although not every Oxygen OS feature works flawlessly on every device, fixes are coming soon and the Oxygen OS camera app may work on a few devices because of hardware compatibility issues.
Mandatory Disclaimer: These processes may not always produce the desired results. Know what you are doing and always have a backup before proceeding. The author or ThemeFoxx cannot be held responsible in case anything unexpected happens to your device. Remember, you are warned.
Your phone must be running ARM64 based hardware and software.
Your device must be rooted with the latest magisk.
Download the Oxy-ify Magisk Module from the downloads section of this article and store it from in your phone’s internal storage.
Open Magisk Manager and swipe from the left edge of your screen to open the menu drawer.
Tap on the Modules section followed by a tap on the “+” button from the button half of the screen.
Now, search for the magisk module that you downloaded in the 1st step and tap on it to install it.
Once, installation is complete, reboot your device.
Congratulations! You just installed the Oxy-ify Magisk Module on your phone. Now, you should find the OnePlus Camera, Gallery and Weather apps in your app drawer and various other changes to your phone’s user interface as well.
This is the easiest way to Turn Your Phone Into A OnePlus Device. I hope this article works the way it is intended to be. Let us know about your experience trying out the methods mentioned in this article in the comments section down below. In case I missed mentioning anything or if there is some topic which you want me to cover, please let me know in the comments section down below.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
from argparse import Namespace
from future import standard_library
from mock.mock import MagicMock
import cloudomate.cmdline as cmdline
from cloudomate.hoster.vpn.azirevpn import AzireVpn
from cloudomate.hoster.vps.linevast import LineVast
from cloudomate.hoster.vps.vps_hoster import VpsOption
standard_library.install_aliases()
class TestCmdLine(unittest.TestCase):
def setUp(self):
self.settings_file = os.path.join(os.path.dirname(__file__), 'resources/test_settings.cfg')
self.vps_options_real = LineVast.get_options
self.vps_purchase_real = LineVast.purchase
def tearDown(self):
LineVast.get_options = self.vps_options_real
LineVast.purchase = self.vps_purchase_real
def test_execute_vps_list(self):
command = ["vps", "list"]
cmdline.execute(command)
def test_execute_vpn_list(self):
command = ["vpn", "list"]
cmdline.execute(command)
def test_execute_vps_options(self):
mock_method = self._mock_vps_options()
command = ["vps", "options", "linevast"]
cmdline.providers["vps"]["linevast"].configurations = []
cmdline.execute(command)
mock_method.assert_called_once()
self._restore_vps_options()
def test_execute_vpn_options(self):
mock_method = self._mock_vpn_options()
command = ["vpn", "options", "azirevpn"]
cmdline.providers["vpn"]["azirevpn"].configurations = []
cmdline.execute(command)
mock_method.assert_called_once()
self._restore_vpn_options()
def test_execute_vps_purchase(self):
self._mock_vps_options([self._create_option()])
purchase = LineVast.purchase
LineVast.purchase = MagicMock()
command = ["vps", "purchase", "linevast", "-f", "-c", self.settings_file, "-rp", "asdf", "0"]
cmdline.execute(command)
LineVast.purchase.assert_called_once()
LineVast.purchase = purchase
self._restore_vps_options()
@staticmethod
def _create_option():
return VpsOption(
name="Option name",
memory="Option ram",
cores="Option cpu",
storage="Option storage",
bandwidth="Option bandwidth",
price=12,
connection="Option connection",
purchase_url="Option url"
)
def test_execute_vps_purchase_verify_options_failure(self):
self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-f", "-c", self.settings_file, "1"]
self._check_exit_code(1, cmdline.execute, command)
self._restore_vps_options()
def test_execute_vps_purchase_unknown_provider(self):
command = ["vps", "purchase", "nonode", "-f", "-rp", "asdf", "1"]
self._check_exit_code(2, cmdline.execute, command)
def test_execute_vps_options_unknown_provider(self):
command = ["vps", "options", "nonode"]
self._check_exit_code(2, cmdline.execute, command)
def _check_exit_code(self, exit_code, method, args):
try:
method(args)
except SystemExit as e:
self.assertEqual(exit_code, e.code)
def test_execute_vps_options_no_provider(self):
command = ["vps", "options"]
self._check_exit_code(2, cmdline.execute, command)
def test_purchase_vps_unknown_provider(self):
args = Namespace()
args.provider = "sd"
args.type = "vps"
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_no_provider(self):
args = Namespace()
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_vps_bad_provider(self):
args = Namespace()
args.provider = False
args.type = "vps"
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_bad_type(self):
args = Namespace()
args.provider = "azirevpn"
args.type = False
self._check_exit_code(2, cmdline.purchase, args)
def test_execute_vps_purchase_high_id(self):
self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-c", self.settings_file, "-rp", "asdf", "1000"]
self._check_exit_code(1, cmdline.execute, command)
self._restore_vps_options()
def test_execute_vps_purchase_low_id(self):
mock = self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-c", self.settings_file, "-rp", "asdf", "-1"]
self._check_exit_code(1, cmdline.execute, command)
mock.assert_called_once()
self._restore_vps_options()
def _mock_vps_options(self, items=None):
if items is None:
items = []
self.vps_options = LineVast.get_options
LineVast.get_options = MagicMock(return_value=items)
return LineVast.get_options
def _restore_vps_options(self):
LineVast.get_options = self.vps_options
def _mock_vpn_options(self, items=None):
if items is None:
items = []
self.vpn_options = AzireVpn.get_options
AzireVpn.get_options = MagicMock(return_value=items)
return AzireVpn.get_options
def _restore_vpn_options(self):
AzireVpn.get_options = self.vpn_options
if __name__ == '__main__':
unittest.main(exit=False)
|
In a bowl, toss the melon and peach with the oil and vin cotto; season with salt and pepper. Let stand for 5 minutes. Stir in the herbs and cheese. Transfer the salad to a platter, top with the prosciutto and serve.
Try a Chardonnay from New Zealand.
|
# Source: https://gist.github.com/singingwolfboy/2fca1de64950d5dfed72
# Want to run your Flask tests with CSRF protections turned on, to make sure
# that CSRF works properly in production as well? Here's an excellent way
# to do it!
# First some imports. I'm assuming you're using Flask-WTF for CSRF protection.
import flask
from flask.testing import FlaskClient as BaseFlaskClient
from flask_wtf.csrf import generate_csrf
# Flask's assumptions about an incoming request don't quite match up with
# what the test client provides in terms of manipulating cookies, and the
# CSRF system depends on cookies working correctly. This little class is a
# fake request that forwards along requests to the test client for setting
# cookies.
class RequestShim(object):
"""
A fake request that proxies cookie-related methods to a Flask test client.
"""
def __init__(self, client):
self.client = client
def set_cookie(self, key, value='', *args, **kwargs):
"Set the cookie on the Flask test client."
server_name = flask.current_app.config["SERVER_NAME"] or "localhost"
return self.client.set_cookie(
server_name, key=key, value=value, *args, **kwargs
)
def delete_cookie(self, key, *args, **kwargs):
"Delete the cookie on the Flask test client."
server_name = flask.current_app.config["SERVER_NAME"] or "localhost"
return self.client.delete_cookie(
server_name, key=key, *args, **kwargs
)
# We're going to extend Flask's built-in test client class, so that it knows
# how to look up CSRF tokens for you!
class FlaskClient(BaseFlaskClient):
@property
def csrf_token(self):
# First, we'll wrap our request shim around the test client, so that
# it will work correctly when Flask asks it to set a cookie.
request = RequestShim(self)
# Next, we need to look up any cookies that might already exist on
# this test client, such as the secure cookie that powers `flask.session`,
# and make a test request context that has those cookies in it.
environ_overrides = {}
self.cookie_jar.inject_wsgi(environ_overrides)
with flask.current_app.test_request_context(
"/login", environ_overrides=environ_overrides,
):
# Now, we call Flask-WTF's method of generating a CSRF token...
csrf_token = generate_csrf()
# ...which also sets a value in `flask.session`, so we need to
# ask Flask to save that value to the cookie jar in the test
# client. This is where we actually use that request shim we made!
flask.current_app.save_session(flask.session, request)
# And finally, return that CSRF token we got from Flask-WTF.
return csrf_token
|
Avidyne’s new Class 1 Mode S Level 2 Datalink transponder, the AXP340, fulfils all the current legal requirements for both IFR and VFR flight and is an affordable and lightweight option for your own aircraft. Mode S technology allows an interrogator to selectively address a single transponder in the relevant airspace, vastly increasing the accuracy of position plotting.
Avidyne’s AXP340 is an ideal solution for a new installation, or is a slide in replacement for an existing KT76A or KT78A transponder. This unit is also designed to fit with Avidyne’s new plug-and-play avionics stack that also includes the AMX240 audio panel with stereo intercom and the IFD540, a touch screen GPS/Navigation/Communication system.
Additional functionality within this transponder includes pressure altitude, flight ID entry, VFR code entry, GPS Latitude/Longitude readout, direct entry numeric keypad, stop watch timer, flight timer and altitude alerter, not to mention the all important ability to support the latest version Automatic Dependent Surveillance Broadcast (ADS-B) Out, when coupled with any of Avidyne’s Traffic Advisory Systems.
By the end of 2020 it is anticipated that ADS-B Out will be essential in almost all US airspace. Teaming the AXP340 with a suitable GPS receiver allows the position of your aircraft to be transmitted to other aircraft or ground stations that are similarly equipped, along with ground speed, ground track and altitude information. The improvements to airborne surveillance that this technology offers makes it a key consideration of all future airspace plans, making this the perfect opportunity to get on board and upgrade your transponder now.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.