# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: lily/translate.py
# Purpose: music21 classes for translating to Lilypond
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2007-2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
music21 translates to Lilypond format and, if Lilypond is installed on the
local computer, can automatically generate .pdf, .png, and .svg versions
of musical files using Lilypond.
This module replaces (as of July 2012) the old LilyString() conversion methods.
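Example of typical usage (an illustrative sketch, not run automatically,
since it requires a local Lilypond installation):

>>> #_DOCS_SHOW n = note.Note('C4')
>>> #_DOCS_SHOW n.show('lily.pdf')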
'''
from __future__ import unicode_literals
import os
import subprocess
import sys
import re
# import threading
import unittest
from music21 import common
from music21 import duration
from music21 import environment
from music21 import exceptions21
from music21 import variant
from music21 import note
from music21.lily import lilyObjects as lyo
from music21.ext import six
_MOD = 'lily.translate2012.py'
environLocal = environment.Environment(_MOD)
try:
# optional imports for PIL
from PIL import Image
from PIL import ImageOps
noPIL = False
except ImportError:
try:
import Image
import ImageOps
noPIL = False
except ImportError:
noPIL = True
from music21 import corpus
### speed up tests! move to music21 base...
class _sharedCorpusTestObject(object):
sharedCache = {}
sharedCacheObject = _sharedCorpusTestObject()
def _getCachedCorpusFile(keyName):
#return corpus.parse(keyName)
if keyName not in sharedCacheObject.sharedCache:
sharedCacheObject.sharedCache[keyName] = corpus.parse(keyName)
return sharedCacheObject.sharedCache[keyName]
#b.parts[0].measure(4)[2].color = 'blue'#.rightBarline = 'double'
def makeLettersOnlyId(inputString):
'''
Takes an id and makes it purely letters by substituting
letters for all other characters.
>>> print(lily.translate.makeLettersOnlyId('rainbow123@@dfas'))
rainbowxyzmmdfas
'''
inputString = str(inputString)
returnString = ''
for c in inputString:
if not c.isalpha():
c = chr(ord(c) % 26 + 97)
returnString += c
return returnString
#-------------------------------------------------------------------------------
class LilypondConverter(object):
fictaDef = \
r'''
ficta = #(define-music-function (parser location) () #{ \once \set suggestAccidentals = ##t #})
'''.lstrip()
colorDef = \
r'''
color = #(define-music-function (parser location color) (string?) #{
\once \override NoteHead #'color = #(x11-color color)
\once \override Stem #'color = #(x11-color color)
\once \override Rest #'color = #(x11-color color)
\once \override Beam #'color = #(x11-color color)
#})
'''.lstrip()
simplePaperDefinitionScm = r'''
\paper { #(define dump-extents #t)
indent = 0\mm
force-assignment = #""
oddFooterMarkup=##f
oddHeaderMarkup=##f
bookTitleMarkup=##f
}
'''.lstrip()
transparencyStartScheme = r'''
\override Rest #'transparent = ##t
\override Dots #'transparent = ##t
'''.lstrip()
transparencyStopScheme = r'''
\revert Rest #'transparent
\revert Dots #'transparent
'''.lstrip()
bookHeader = r'''
\include "lilypond-book-preamble.ly"
'''.lstrip()
accidentalConvert = {"double-sharp": u"isis",
"double-flat": u"eses",
"one-and-a-half-sharp": u"isih",
"one-and-a-half-flat": u"eseh",
"sharp": u"is",
"flat": u"es",
"half-sharp": u"ih",
"half-flat": u"eh",
}
barlineDict = {'regular': '|',
'dotted': ':',
'dashed': 'dashed',
'heavy': '.', #??
'double': '||',
'final': '|.',
'heavy-light': '.|',
'heavy-heavy': '.|.',
'start-repeat': '|:',
'end-repeat': ':|',
# no music21 support for |.| lightHeavyLight yet
'tick': '\'',
#'short': '', # no lilypond support??
'none': '',
}
def __init__(self):
self.topLevelObject = lyo.LyLilypondTop()
self.setupTools()
self.context = self.topLevelObject
self.storedContexts = []
self.doNotOutput = []
self.currentMeasure = None
self.addedVariants = []
self.variantColors = ['blue', 'red', 'purple', 'green', 'orange', 'yellow', 'grey']
self.coloredVariants = False
self.variantMode = False
self.LILYEXEC = None
self.tempName = None
self.inWord = None
def findLilyExec(self):
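'''
Returns a path to the Lilypond executable and stores it in self.LILYEXEC.
The user-configured environment['lilypondPath'] is checked first, then
platform-specific default install locations (the OS X application bundle,
Windows Program Files), and finally the bare name 'lilypond' is used in
the hope that it is on the system PATH.
'''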
if os.path.exists(environLocal['lilypondPath']):
LILYEXEC = environLocal['lilypondPath']
else:
if sys.platform == "darwin":
LILYEXEC = '/Applications/Lilypond.app/Contents/Resources/bin/lilypond'
if not os.path.exists(LILYEXEC):
LILYEXEC = 'lilypond'
elif sys.platform == 'win32' and os.path.exists('c:/Program Files (x86)'):
LILYEXEC = r'c:/Program\ Files\ (x86)/lilypond/usr/bin/lilypond'
if not os.path.exists(LILYEXEC) and not os.path.exists(LILYEXEC + '.exe'):
LILYEXEC = 'lilypond'
elif sys.platform == 'win32':
LILYEXEC = r'c:/Program\ Files/lilypond/usr/bin/lilypond'
if not os.path.exists(LILYEXEC) and not os.path.exists(LILYEXEC + '.exe'):
LILYEXEC = 'lilypond'
else:
LILYEXEC = 'lilypond'
self.LILYEXEC = LILYEXEC
return LILYEXEC
def setupTools(self):
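r'''
Calls "lilypond --version" to confirm that Lilypond can actually be run,
stores self.majorVersion and self.minorVersion, builds the \version and
header schemes for later output, and chooses the backend flag
('-dbackend=' for 2.11 and later, '--backend=' for older versions).
Raises LilyTranslateException if Lilypond cannot be found.
'''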
LILYEXEC = self.findLilyExec()
command = [LILYEXEC, '--version']
try:
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
except OSError:
raise LilyTranslateException("Cannot find a copy of Lilypond installed on your system. " +
"Please be sure it is installed. And that your " +
"environment.UserSettings()['lilypondPath'] is set to find it.")
stdout, unused = proc.communicate()
if six.PY3:
stdout = stdout.decode(encoding='utf-8')
versionString = stdout.split()[2]
versionPieces = versionString.split('.')
self.majorVersion = versionPieces[0]
self.minorVersion = versionPieces[1]
#self.majorVersion = 2 # this should be obtained from user and/or user's system
#self.minorVersion = 13
self.versionString = self.topLevelObject.backslash + "version " + self.topLevelObject.quoteString(str(self.majorVersion) + '.' + str(self.minorVersion))
self.versionScheme = lyo.LyEmbeddedScm(self.versionString)
self.headerScheme = lyo.LyEmbeddedScm(self.bookHeader)
self.backend = 'ps'
if int(self.majorVersion) >= 2:
if int(self.minorVersion) >= 11:
self.backendString = '-dbackend='
else:
self.backendString = '--backend='
else:
self.backendString = '--backend='
# an old note claimed that 2.12 and above should use '--formats=' instead, but that does not seem to be true
def newContext(self, newContext):
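'''
Stores the current context on the storedContexts stack and
makes newContext the current self.context.
'''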
self.storedContexts.append(self.context)
self.context = newContext
def restoreContext(self):
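'''
Restores the most recently stored context as self.context,
falling back to self.topLevelObject if the stack is empty.
'''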
try:
self.context = self.storedContexts.pop()
except IndexError:
self.context = self.topLevelObject
#------------ Set a complete Lilypond Tree from a music21 object ----------#
def textFromMusic21Object(self, m21ObjectIn):
r'''
get a complete Lilypond text (as a string, suitable for writing to a .ly file) from a music21 object
>>> n = note.Note()
>>> print(lily.translate.LilypondConverter().textFromMusic21Object(n))
\version "2..."
\include "lilypond-book-preamble.ly"
color = #(define-music-function (parser location color) (string?) #{
\once \override NoteHead #'color = #(x11-color color)
\once \override Stem #'color = #(x11-color color)
\once \override Rest #'color = #(x11-color color)
\once \override Beam #'color = #(x11-color color)
#})
\header { }
\score {
<< \new Staff = ... { c' 4
}
>>
}
\paper { }
...
'''
self.loadFromMusic21Object(m21ObjectIn)
s = str(self.topLevelObject)
s = re.sub(r'\s*\n\s*\n', '\n', s).strip()
return s
def loadFromMusic21Object(self, m21ObjectIn):
r'''
Create a Lilypond object hierarchy in self.topLevelObject from an
arbitrary music21 object.
TODO: make lilypond automatically run makeNotation.makeTupletBrackets(s)
TODO: Add tests...
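A minimal sketch of the basic behavior (the full output is long, so here
we only check that a \version line is produced):

>>> n = note.Note('D4')
>>> lpc = lily.translate.LilypondConverter()
>>> lpc.loadFromMusic21Object(n)
>>> str(lpc.topLevelObject).strip().startswith('\\version')
True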
'''
from music21 import stream
c = m21ObjectIn.classes
if 'Stream' in c:
if m21ObjectIn.recurse().variants:
## has variants so we need to make a deepcopy...
m21ObjectIn = variant.makeAllVariantsReplacements(m21ObjectIn, recurse = True)
m21ObjectIn.makeVariantBlocks()
if ('Stream' not in c) or ('Measure' in c) or ('Voice' in c):
scoreObj = stream.Score()
partObj = stream.Part()
# no need for measures or voices...
partObj.insert(0, m21ObjectIn)
scoreObj.insert(0, partObj)
self.loadObjectFromScore(scoreObj, makeNotation = False)
elif 'Part' in c:
scoreObj = stream.Score()
scoreObj.insert(0, m21ObjectIn)
self.loadObjectFromScore(scoreObj, makeNotation = False)
elif 'Score' in c:
self.loadObjectFromScore(m21ObjectIn, makeNotation = False)
elif 'Opus' in c:
self.loadObjectFromOpus(m21ObjectIn, makeNotation = False)
else: # treat as part...
scoreObj = stream.Score()
scoreObj.insert(0, m21ObjectIn)
self.loadObjectFromScore(scoreObj, makeNotation = False)
#raise LilyTranslateException("Unknown stream type %s." % (m21ObjectIn.__class__))
def loadObjectFromOpus(self, opusIn = None, makeNotation = True):
r'''
creates a filled topLevelObject (lily.lilyObjects.LyLilypondTop)
whose string representation accurately reflects all the Score objects
in this Opus object.
>>> #_DOCS_SHOW fifeOpus = corpus.parse('miscFolk/americanfifeopus.abc')
>>> #_DOCS_SHOW lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW lpc.loadObjectFromOpus(fifeOpus, makeNotation = False)
>>> #_DOCS_SHOW lpc.showPDF()
'''
contents = []
lpVersionScheme = self.versionScheme
lpHeaderScheme = self.headerScheme
lpColorScheme = lyo.LyEmbeddedScm(self.colorDef)
contents.append(lpVersionScheme)
contents.append(lpHeaderScheme)
contents.append(lpColorScheme)
for thisScore in opusIn.scores:
if makeNotation is True:
thisScore = thisScore.makeNotation(inPlace = False)
lpHeader = lyo.LyLilypondHeader()
lpScoreBlock = self.lyScoreBlockFromScore(thisScore)
if thisScore.metadata is not None:
self.setHeaderFromMetadata(thisScore.metadata, lpHeader = lpHeader)
contents.append(lpHeader)
contents.append(lpScoreBlock)
lpOutputDefHead = lyo.LyOutputDefHead(defType = 'paper')
lpOutputDefBody = lyo.LyOutputDefBody(outputDefHead = lpOutputDefHead)
lpOutputDef = lyo.LyOutputDef(outputDefBody = lpOutputDefBody)
contents.append(lpOutputDef)
lpLayout = lyo.LyLayout()
contents.append(lpLayout)
self.context.contents = contents
def loadObjectFromScore(self, scoreIn = None, makeNotation = True):
r'''
creates a filled topLevelObject (lily.lilyObjects.LyLilypondTop)
whose string representation accurately reflects this Score object.
>>> lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW b = corpus.parse('bach/bwv66.6')
>>> b = lily.translate._getCachedCorpusFile('bach/bwv66.6') #_DOCS_HIDE
>>> lpc.loadObjectFromScore(b)
>>> #print lpc.topLevelObject
'''
if makeNotation is True:
scoreIn = scoreIn.makeNotation(inPlace = False)
lpVersionScheme = self.versionScheme
lpHeaderScheme = self.headerScheme
lpColorScheme = lyo.LyEmbeddedScm(self.colorDef)
lpHeader = lyo.LyLilypondHeader()
# here's the heavy work...
lpScoreBlock = self.lyScoreBlockFromScore(scoreIn)
lpOutputDefHead = lyo.LyOutputDefHead(defType = 'paper')
lpOutputDefBody = lyo.LyOutputDefBody(outputDefHead = lpOutputDefHead)
lpOutputDef = lyo.LyOutputDef(outputDefBody = lpOutputDefBody)
lpLayout = lyo.LyLayout()
contents = [lpVersionScheme, lpHeaderScheme, lpColorScheme, lpHeader, lpScoreBlock, lpOutputDef, lpLayout]
if scoreIn.metadata is not None:
self.setHeaderFromMetadata(scoreIn.metadata, lpHeader = lpHeader)
self.context.contents = contents
#------- return Lily objects or append to the current context -----------#
def lyScoreBlockFromScore(self, scoreIn):
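r'''
Returns a LyScoreBlock object from a Score (or score-like) stream:
scores with parts go through lyGroupedMusicListFromScoreWithParts
(with ossia staves prepended when the score contains variants), while
anything else is treated as a single part.
'''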
lpCompositeMusic = lyo.LyCompositeMusic()
self.newContext(lpCompositeMusic)
# Also get the variants and the total number of measures here, and start each
# staff context with { \stopStaff s1*n } where n is the number of measures.
if hasattr(scoreIn, 'parts') and scoreIn.iter.parts: # or has variants
if scoreIn.recurse().variants:
lpPartsAndOssiaInit = self.lyPartsAndOssiaInitFromScore(scoreIn)
lpGroupedMusicList = self.lyGroupedMusicListFromScoreWithParts(
scoreIn,
scoreInit=lpPartsAndOssiaInit)
else:
lpGroupedMusicList = self.lyGroupedMusicListFromScoreWithParts(scoreIn)
lpCompositeMusic.groupedMusicList = lpGroupedMusicList
else:
# treat as a part...
lpPrefixCompositeMusic = self.lyPrefixCompositeMusicFromStream(scoreIn)
lpCompositeMusic.prefixCompositeMusic = lpPrefixCompositeMusic
lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
lpScoreBody = lyo.LyScoreBody(music = lpMusic)
lpScoreBlock = lyo.LyScoreBlock(scoreBody = lpScoreBody)
self.restoreContext()
return lpScoreBlock
def lyPartsAndOssiaInitFromScore(self, scoreIn):
r'''
Takes in a score and returns a block that starts each part context and variant context
with an identifier and {\stopStaff s1*n} (or s, whatever is needed for the duration)
where n is the number of measures in the score.
>>> import copy
Set up score:
>>> s = stream.Score()
>>> p1,p2 = stream.Part(), stream.Part()
>>> p1.insert(0, meter.TimeSignature('4/4'))
>>> p2.insert(0, meter.TimeSignature('4/4'))
>>> p1.append(variant.Variant(name = 'london'))
>>> p2.append(variant.Variant(name = 'london'))
>>> p1.append(variant.Variant(name = 'rome'))
>>> p2.append(variant.Variant(name = 'rome'))
>>> for i in range(4):
... m = stream.Measure()
... n = note.Note('D4', type='whole')
... m.append(n)
... p1.append(m)
... p2.append(copy.deepcopy(m))
>>> p1.id = 'pa'
>>> p2.id = 'pb'
>>> s.append(p1)
>>> s.append(p2)
Run method
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.lyPartsAndOssiaInitFromScore(s))
\new Staff = pa { \stopStaff s1 s1 s1 s1 }
\new Staff = londonpa
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pa"
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = romepa
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pa"
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = pb { \stopStaff s1 s1 s1 s1 }
\new Staff = londonpb
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pb...
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = romepb
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pb...
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
'''
lpMusicList = lyo.LyMusicList()
musicList = []
lpMusic = r'{ \stopStaff %s}'
for p in scoreIn.parts:
partIdText = makeLettersOnlyId(p.id)
partId = lyo.LyOptionalId(partIdText)
spacerDuration = self.getLySpacersFromStream(p)
lpPrefixCompositeMusicPart = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = partId,
simpleString = 'Staff',
music = lpMusic % spacerDuration)
musicList.append(lpPrefixCompositeMusicPart)
variantsAddedForPart = []
for v in p.variants:
variantName = v.groups[0]
if variantName not in variantsAddedForPart:
self.addedVariants.append(variantName)
variantsAddedForPart.append(variantName)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+partIdText)
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = 'Staff',
music = lpMusic % spacerDuration)
contextModList = [r'\remove "Time_signature_engraver"',
r'alignAboveContext = #"%s"' % partIdText,
r'fontSize = #-3',
r"\override StaffSymbol #'staff-space = #(magstep -3)",
r"\override StaffSymbol #'thickness = #(magstep -3)",
r"\override TupletBracket #'bracket-visibility = ##f",
r"\override TupletNumber #'stencil = ##f",
r"\override Clef #'transparent = ##t",
r"\override OctavateEight #'transparent = ##t",
r'\consists "Default_bar_line_engraver"',
]
optionalContextMod = lyo.LyContextModification(contextModList)
lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
musicList.append(lpPrefixCompositeMusicVariant)
lpMusicList.contents = musicList
return lpMusicList
def getLySpacersFromStream(self, streamIn, measuresOnly = True):
'''
Creates a series of Spacer objects for the measures in a Stream Part.
>>> m1 = stream.Measure(converter.parse("tinynotation: 3/4 a2."))
>>> m2 = stream.Measure(converter.parse("tinynotation: 3/4 b2."))
>>> m3 = stream.Measure(converter.parse("tinynotation: 4/4 a1"))
>>> m4 = stream.Measure(converter.parse("tinynotation: 4/4 b1"))
>>> m5 = stream.Measure(converter.parse("tinynotation: 4/4 c1"))
>>> m6 = stream.Measure(converter.parse("tinynotation: 5/4 a4 b1"))
>>> streamIn = stream.Stream([m1, m2, m3, m4, m5, m6])
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.getLySpacersFromStream(streamIn))
s2. s2. s1 s1 s1 s1 s4
TODO: Low-priority... rare, but possible: tuplet time signatures (3/10)...
'''
returnString = ''
#mostRecentDur = ''
#recentDurCount = 0
for el in streamIn:
if not "Measure" in el.classes:
continue
if el.duration.quarterLength == 0.0:
continue
try:
dur = str(self.lyMultipliedDurationFromDuration(el.duration))
returnString = returnString + 's'+ dur
# general exception is the only way to catch str exceptions
except: #pylint: disable=bare-except
for c in el.duration.components:
dur = str(self.lyMultipliedDurationFromDuration(c))
returnString = returnString + 's'+ dur
#if dur == mostRecentDur:
# recentDurCount += 1
#else:
# mostRecentDur = dur
# recentDurCount = 0
#if recentDurCount != 0:
# returnString = returnString + '*' + str(recentDurCount)
return returnString
def lyGroupedMusicListFromScoreWithParts(self, scoreIn, scoreInit = None):
r'''
More complex example showing how the score can be set up with ossia parts...
>>> lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW b = corpus.parse('bach/bwv66.6')
>>> b = lily.translate._getCachedCorpusFile('bach/bwv66.6') #_DOCS_HIDE
>>> lpPartsAndOssiaInit = lpc.lyPartsAndOssiaInitFromScore(b)
>>> lpGroupedMusicList = lpc.lyGroupedMusicListFromScoreWithParts(b, scoreInit = lpPartsAndOssiaInit)
>>> print(lpGroupedMusicList)
<BLANKLINE>
<< \new Staff = Soprano { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Alto { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Tenor { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Bass { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
<BLANKLINE>
\context Staff = Soprano \with {
\autoBeamOff
}
{ \startStaff \partial 32*8
\clef "treble"
\key fis \minor
\time 4/4
\set stemRightBeamCount = #1
\once \override Stem #'direction = #DOWN
cis'' 8 [
\set stemLeftBeamCount = #1
\once \override Stem #'direction = #DOWN
b... 8 ]
\bar "|" %{ end measure 0 %}
\once \override Stem #'direction = #UP
a' 4
\once \override Stem #'direction = #DOWN
b... 4
\once \override Stem #'direction = #DOWN
cis'' 4 \fermata
\once \override Stem #'direction = #DOWN
e'' 4
\bar "|" %{ end measure 1 %}
\once \override Stem #'direction = #DOWN
cis'' 4
...
}
<BLANKLINE>
<BLANKLINE>
\context Staff = Alto \with {
\autoBeamOff
}
{ \startStaff \partial 32*8
\clef "treble"...
\once \override Stem #'direction = #UP
e' 4
\bar "|" %{ end measure 0 %}
\once \override Stem #'direction = #UP
fis' 4
\once \override Stem #'direction = #UP
e' 4
...
}
<BLANKLINE>
<BLANKLINE>
>>
<BLANKLINE>
'''
compositeMusicList = []
lpGroupedMusicList = lyo.LyGroupedMusicList()
lpSimultaneousMusic = lyo.LySimultaneousMusic()
lpMusicList = lyo.LyMusicList()
lpSimultaneousMusic.musicList = lpMusicList
lpGroupedMusicList.simultaneousMusic = lpSimultaneousMusic
self.newContext(lpMusicList)
if scoreInit is None:
for p in scoreIn.parts:
compositeMusicList.append(self.lyPrefixCompositeMusicFromStream(p))
else:
compositeMusicList.append(scoreInit)
for p in scoreIn.parts:
compositeMusicList.append(self.lyPrefixCompositeMusicFromStream(p, type='context', beforeMatter = 'startStaff'))
self.restoreContext()
lpMusicList.contents = compositeMusicList
return lpGroupedMusicList
def lyNewLyricsFromStream(self, streamIn, streamId = None, alignment = 'alignBelowContext' ):
r'''
returns a LyNewLyrics object
This is a bit of a hack; it should eventually be switched over to using a
prefixed context with \new Lyric = "id" \with { } {}
>>> s = converter.parse('tinyNotation: 4/4 c4_hel- d4_-lo r4 e4_world')
>>> s.makeMeasures(inPlace = True)
>>> s.id = 'helloWorld'
>>> lpc = lily.translate.LilypondConverter()
>>> lyNewLyrics = lpc.lyNewLyricsFromStream(s)
>>> print(lyNewLyrics)
\addlyrics { \set alignBelowContext = #"helloWorld"
"hel" --
"lo"__
"world"
}
'''
lyricsDict = streamIn.lyrics(skipTies = True)
if streamId is None:
streamId = makeLettersOnlyId(streamIn.id)
streamId = "#"+ lyo.LyObject().quoteString(streamId)
lpGroupedMusicLists = []
for lyricNum in sorted(lyricsDict):
lyricList = []
lpAlignmentProperty = lyo.LyPropertyOperation(mode = 'set', value1 = alignment, value2 = streamId)
lyricList.append(lpAlignmentProperty)
self.inWord = False
for el in lyricsDict[lyricNum]:
lpLyricElement = self.lyLyricElementFromM21Lyric(el)
lyricList.append(lpLyricElement)
self.inWord = False
lpLyricList = lyo.LyMusicList(lyricList)
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpLyricList)
lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
lpGroupedMusicLists.append(lpGroupedMusicList)
lpNewLyrics = lyo.LyNewLyrics(groupedMusicLists = lpGroupedMusicLists)
return lpNewLyrics
def lyLyricElementFromM21Lyric(self, m21Lyric):
'''
Returns a :class:`~music21.lily.lilyObjects.LyLyricElement` object
from a :class:`~music21.note.Lyric` object.
Uses self.inWord to keep track of whether or not we're in the middle of
a word.
>>> s = converter.parse('tinyNotation: 4/4 c4_hel- d4_-lo r2 e2 f2_world')
>>> s.makeMeasures(inPlace = True)
>>> lyrics = s.lyrics()[1] # get first verse (yes, 1 = first, not 0!)
>>> lpc = lily.translate.LilypondConverter()
>>> lpc.lyLyricElementFromM21Lyric(lyrics[0])
<music21.lily.lilyObjects.LyLyricElement object...'"hel" --'>
>>> lpc.inWord
True
>>> lpc.lyLyricElementFromM21Lyric(lyrics[1])
<music21.lily.lilyObjects.LyLyricElement object...'"lo"__'>
>>> lpc.lyLyricElementFromM21Lyric(lyrics[2])
<music21.lily.lilyObjects.LyLyricElement object...' _ '>
>>> lpc.lyLyricElementFromM21Lyric(lyrics[3])
<music21.lily.lilyObjects.LyLyricElement object...'"world"'>
>>> lpc.inWord
False
'''
if hasattr(self, 'inWord'):
inWord = self.inWord
else:
inWord = False
el = m21Lyric
if el is None or el.text == '':
# a missing or empty lyric is rendered as a lyric skip
text = ' _ '
else:
text = '"' + el.text + '"'
if el.syllabic == 'end':
text = text + '__'
inWord = False
elif el.syllabic == 'begin' or el.syllabic == 'middle':
text = text + ' --'
inWord = True
else:
text = text
self.inWord = inWord
lpLyricElement = lyo.LyLyricElement(text)
return lpLyricElement
def lySequentialMusicFromStream(self, streamIn, beforeMatter = None):
r'''
returns a LySequentialMusic object from a stream
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> lpc = lily.translate.LilypondConverter()
>>> lySequentialMusicOut = lpc.lySequentialMusicFromStream(c)
>>> lySequentialMusicOut
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> print(lySequentialMusicOut)
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
'''
musicList = []
lpMusicList = lyo.LyMusicList(contents = musicList)
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList, beforeMatter = beforeMatter)
self.newContext(lpMusicList)
self.appendObjectsToContextFromStream(streamIn)
lyObject = self.closeMeasure()
if lyObject is not None:
musicList.append(lyObject)
self.restoreContext()
return lpSequentialMusic
def lyPrefixCompositeMusicFromStream(self, streamIn, contextType = None, type = None, beforeMatter = None): #@ReservedAssignment
r'''
returns an LyPrefixCompositeMusic object from
a stream (generally a part, but who knows...)
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> c.staffLines = 4
>>> lpc = lily.translate.LilypondConverter()
>>> lyPrefixCompositeMusicOut = lpc.lyPrefixCompositeMusicFromStream(c, contextType='Staff')
>>> lyPrefixCompositeMusicOut
<music21.lily.lilyObjects.LyPrefixCompositeMusic object at 0x...>
>>> print(lyPrefixCompositeMusicOut)
\new Staff = ... \with {
\override StaffSymbol #'line-count = #4
}
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
<BLANKLINE>
'''
compositeMusicType = type
optionalId = None
contextModList = []
c = streamIn.classes
if contextType is None:
if 'Part' in c:
newContext = 'Staff'
optionalId = lyo.LyOptionalId(makeLettersOnlyId(streamIn.id))
elif 'Voice' in c:
newContext = 'Voice'
else:
newContext = 'Voice'
else:
newContext = contextType
optionalId = lyo.LyOptionalId(makeLettersOnlyId(streamIn.id))
if streamIn.streamStatus.haveBeamsBeenMade() is True:
contextModList.append(r"\autoBeamOff ")
if hasattr(streamIn, 'staffLines') and streamIn.staffLines != 5:
contextModList.append(r"\override StaffSymbol #'line-count = #%d" % streamIn.staffLines)
if streamIn.staffLines % 2 == 0: # even stafflines need a change...
pass
lpNewLyrics = self.lyNewLyricsFromStream(streamIn, streamId = makeLettersOnlyId(streamIn.id))
lpSequentialMusic = self.lySequentialMusicFromStream(streamIn, beforeMatter = beforeMatter)
lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
lpCompositeMusic = lyo.LyCompositeMusic(groupedMusicList = lpGroupedMusicList, newLyrics = lpNewLyrics)
lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
if compositeMusicType is None:
compositeMusicType = 'new'
if contextModList:
contextMod = lyo.LyContextModification(contextModList)
else:
contextMod = None
lpPrefixCompositeMusic = lyo.LyPrefixCompositeMusic(type = compositeMusicType,
optionalId = optionalId,
simpleString = newContext,
optionalContextMod = contextMod,
music = lpMusic)
return lpPrefixCompositeMusic
def appendObjectsToContextFromStream(self, streamObject):
r'''
takes a Stream and appends all the elements in it to the current
context's .contents list, and deals with creating Voices in it. It also deals with
variants in it.
(should eventually replace the main Score parts finding tools)
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> c = converter.parse('tinynotation: 3/4 c4 d- e#')
>>> lpc.appendObjectsToContextFromStream(c)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm...>, <music21.lily.lilyObjects.LySimpleMusic...>, <music21.lily.lilyObjects.LySimpleMusic...>, <music21.lily.lilyObjects.LySimpleMusic...]
>>> print(lpc.context)
\clef "treble"
\time 3/4
c' 4
des' 4
eis' 4
<BLANKLINE>
>>> v1 = stream.Voice()
>>> v1.append(note.Note("C5", quarterLength = 4.0))
>>> v2 = stream.Voice()
>>> v2.append(note.Note("C#5", quarterLength = 4.0))
>>> m = stream.Measure()
>>> m.insert(0, v1)
>>> m.insert(0, v2)
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.appendObjectsToContextFromStream(m)
>>> print(lpc.context) # internal spaces removed...
<< \new Voice { c'' 1
\bar "|." %{ end measure 1 %}
}
\new Voice { cis'' 1
}
>>
'''
for groupedElements in streamObject.groupElementsByOffset():
#print groupedElements
if len(groupedElements) == 1: # one thing at that moment...
el = groupedElements[0]
el.activeSite = streamObject
self.appendM21ObjectToContext(el)
else: # voices or otherwise more than one thing happening at once...
# if voices
voiceList = []
variantList = []
otherList = []
for el in groupedElements:
if 'Voice' in el.classes:
voiceList.append(el)
elif 'Variant' in el.classes:
variantList.append(el)
else:
el.activeSite = streamObject
otherList.append(el)
if len(variantList) > 0:
for v in variantList:
v.activeSite = streamObject
self.appendContextFromVariant(variantList, activeSite = streamObject, coloredVariants = self.coloredVariants)
if len(voiceList) > 0:
musicList2 = []
lp2GroupedMusicList = lyo.LyGroupedMusicList()
lp2SimultaneousMusic = lyo.LySimultaneousMusic()
lp2MusicList = lyo.LyMusicList()
lp2SimultaneousMusic.musicList = lp2MusicList
lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
for voice in voiceList:
if voice not in self.doNotOutput:
lpPrefixCompositeMusic = self.lyPrefixCompositeMusicFromStream(voice)
musicList2.append(lpPrefixCompositeMusic)
lp2MusicList.contents = musicList2
contextObject = self.context
currentMusicList = contextObject.contents
currentMusicList.append(lp2GroupedMusicList)
lp2GroupedMusicList.setParent(self.context)
if len(otherList) > 0:
for el in otherList:
self.appendM21ObjectToContext(el)
def appendM21ObjectToContext(self, thisObject):
'''
converts any type of object into a lilyObject of LyMusic type
(LySimpleMusic, LyEmbeddedScm, etc.) and appends it to the current context
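
Simple dispatch example (a Note is routed through
appendContextFromNoteOrRest):

>>> lpc = lily.translate.LilypondConverter()
>>> lpc.context = lily.lilyObjects.LyMusicList()
>>> lpc.appendM21ObjectToContext(note.Note('C#4'))
>>> print(lpc.context)
cis' 4
<BLANKLINE>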
'''
if thisObject in self.doNotOutput:
return
### treat complex duration objects as multiple objects
c = thisObject.classes
if 'Stream' not in c and thisObject.duration.type == 'complex':
thisObjectSplit = thisObject.splitAtDurations()
for subComponent in thisObjectSplit:
self.appendM21ObjectToContext(subComponent)
return
contextObject = self.context
if hasattr(contextObject, 'contents'):
currentMusicList = contextObject.contents
else:
raise LilyTranslateException("Cannot get a currentMusicList from contextObject %r" % contextObject)
if hasattr(thisObject, 'startTransparency') and thisObject.startTransparency is True:
# old hack, replace with the better "hidden" attribute
lyScheme = lyo.LyEmbeddedScm(self.transparencyStartScheme)
currentMusicList.append(lyScheme)
lyObject = None
if "Measure" in c:
## lilypond does not put groups around measures...
## it does however need barline ends
## also, if variantMode is True, the last note in each "measure" should have \noBeam
closeMeasureObj = self.closeMeasure() # could be None
if closeMeasureObj is not None:
currentMusicList.append(closeMeasureObj)
closeMeasureObj.setParent(contextObject)
padObj = self.getSchemeForPadding(thisObject)
if padObj is not None:
currentMusicList.append(padObj)
padObj.setParent(contextObject)
## here we go!
self.appendObjectsToContextFromStream(thisObject)
self.currentMeasure = thisObject
elif "Stream" in c:
#try:
lyObject = self.lyPrefixCompositeMusicFromStream(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
#except AttributeError as ae:
# raise Exception("Cannot parse %s: %s" % (thisObject, str(ae)))
elif "Note" in c or "Rest" in c:
self.appendContextFromNoteOrRest(thisObject)
elif "Chord" in c:
lyObject = self.lySimpleMusicFromChord(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "Clef" in c:
lyObject = self.lyEmbeddedScmFromClef(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "KeySignature" in c:
lyObject = self.lyEmbeddedScmFromKeySignature(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "TimeSignature" in c and self.variantMode is False:
lyObject = self.lyEmbeddedScmFromTimeSignature(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "Variant" in c:
self.appendContextFromVariant(thisObject, coloredVariants=self.coloredVariants)
elif "SystemLayout" in c:
lyObject = lyo.LyEmbeddedScm(r'\break')
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "PageLayout" in c:
lyObject = lyo.LyEmbeddedScm(r'\pageBreak')
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
else:
lyObject = None
if hasattr(thisObject, 'stopTransparency') and thisObject.stopTransparency is True:
# old hack, replace with the better "hidden" attribute
lyScheme = lyo.LyEmbeddedScm(self.transparencyStopScheme)
currentMusicList.append(lyScheme)
def appendContextFromNoteOrRest(self, noteOrRest):
r'''
appends lySimpleMusicFromNoteOrRest to the
current context.
>>> n = note.Note("C#4")
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.appendContextFromNoteOrRest(n)
>>> print(lpMusicList)
cis' 4
<BLANKLINE>
>>> n2 = note.Note("D#4")
>>> n2.duration.quarterLength = 1.0/3
>>> n2.duration.tuplets[0].type = 'start'
>>> n3 = note.Note("E4")
>>> n3.duration.quarterLength = 1.0/3
>>> n4 = note.Note("F4")
>>> n4.duration.quarterLength = 1.0/3
>>> n4.duration.tuplets[0].type = 'stop'
>>> n5 = note.Note("F#4")
>>> lpc.appendContextFromNoteOrRest(n2)
>>> lpc.appendContextFromNoteOrRest(n3)
>>> lpc.appendContextFromNoteOrRest(n4)
>>> lpc.appendContextFromNoteOrRest(n5)
>>> print(lpc.context)
cis' 4
\times 2/3 { dis' 8
e' 8
f' 8
}
<BLANKLINE>
fis' 4
<BLANKLINE>
'''
# commented out until complete
# if self.variantMode is True:
# #TODO: attach \noBeam to note if it is the last note
# if "NotRest" in noteOrRest.classes:
# n = noteOrRest
# activeSite = n.activeSite
# offset = n.offset
# # failed at least once...
# if offset + n.duration.quarterLength == activeSite.duration.quarterLength:
# pass
self.setContextForTupletStart(noteOrRest)
self.appendBeamCode(noteOrRest)
self.appendStemCode(noteOrRest)
lpSimpleMusic = self.lySimpleMusicFromNoteOrRest(noteOrRest)
self.context.contents.append(lpSimpleMusic)
lpSimpleMusic.setParent(self.context)
self.setContextForTupletStop(noteOrRest)
def lySimpleMusicFromNoteOrRest(self, noteOrRest):
r'''
returns a lilyObjects.LySimpleMusic object for the generalNote containing...
LyEventChord containing
LySimpleChordElements containing
LySimpleElement containing
LyPitch AND
LyMultipliedDuration containing:
LyMultipliedDuration containing
LyStenoDuration
does not check for tuplets; those are handled in
appendContextFromNoteOrRest.
>>> conv = lily.translate.LilypondConverter()
>>> n0 = note.Note("D#5")
>>> n0.pitch.accidental.displayType = 'always'
>>> n0.pitch.accidental.displayStyle = 'parentheses'
>>> n0.editorial.color = 'blue'
>>> sm = conv.lySimpleMusicFromNoteOrRest(n0)
>>> print(sm)
\color "blue" dis'' ! ? 4
Now make the note disappear...
>>> n0.hideObjectOnPrint = True
>>> sm = conv.lySimpleMusicFromNoteOrRest(n0)
>>> print(sm)
s 4
'''
c = noteOrRest.classes
simpleElementParts = []
if noteOrRest._editorial is not None:
if noteOrRest.editorial.color and noteOrRest.hideObjectOnPrint is not True:
simpleElementParts.append(noteOrRest.editorial.colorLilyStart())
if 'Note' in c:
if noteOrRest.hideObjectOnPrint is not True:
lpPitch = self.lyPitchFromPitch(noteOrRest.pitch)
simpleElementParts.append(lpPitch)
if noteOrRest.pitch.accidental is not None:
if noteOrRest.pitch.accidental.displayType == 'always':
simpleElementParts.append('! ')
if noteOrRest.pitch.accidental.displayStyle == 'parentheses':
simpleElementParts.append('? ')
else:
simpleElementParts.append("s ")
elif "SpacerRest" in c:
simpleElementParts.append("s ")
elif 'Rest' in c:
if noteOrRest.hideObjectOnPrint is True:
simpleElementParts.append("s ")
else:
simpleElementParts.append("r ")
lpMultipliedDuration = self.lyMultipliedDurationFromDuration(noteOrRest.duration)
simpleElementParts.append(lpMultipliedDuration)
if 'NotRest' in c and noteOrRest.beams is not None and len(noteOrRest.beams) > 0:
if noteOrRest.beams.beamsList[0].type == 'start':
simpleElementParts.append("[ ")
elif noteOrRest.beams.beamsList[0].type == 'stop':
simpleElementParts.append("] ") # no start-stop in music21...
simpleElement = lyo.LySimpleElement(parts = simpleElementParts)
postEvents = self.postEventsFromObject(noteOrRest)
evc = lyo.LyEventChord(simpleElement, postEvents = postEvents)
mlSM = lyo.LySimpleMusic(eventChord = evc)
return mlSM
def appendBeamCode(self, noteOrChord):
r'''
Adds an LyEmbeddedScm object to the context's contents if the object has a .beams
attribute.
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> n1 = note.Note(quarterLength = 0.25)
>>> n2 = note.Note(quarterLength = 0.25)
>>> n1.beams.fill(2, 'start')
>>> n2.beams.fill(2, 'stop')
>>> lpc.appendBeamCode(n1)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context)
\set stemRightBeamCount = #2
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> lpc.appendBeamCode(n2)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context)
\set stemLeftBeamCount = #2
'''
leftBeams = 0
rightBeams = 0
if hasattr(noteOrChord, 'beams'):
if noteOrChord.beams is not None:
for b in noteOrChord.beams:
if b.type == 'start':
rightBeams += 1
elif b.type == 'continue':
rightBeams += 1
leftBeams += 1
elif b.type == 'stop':
leftBeams += 1
elif b.type == 'partial':
if b.direction == 'left':
leftBeams += 1
else: # better wrong direction than none
rightBeams += 1
if leftBeams > 0:
beamText = r'''\set stemLeftBeamCount = #%d''' % leftBeams
lpBeamScheme = lyo.LyEmbeddedScm(beamText)
self.context.contents.append(lpBeamScheme)
lpBeamScheme.setParent(self.context)
if rightBeams > 0:
beamText = r'''\set stemRightBeamCount = #%d''' % rightBeams
lpBeamScheme = lyo.LyEmbeddedScm(beamText)
self.context.contents.append(lpBeamScheme)
lpBeamScheme.setParent(self.context)
def appendStemCode(self, noteOrChord):
r'''
Adds an LyEmbeddedScm object to the context's contents if the object's stem direction
is set (currently, only "up" and "down" are supported).
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> n = note.Note()
>>> n.stemDirection = 'up'
>>> lpc.appendStemCode(n)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context.contents[0])
\once \override Stem #'direction = #UP
'''
if hasattr(noteOrChord, 'stemDirection') and noteOrChord.stemDirection is not None:
stemDirection = noteOrChord.stemDirection.upper()
if stemDirection in ['UP', 'DOWN']:
stemFile = r'''\once \override Stem #'direction = #%s ''' % stemDirection
lpStemScheme = lyo.LyEmbeddedScm(stemFile)
self.context.contents.append(lpStemScheme)
lpStemScheme.setParent(self.context)
def lySimpleMusicFromChord(self, chordObj):
'''
>>> conv = lily.translate.LilypondConverter()
>>> c1 = chord.Chord(["C#2", "E4", "D#5"])
>>> c1.quarterLength = 3.5
>>> c1.pitches[2].accidental.displayType = 'always'
>>> print(conv.lySimpleMusicFromChord(c1))
< cis, e' dis'' ! > 2..
test hidden chord:
>>> c1.hideObjectOnPrint = True
>>> print(conv.lySimpleMusicFromChord(c1))
s 2..
'''
self.appendBeamCode(chordObj)
if chordObj.hideObjectOnPrint is not True:
self.appendStemCode(chordObj)
chordBodyElements = []
for p in chordObj.pitches:
chordBodyElementParts = []
lpPitch = self.lyPitchFromPitch(p)
chordBodyElementParts.append(lpPitch)
if p.accidental is not None:
if p.accidental.displayType == 'always':
chordBodyElementParts.append('! ')
if p.accidental.displayStyle == 'parentheses':
chordBodyElementParts.append('? ')
lpChordElement = lyo.LyChordBodyElement(parts = chordBodyElementParts)
chordBodyElements.append(lpChordElement)
lpChordBody = lyo.LyChordBody(chordBodyElements = chordBodyElements)
else:
lpChordBody = lyo.LyPitch('s ', '')
lpMultipliedDuration = self.lyMultipliedDurationFromDuration(chordObj.duration)
postEvents = self.postEventsFromObject(chordObj)
lpNoteChordElement = lyo.LyNoteChordElement(chordBody = lpChordBody,
optionalNoteModeDuration = lpMultipliedDuration,
postEvents = postEvents)
evc = lyo.LyEventChord(noteChordElement = lpNoteChordElement)
mlSM = lyo.LySimpleMusic(eventChord = evc)
return mlSM
# TODO: Chord beaming...
def postEventsFromObject(self, generalNote):
'''
attaches events that apply to notes and chords (and some other things) equally
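
For example, a tie that is not a "stop" contributes a tie post-event
(a Fermata expression would similarly contribute a fermata post-event):

>>> n = note.Note()
>>> n.tie = tie.Tie('start')
>>> conv = lily.translate.LilypondConverter()
>>> conv.postEventsFromObject(n) == ['~ ']
True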
'''
postEvents = []
# remove this hack once lyrics work
#if generalNote.lyric is not None: # hack that uses markup...
# postEvents.append(r'_\markup { "' + generalNote.lyric + '" }\n ')
# consider this hack removed. Yeah!
if (hasattr(generalNote, 'tie') and generalNote.tie is not None):
if (generalNote.tie.type != "stop"):
postEvents.append("~ ")
if (hasattr(generalNote, 'expressions') and generalNote.expressions):
for thisExpression in generalNote.expressions:
if 'Fermata' in thisExpression.classes:
postEvents.append(r'\fermata ')
return postEvents
def lyPitchFromPitch(self, pitch):
'''
converts a music21.pitch.Pitch object to a lily.lilyObjects.LyPitch
object.
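
The result combines the base name and octave characters produced by the
two helper methods below, e.g.:

>>> conv = lily.translate.LilypondConverter()
>>> p = pitch.Pitch('D#5')
>>> print(conv.baseNameFromPitch(p) + conv.octaveCharactersFromPitch(p))
dis''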
'''
baseName = self.baseNameFromPitch(pitch)
octaveModChars = self.octaveCharactersFromPitch(pitch)
lyPitch = lyo.LyPitch(baseName, octaveModChars)
return lyPitch
def baseNameFromPitch(self, pitch):
'''
returns a string of the base name (including accidental)
for a music21 pitch
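
For example (a flat becomes "es", a sharp becomes "is"):

>>> conv = lily.translate.LilypondConverter()
>>> print(conv.baseNameFromPitch(pitch.Pitch('E-3')))
ees
>>> print(conv.baseNameFromPitch(pitch.Pitch('F#4')))
fis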
'''
baseName = pitch.step.lower()
if pitch.accidental is not None:
if pitch.accidental.name in self.accidentalConvert:
baseName += self.accidentalConvert[pitch.accidental.name]
return baseName
def octaveCharactersFromPitch(self, pitch):
'''
returns a string of single-quotes or commas or "" representing
the octave of a :class:`~music21.pitch.Pitch` object
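
For example (octave 3 is the unmarked base octave, so C2 gets one
comma and C5 gets two single-quotes):

>>> conv = lily.translate.LilypondConverter()
>>> print(conv.octaveCharactersFromPitch(pitch.Pitch('C2')))
,
>>> print(conv.octaveCharactersFromPitch(pitch.Pitch('C5')))
''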
'''
spio = pitch.implicitOctave
if (spio < 3):
correctedOctave = 3 - spio
octaveModChars = u',' * correctedOctave # C2 = c, C1 = c,,
else:
correctedOctave = spio - 3
octaveModChars = u'\'' * correctedOctave # C4 = c', C5 = c'' etc.
return octaveModChars
def lyMultipliedDurationFromDuration(self, durationObj):
r'''
take a simple Duration (that is, one with a single DurationTuple)
and return a LyMultipliedDuration object:
>>> d = duration.Duration(3)
>>> lpc = lily.translate.LilypondConverter()
>>> lyMultipliedDuration = lpc.lyMultipliedDurationFromDuration(d)
>>> str(lyMultipliedDuration)
'2. '
>>> str(lpc.lyMultipliedDurationFromDuration(duration.Duration(8.0)))
'\\breve '
Does not work with complex durations:
>>> d = duration.Duration(5.0)
>>> str(lpc.lyMultipliedDurationFromDuration(d))
Traceback (most recent call last):
LilyTranslateException: DurationException for durationObject <music21.duration.Duration 5.0>: Could not determine durationNumber from None
Instead split by components:
>>> components = d.components
>>> [str(lpc.lyMultipliedDurationFromDuration(c)) for c in components]
['1 ', '4 ']
'''
try:
number_type = duration.convertTypeToNumber(durationObj.type) # module call
except duration.DurationException as de:
raise LilyTranslateException("DurationException for durationObject %s: %s" % (durationObj, de))
if number_type < 1:
if number_type == 0.5:
number_type = r'\breve'
elif number_type == 0.25:
number_type = r'\longa'
else:
# no support for maxima...
number_type = int(number_type * 16)
else:
number_type = int(number_type)
try:
stenoDuration = lyo.LyStenoDuration(number_type, int(durationObj.dots))
multipliedDuration = lyo.LyMultipliedDuration(stenoDuration)
except duration.DurationException as de:
raise LilyTranslateException("DurationException: Cannot translate durationObject %s: %s" % (durationObj, de))
return multipliedDuration
def lyEmbeddedScmFromClef(self, clefObj):
r'''
converts a Clef object to a
lilyObjects.LyEmbeddedScm object
>>> tc = clef.TrebleClef()
>>> conv = lily.translate.LilypondConverter()
>>> lpEmbeddedScm = conv.lyEmbeddedScmFromClef(tc)
>>> print(lpEmbeddedScm)
\clef "treble"
'''
c = clefObj.classes
if 'Treble8vbClef' in c:
lilyName = 'treble_8'
elif 'TrebleClef' in c:
lilyName = "treble"
elif 'BassClef' in c:
lilyName = "bass"
elif 'AltoClef' in c:
lilyName = 'alto'
elif 'TenorClef' in c:
lilyName = 'tenor'
elif 'SopranoClef' in c:
lilyName = 'soprano'
elif 'PercussionClef' in c:
lilyName = 'percussion'
else:
environLocal.printDebug('got a clef that lilypond does not know what to do with: %s' % clefObj)
lilyName = ""
lpEmbeddedScm = lyo.LyEmbeddedScm()
clefScheme = lpEmbeddedScm.backslash + 'clef ' + lpEmbeddedScm.quoteString(lilyName) + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = clefScheme
return lpEmbeddedScm
def lyEmbeddedScmFromKeySignature(self, keyObj):
r'''
converts a Key or KeySignature object
to a lilyObjects.LyEmbeddedScm object
>>> d = key.KeySignature(-1)
>>> d.mode = 'minor'
>>> conv = lily.translate.LilypondConverter()
>>> lpEmbeddedScm = conv.lyEmbeddedScmFromKeySignature(d)
>>> print(lpEmbeddedScm)
\key d \minor
Major is assumed:
>>> fsharp = key.KeySignature(6)
>>> print(conv.lyEmbeddedScmFromKeySignature(fsharp))
\key fis \major
'''
(p, m) = keyObj.pitchAndMode
if m is None:
m = "major"
pn = self.baseNameFromPitch(p)
lpEmbeddedScm = lyo.LyEmbeddedScm()
keyScheme = lpEmbeddedScm.backslash + 'key ' + pn + ' ' + lpEmbeddedScm.backslash + m + ' ' + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = keyScheme
return lpEmbeddedScm
def lyEmbeddedScmFromTimeSignature(self, ts):
r'''
convert a :class:`~music21.meter.TimeSignature` object
to a lilyObjects.LyEmbeddedScm object
>>> ts = meter.TimeSignature('3/4')
>>> conv = lily.translate.LilypondConverter()
>>> print(conv.lyEmbeddedScmFromTimeSignature(ts))
\time 3/4
'''
lpEmbeddedScm = lyo.LyEmbeddedScm()
keyScheme = lpEmbeddedScm.backslash + 'time ' + ts.ratioString + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = keyScheme
return lpEmbeddedScm
def setContextForTupletStart(self, inObj):
'''
if the inObj has tuplets then we set a new context
for the tuplets and anything up till a tuplet stop.
Note that a broken tuplet (a la Michael Gordon)
will not work.
If there are no tuplets, this routine does
nothing. If there are tuplets and they have type start then
it returns an lpMusicList object, which is the new context
For now, no nested tuplets. They're an
easy extension, but there's too much
else missing to do it now...
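
A short sketch: a note that starts a standard triplet opens a new
2/3 "times" context (see setContextForTimeFraction):

>>> lpc = lily.translate.LilypondConverter()
>>> n = note.Note(quarterLength=1.0/3)
>>> n.duration.tuplets[0].type = 'start'
>>> lpc.setContextForTupletStart(n)
<music21.lily.lilyObjects.LyMusicList object at 0x...>
>>> lpc.context.getParent().getParent().fraction
'2/3'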
'''
if inObj.duration.tuplets is None or len(inObj.duration.tuplets) == 0:
return None
elif inObj.duration.tuplets[0].type == 'start':
numerator = str(int(inObj.duration.tuplets[0].tupletNormal[0]))
denominator = str(int(inObj.duration.tuplets[0].tupletActual[0]))
lpMusicList = self.setContextForTimeFraction(numerator, denominator)
return lpMusicList
else:
return None
def setContextForTimeFraction(self, numerator, denominator):
'''
Explicitly starts a new context for scaled music (tuplets, etc.)
for the given numerator and denominator (either an int or a string or unicode)
Returns an lpMusicList object contained in an lpSequentialMusic object
in an lpPrefixCompositeMusic object which sets the times object to a particular
fraction.
>>> lpc = lily.translate.LilypondConverter()
>>> lpc.context
<music21.lily.lilyObjects.LyLilypondTop object at 0x...>
>>> lyTop = lpc.context
>>> lyoMusicList = lpc.setContextForTimeFraction(5, 4)
>>> lyoMusicList
<music21.lily.lilyObjects.LyMusicList object at 0x...>
>>> lpc.context
<music21.lily.lilyObjects.LyMusicList object at 0x...>
>>> lpc.context is lyoMusicList
True
>>> lpc.context.getParent()
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> lpc.context.getParent().getParent()
<music21.lily.lilyObjects.LyPrefixCompositeMusic object at 0x...>
>>> lpc.context.getParent().getParent().fraction
'5/4'
>>> lpc.context.getParent().getParent().type
'times'
>>> lpc.context.getParent().getParent().getParent()
<music21.lily.lilyObjects.LyLilypondTop object at 0x...>
>>> lpc.context.getParent().getParent().getParent() is lyTop
True
'''
# pylint: disable=undefined-variable
if six.PY2:
fraction = unicode(numerator) + '/' + unicode(denominator) # @UndefinedVariable
else:
fraction = str(numerator) + '/' + str(denominator)
lpMusicList = lyo.LyMusicList()
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList)
## the wrapper objects below are technically needed, but we can speed things up by skipping them
#lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
#lpCompositeMusic = lyo.LyCompositeMusic(groupedMusicList = lpGroupedMusicList)
#lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
lpPrefixCompositeMusic = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpSequentialMusic)
currentContents = self.context.contents
if currentContents is None:
raise LilyTranslateException("Cannot find contents for self.context: %r " % self.context)
currentContents.append(lpPrefixCompositeMusic)
lpPrefixCompositeMusic.setParent(self.context)
self.newContext(lpMusicList)
return lpMusicList
def setContextForTupletStop(self, inObj):
'''
Reverse of setContextForTupletStart
'''
if len(inObj.duration.tuplets) == 0:
return
elif inObj.duration.tuplets[0].type == 'stop':
self.restoreContext()
else:
return None
def appendContextFromVariant(self, variantObjectOrList, activeSite=None, coloredVariants=False):
'''
Create a new context from the variant object (or a list of variants) and append it to the current context.
'''
musicList = []
if isinstance(variantObjectOrList, variant.Variant):
variantObject = variantObjectOrList
replacedElements = variantObject.replacedElements(activeSite)
lpPrefixCompositeMusicVariant = self.lyPrefixCompositeMusicFromVariant(
variantObject, replacedElements, coloredVariants=coloredVariants)
lpSequentialMusicStandard = self.lySequentialMusicFromStream(replacedElements)
musicList.append(lpPrefixCompositeMusicVariant)
musicList.append(lpSequentialMusicStandard)
elif isinstance(variantObjectOrList, list):
longestReplacementLength = -1
variantDict = {}
for variantObject in variantObjectOrList:
if variantObject.groups:
variantName = variantObject.groups[0]
else:
variantName = "variant"
if variantName in variantDict:
variantDict[variantName].append(variantObject)
else:
variantDict[variantName] = [variantObject]
for key in variantDict:
variantList = variantDict[key]
if len(variantList) == 1:
variantObject = variantList[0]
replacedElements = variantObject.replacedElements(activeSite)
lpPrefixCompositeMusicVariant = self.lyPrefixCompositeMusicFromVariant(
variantObject, replacedElements, coloredVariants=coloredVariants)
musicList.append(lpPrefixCompositeMusicVariant)
else:
lpPrefixCompositeMusicVariant, replacedElements = self.lyPrefixCompositeMusicFromRelatedVariants(
variantList, activeSite=activeSite, coloredVariants=coloredVariants)
musicList.append(lpPrefixCompositeMusicVariant)
if longestReplacementLength < replacedElements.duration.quarterLength:
longestReplacementLength = replacedElements.duration.quarterLength
longestReplacedElements = replacedElements
lpSequentialMusicStandard = self.lySequentialMusicFromStream(longestReplacedElements)
musicList.append(lpSequentialMusicStandard)
for el in longestReplacedElements:
self.doNotOutput.append(el)
lp2MusicList = lyo.LyMusicList()
lp2MusicList.contents = musicList
lp2SimultaneousMusic = lyo.LySimultaneousMusic()
lp2SimultaneousMusic.musicList = lp2MusicList
lp2GroupedMusicList = lyo.LyGroupedMusicList()
lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
contextObject = self.context
currentMusicList = contextObject.contents
currentMusicList.append(lp2GroupedMusicList)
lp2GroupedMusicList.setParent(self.context)
def lyPrefixCompositeMusicFromRelatedVariants(self, variantList,
activeSite=None, coloredVariants=False):
r'''
>>> s1 = converter.parse("tinynotation: 4/4 a4 a a a a1")
>>> s2 = converter.parse("tinynotation: 4/4 b4 b b b")
>>> s3 = converter.parse("tinynotation: 4/4 c4 c c c")
>>> s4 = converter.parse("tinynotation: 4/4 d4 d d d")
>>> s5 = converter.parse("tinynotation: 4/4 e4 e e e f f f f g g g g a a a a b b b b")
>>> for s in [ s1, s2, s3, s4, s5]:
... s.makeMeasures(inPlace = True)
>>> activeSite = stream.Part(s5)
>>> v1 = variant.Variant()
>>> for el in s1:
... v1.append(el)
>>> v1.replacementDuration = 4.0
>>> v2 = variant.Variant()
>>> sp2 = note.SpacerRest()
>>> sp2.duration.quarterLength = 4.0
>>> v2.replacementDuration = 4.0
>>> v2.append(sp2)
>>> for el in s2:
... v2.append(el)
>>> v3 = variant.Variant()
>>> sp3 = note.SpacerRest()
>>> sp3.duration.quarterLength = 8.0
>>> v3.replacementDuration = 4.0
>>> v3.append(sp3)
>>> for el in s3:
... v3.append(el)
>>> v4 = variant.Variant()
>>> sp4 = note.SpacerRest()
>>> sp4.duration.quarterLength = 16.0
>>> v4.replacementDuration = 4.0
>>> v4.append(sp4)
>>> for el in s4:
... v4.append(el)
>>> variantList = [v4,v1,v3,v2]
>>> for v in variantList :
... v.groups = ['london']
... activeSite.insert(0.0, v)
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.lyPrefixCompositeMusicFromRelatedVariants(variantList, activeSite = activeSite)[0])
\new Staff = london... { { \times 1/2 {\startStaff \clef "treble"
a' 4
a' 4
a' 4
a' 4
\clef "treble"
| %{ end measure 1 %}
a' 1
| %{ end measure 2 %}
\stopStaff}
}
<BLANKLINE>
{\startStaff \clef "treble"
b... 4
b... 4
b... 4
b... 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
{\startStaff \clef "treble"
c' 4
c' 4
c' 4
c' 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
s 1
{\startStaff \clef "treble"
d' 4
d' 4
d' 4
d' 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
}
<BLANKLINE>
'''
# order the list by the offset of each variant's first non-spacer element
def findOffsetOfFirstNonSpacerElement(inputStream):
for el in inputStream:
if "SpacerRest" in el.classes:
pass
else:
return inputStream.elementOffset(el)
variantList.sort(key = lambda v: findOffsetOfFirstNonSpacerElement(v._stream))
# Stuff that can be done on the first element only (clef, new/old, id, color)
replacedElements = variantList[0].replacedElements(activeSite)
replacedElementsClef = replacedElements[0].getContextByClass('Clef')
variantContainerStream = variantList[0].getContextByClass('Part')
if variantContainerStream is None:
variantContainerStream = variantList[0].getContextByClass('Stream')
variantList[0].insert(0.0, replacedElementsClef)
variantName = variantList[0].groups[0]
if variantName in self.addedVariants:
newVariant = False
else:
self.addedVariants.append(variantName)
newVariant = True
containerId = makeLettersOnlyId(variantContainerStream.id)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+containerId)
if coloredVariants is True:
color = self.variantColors[self.addedVariants.index(variantName) % 6]
#######################
musicList = []
highestOffsetSoFar = 0.0
self.variantMode = True
for v in variantList:
# For each variant in the list, we make a lilypond representation of the
# spacer between this variant and the previous if it is non-zero and append it
# Then we strip off the spacer and make a lilypond representation of the variant
# with the appropriate tupletting if any and append that.
# At the end we make a new lilypond context for it and return it.
firstOffset = findOffsetOfFirstNonSpacerElement(v._stream)
if firstOffset < highestOffsetSoFar:
raise LilyTranslateException("Should not have overlapping variants.")
else:
spacerDuration = firstOffset - highestOffsetSoFar
highestOffsetSoFar = v.replacementDuration + firstOffset
# make spacer with spacerDuration and append
if spacerDuration > 0.0:
spacer = note.SpacerRest()
spacer.duration.quarterLength = spacerDuration
lySpacer = self.lySimpleMusicFromNoteOrRest(spacer)
musicList.append(lySpacer)
if coloredVariants is True:
for n in v._stream.flat.notesAndRests:
n.editorial.color = color
# make the variant body (with or without a tuplet fraction)
# Strip off spacer
endOffset = v.containedHighestTime
vStripped = variant.Variant(v._stream.getElementsByOffset(firstOffset,
offsetEnd = endOffset))
vStripped.replacementDuration = v.replacementDuration
replacedElementsLength = vStripped.replacementDuration
variantLength = vStripped.containedHighestTime - firstOffset
if variantLength != replacedElementsLength:
numerator, denominator = common.decimalToTuplet(replacedElementsLength/variantLength)
fraction = str(numerator) + '/' + str(denominator)
lpOssiaMusicVariantPreFraction = self.lyOssiaMusicFromVariant(vStripped)
lpVariantTuplet = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpOssiaMusicVariantPreFraction)
lpOssiaMusicVariant = lyo.LySequentialMusic(musicList = lpVariantTuplet)
else:
lpOssiaMusicVariant = self.lyOssiaMusicFromVariant(vStripped)
musicList.append(lpOssiaMusicVariant)
longestVariant = v
# The last variant in the iteration should have the highestOffsetSoFar,
# so it has the appropriate replacedElements to return, which
# appendContextFromVariant can compare with the rest.
replacedElements = longestVariant.replacedElements(activeSite, includeSpacers = True)
lpMusicList = lyo.LyMusicList(musicList)
lpInternalSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList )
if newVariant is True:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = "Staff",
music = lpInternalSequentialMusic)
else: #newVariant is False
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'context',
optionalId = variantId,
simpleString = "Staff",
music = lpInternalSequentialMusic)
#optionalContextMod = r'''
#\with {
# \remove "Time_signature_engraver"
# alignAboveContext = #"%s"
# fontSize = ##-3
# \override StaffSymbol #'staff-space = #(magstep -3)
# \override StaffSymbol #'thickness = #(magstep -3)
# \override TupletBracket #'bracket-visibility = ##f
# \override TupletNumber #'stencil = ##f
# \override Clef #'transparent = ##t
# }
# ''' % containerId #\override BarLine #'transparent = ##t is the best way of fixing #the barlines that I have come up with.
#
#lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
self.variantMode = False
return lpPrefixCompositeMusicVariant, replacedElements
def lyPrefixCompositeMusicFromVariant(self, variantObject, replacedElements, coloredVariants = False):
r'''
>>> pstream = converter.parse("tinynotation: 4/4 a4 b c d e4 f g a")
>>> pstream.makeMeasures(inPlace = True)
>>> p = stream.Part(pstream)
>>> p.id = 'p1'
>>> vstream = converter.parse("tinynotation: 4/4 a4. b8 c4 d")
>>> vstream.makeMeasures(inPlace = True)
>>> v = variant.Variant(vstream)
>>> v.groups = ['london']
>>> p.insert(0.0, v)
>>> lpc = lily.translate.LilypondConverter()
>>> replacedElements = v.replacedElements()
>>> lpPrefixCompositeMusicVariant = lpc.lyPrefixCompositeMusicFromVariant(v, replacedElements)
>>> print(lpPrefixCompositeMusicVariant) # ellipses are for non-byte fixups
\new Staff = londonpx { {\startStaff \clef "treble"
a' 4.
b...
c' 4
d' 4
\clef "treble"
| %{ end measure 1 %}
\stopStaff}
}
>>> replacedElements.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note A>
{1.0} <music21.note.Note B>
{2.0} <music21.note.Note C>
{3.0} <music21.note.Note D>
>>> print(lpc.addedVariants)
['london']
'''
replacedElementsClef = replacedElements[0].getContextByClass('Clef')
variantContainerStream = variantObject.getContextByClass('Part')
if variantContainerStream is None:
variantContainerStream = variantObject.getContextByClass('Stream')
if replacedElementsClef is not None:
if replacedElementsClef not in variantObject.elements:
variantObject.insert(0, replacedElementsClef)
if variantObject.groups:
variantName = variantObject.groups[0]
else:
variantName = 'variant'
if variantName in self.addedVariants:
newVariant = False
else:
self.addedVariants.append(variantName)
newVariant = True
containerId = makeLettersOnlyId(variantContainerStream.id)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+containerId)
if coloredVariants is True:
color = self.variantColors[self.addedVariants.index(variantName) % 6]
for n in variantObject._stream.flat.notesAndRests:
n.editorial.color = color
musicList = []
varFilter = variantObject.getElementsByClass("SpacerRest")
if varFilter:
spacer = varFilter[0]
spacerDur = spacer.duration.quarterLength
if spacer.duration.quarterLength > 0.0:
lySpacer = self.lySimpleMusicFromNoteOrRest(spacer)
musicList.append(lySpacer)
variantObject.remove(spacer)
else:
spacerDur = 0.0
lpOssiaMusicVariant = self.lyOssiaMusicFromVariant(variantObject)
replacedElementsLength = variantObject.replacementDuration
variantLength = variantObject.containedHighestTime - spacerDur
self.variantMode = True
if variantLength != replacedElementsLength:
numerator, denominator = common.decimalToTuplet(replacedElementsLength/variantLength)
fraction = str(numerator) + '/' + str(denominator)
lpVariantTuplet = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpOssiaMusicVariant)
lpInternalSequentialMusic = lyo.LySequentialMusic(musicList = lpVariantTuplet)
musicList.append(lpInternalSequentialMusic)
else:
musicList.append(lpOssiaMusicVariant)
lpMusicList = lyo.LyMusicList(musicList)
lpOssiaMusicVariantWithSpacer = lyo.LySequentialMusic(musicList = lpMusicList )
if newVariant is True:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = "Staff",
music = lpOssiaMusicVariantWithSpacer)
else:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'context',
optionalId = variantId,
simpleString = "Staff",
music = lpOssiaMusicVariantWithSpacer)
# optionalContextMod = r'''
#\with {
# \remove "Time_signature_engraver"
# alignAboveContext = #"%s"
# fontSize = #-3
# \override StaffSymbol #'staff-space = #(magstep -3)
# \override StaffSymbol #'thickness = #(magstep -3)
# \override TupletBracket #'bracket-visibility = ##f
# \override TupletNumber #'stencil = ##f
# \override Clef #'transparent = ##t
# }
# ''' % containerId #\override BarLine #'transparent = ##t is the best way of fixing the #barlines that I have come up with.
#
# lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
self.variantMode = False
return lpPrefixCompositeMusicVariant
#musicList2 = []
#musicList2.append(lpPrefixCompositeMusicVariant)
#musicList2.append(lpSequentialMusicStandard )
#
#lp2MusicList = lyo.LyMusicList()
#lp2MusicList.contents = musicList2
#lp2SimultaneousMusic = lyo.LySimultaneousMusic()
#lp2SimultaneousMusic.musicList = lp2MusicList
#lp2GroupedMusicList = lyo.LyGroupedMusicList()
#lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
#
#contextObject = self.context
#currentMusicList = contextObject.contents
#currentMusicList.append(lp2GroupedMusicList)
#lp2GroupedMusicList.setParent(self.context)
def lyOssiaMusicFromVariant(self, variantIn):
r'''
returns a LyOssiaMusic object from a stream
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> v = variant.Variant(c)
>>> lpc = lily.translate.LilypondConverter()
>>> lySequentialMusicOut = lpc.lySequentialMusicFromStream(v)
>>> lySequentialMusicOut
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> print(lySequentialMusicOut)
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
'''
musicList = []
lpMusicList = lyo.LyMusicList(contents = musicList)
lpOssiaMusic = lyo.LyOssiaMusic(musicList = lpMusicList)
self.newContext(lpMusicList)
self.variantMode = True
self.appendObjectsToContextFromStream(variantIn._stream)
lyObject = self.closeMeasure()
if lyObject is not None:
musicList.append(lyObject)
self.restoreContext()
self.variantMode = False
return lpOssiaMusic
def setHeaderFromMetadata(self, metadataObject = None, lpHeader = None):
r'''
Returns a lilypond.lilyObjects.LyLilypondHeader object
set with data from the metadata object
>>> md = metadata.Metadata()
>>> md.title = 'My Title'
>>> md.alternativeTitle = 'My "sub"-title'
>>> lpc = lily.translate.LilypondConverter()
>>> lpHeader = lpc.setHeaderFromMetadata(md)
>>> print(lpHeader)
\header { title = "My Title"
subtitle = "My \"sub\"-title"
}
'''
if lpHeader is None:
lpHeader = lyo.LyLilypondHeader()
if lpHeader.lilypondHeaderBody is None:
lpHeaderBody = lyo.LyLilypondHeaderBody()
lpHeader.lilypondHeaderBody = lpHeaderBody
else:
lpHeaderBody = lpHeader.lilypondHeaderBody
lpHeaderBodyAssignments = lpHeaderBody.assignments
if metadataObject is not None:
if metadataObject.title is not None:
lyTitleAssignment = lyo.LyAssignment(assignmentId = "title",
identifierInit = lyo.LyIdentifierInit(
string=metadataObject.title))
lpHeaderBodyAssignments.append(lyTitleAssignment)
lyTitleAssignment.setParent(lpHeaderBody)
if metadataObject.alternativeTitle is not None:
lySubtitleAssignment = lyo.LyAssignment(assignmentId = "subtitle",
identifierInit = lyo.LyIdentifierInit(
string=metadataObject.alternativeTitle))
lpHeaderBodyAssignments.append(lySubtitleAssignment)
lySubtitleAssignment.setParent(lpHeaderBody)
lpHeaderBody.assignments = lpHeaderBodyAssignments
return lpHeader
def closeMeasure(self, barChecksOnly=False):
r'''
return a LyObject or None for the end of the previous Measure
uses self.currentMeasure
>>> lpc = lily.translate.LilypondConverter()
>>> m = stream.Measure()
>>> m.number = 2
>>> m.rightBarline = 'double'
>>> lpc.currentMeasure = m
>>> lyObj = lpc.closeMeasure()
>>> lpc.currentMeasure is None
True
>>> print(lyObj)
\bar "||" %{ end measure 2 %}
'''
m = self.currentMeasure
self.currentMeasure = None
if m is None:
return None
#if m.rightBarline is None:
# return None
#elif m.rightBarline.style == 'regular':
# return None
if self.variantMode is True:
barChecksOnly = True
lpBarline = lyo.LyEmbeddedScm()
if barChecksOnly is True:
barString = "|"
elif m.rightBarline is None:
barString = lpBarline.backslash + 'bar ' + lpBarline.quoteString("|")
else:
barString = lpBarline.backslash + 'bar ' + lpBarline.quoteString(
self.barlineDict[m.rightBarline.style])
if m.number is not None:
barString += lpBarline.comment("end measure %d" % m.number)
lpBarline.content = barString
return lpBarline
def getSchemeForPadding(self, measureObject):
r'''
lilypond partial durations are very strange and are really of
type LyMultipliedDuration. You notate how many
notes are left in the measure: for a quarter note, write "4";
for an eighth, write "8"; but for 3 eighths, write "8*3"!
so we will measure in 32nd notes always... won't work for tuplets
of course.
returns a scheme object or None if not needed
>>> m = stream.Measure()
>>> m.append(meter.TimeSignature('3/4'))
>>> m.paddingLeft = 2.0
>>> lpc = lily.translate.LilypondConverter()
>>> outScheme = lpc.getSchemeForPadding(m)
>>> print(outScheme)
\partial 32*8
'''
pL = measureObject.paddingLeft
if pL == 0:
return None
tses = measureObject.getTimeSignatures()
if len(tses) == 0:
barLength = 4.0
else:
ts = tses[0]
barLength = ts.barDuration.quarterLength
remainingQL = barLength - pL
if remainingQL <= 0:
raise LilyTranslateException('your first pickup measure is non-existent!')
remaining32s = int(remainingQL * 8)
lyObject = lyo.LyEmbeddedScm()
schemeStr = lyObject.backslash + 'partial 32*' + str(remaining32s) + ' '
lyObject.content = schemeStr
return lyObject
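# A hedged worked example of the padding arithmetic above (doctest-style,
# mirroring the docstring; assumes standard music21 Measure/TimeSignature
# behavior). A 6/8 measure has barLength = 3.0 quarterLengths, so
# paddingLeft = 1.0 leaves 2.0 quarterLengths = 16 thirty-second notes:
#
# >>> m = stream.Measure()
# >>> m.append(meter.TimeSignature('6/8'))
# >>> m.paddingLeft = 1.0
# >>> lpc = lily.translate.LilypondConverter()
# >>> print(lpc.getSchemeForPadding(m))
# \partial 32*16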
#--------------display and converter routines ---------------------#
def writeLyFile(self, ext='', fp=None):
'''
writes the contents of the self.topLevelObject to a file.
The extension should be ly. If fp is None then a named temporary
file is created by environment.getTempFile.
'''
tloOut = str(self.topLevelObject)
if six.PY2:
tloOut = tloOut.encode('utf-8')
if fp is None:
fp = environLocal.getTempFile(ext)
self.tempName = fp
with open(self.tempName, 'w') as f:
f.write(tloOut)
return self.tempName
def runThroughLily(self, format=None, backend=None, fileName=None, skipWriting=False): #@ReservedAssignment
'''
creates a .ly file from self.topLevelObject via .writeLyFile
then runs the file through Lilypond.
Returns the full path of the file produced by lilypond including the format extension.
If skipWriting is True and a fileName is given then it will run
that file through lilypond instead
'''
LILYEXEC = self.findLilyExec()
if fileName is None:
fileName = self.writeLyFile(ext='ly')
else:
if skipWriting is False:
fileName = self.writeLyFile(ext='ly', fp=fileName)
lilyCommand = '"' + LILYEXEC + '" '
if format is not None:
lilyCommand += "-f " + format + " "
if backend is not None:
lilyCommand += self.backendString + backend + " "
lilyCommand += "-o " + fileName + " " + fileName
os.system(lilyCommand)
try:
os.remove(fileName + ".eps")
except OSError:
pass
fileform = fileName + '.' + format
if not os.path.exists(fileform):
# cannot find full path; try current directory
fileend = os.path.basename(fileform)
if not os.path.exists(fileend):
raise LilyTranslateException("cannot find " + fileend +
" or the full path " + fileform + " original file was " + fileName)
else:
fileform = fileend
return fileform
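# A minimal usage sketch of this conversion pipeline, assuming a local
# LilyPond install (located via findLilyExec) and a score already loaded
# with loadObjectFromScore; 'someScore' below is a placeholder:
#
# lpc = LilypondConverter()
# lpc.loadObjectFromScore(someScore)
# pdfPath = lpc.runThroughLily(format='pdf', backend='ps')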
def createPDF(self, fileName=None):
'''
create a PDF file from self.topLevelObject and return the filepath of the file.
most users will just call stream.write('lily.pdf') on a stream.
'''
self.headerScheme.content = "" # clear header
lilyFile = self.runThroughLily(backend='ps', format = 'pdf', fileName = fileName)
return lilyFile
def showPDF(self):
'''
create a PDF file from self.topLevelObject, show it with your pdf reader
(often Adobe Acrobat/Adobe Reader or Apple Preview)
and return the filepath of the file.
most users will just call stream.Stream.show('lily.pdf') on a stream.
'''
lF = self.createPDF()
if not os.path.exists(lF):
raise Exception('Something went wrong with PDF Creation')
else:
if os.name == 'nt':
command = 'start /wait %s && del /f %s' % (lF, lF)
elif sys.platform == 'darwin':
command = 'open %s' % lF
else:
command = ''
os.system(command)
def createPNG(self, fileName=None):
'''
create a PNG file from self.topLevelObject and return the filepath of the file.
most users will just call stream.write('lily.png') on a stream.
if PIL is installed then a small white border is created around the score
'''
lilyFile = self.runThroughLily(backend='eps', format='png', fileName=fileName)
if noPIL is False:
try:
lilyImage = Image.open(lilyFile) # @UndefinedVariable
lilyImage2 = ImageOps.expand(lilyImage, 10, 'white')
lilyImage2.save(lilyFile)
except Exception: # pylint: disable=broad-except
pass # no big deal probably...
return lilyFile
# if os.name == 'nt':
# format = 'png'
# # why are we changing format for darwin? -- did not work before
# elif sys.platform == 'darwin':
# format = 'jpeg'
# else: # default for all other platforms
# format = 'png'
#
# if lilyImage2.mode == "I;16":
# # @PIL88 @PIL101
# # "I;16" isn't an 'official' mode, but we still want to
# # provide a simple way to show 16-bit images.
# base = "L"
# else:
# base = Image.getmodebase(lilyImage2.mode)
# if base != lilyImage2.mode and lilyImage2.mode != "1":
# file = lilyImage2.convert(base)._dump(format=format)
# else:
# file = lilyImage2._dump(format=format)
# return file
# except:
# raise
def showPNG(self):
'''
Take the object, run it through LilyPond, and then show it as a PNG file.
On Windows, the PNG file will not be deleted, so you will need to clean out
TEMP every once in a while.
Most users will just want to call stream.Stream.show('lily.png') instead.
'''
try:
lilyFile = self.createPNG()
except LilyTranslateException as e:
raise LilyTranslateException("Problems creating PNG file: (" + str(e) + ")")
environLocal.launch('png', lilyFile)
#self.showImageDirect(lilyFile)
return lilyFile
def createSVG(self, fileName=None):
'''
create an SVG file from self.topLevelObject and return the filepath of the file.
most users will just call stream.Stream.write('lily.svg') on a stream.
'''
self.headerScheme.content = "" # clear header
lilyFile = self.runThroughLily(format='svg', backend='svg', fileName=fileName)
return lilyFile
def showSVG(self, fileName=None):
'''
create an SVG file from self.topLevelObject, show it with your SVG viewer
(often a web browser on a PC)
and return the filepath of the file.
most users will just call stream.Stream.show('lily.svg') on a stream.
'''
lilyFile = self.createSVG(fileName)
environLocal.launch('svg', lilyFile)
return lilyFile
class LilyTranslateException(exceptions21.Music21Exception):
pass
class Test(unittest.TestCase):
pass
def testExplicitConvertChorale(self):
lpc = LilypondConverter()
b = _getCachedCorpusFile('bach/bwv66.6')
lpc.loadObjectFromScore(b, makeNotation = False)
#print lpc.topLevelObject
def testComplexDuration(self):
from music21 import stream, meter
s = stream.Stream()
n1 = note.Note('C') # test no octave also!
n1.duration.quarterLength = 2.5 # BUG 2.3333333333 doesn't work right
self.assertEqual(n1.duration.type, 'complex')
n2 = note.Note('D4')
n2.duration.quarterLength = 1.5
s.append(meter.TimeSignature('4/4'))
s.append(n1)
s.append(n2)
#s.show('text')
lpc = LilypondConverter()
lpc.loadObjectFromScore(s)
#print lpc.topLevelObject
#lpc.showPNG()
#s.show('lily.png')
class TestExternal(unittest.TestCase):
def xtestConvertNote(self):
n = note.Note("C5")
n.show('lily.png')
def xtestConvertChorale(self):
b = _getCachedCorpusFile('bach/bwv66.6')
for n in b.flat:
n.beams = None
b.parts[0].show('lily.svg')
def xtestSlowConvertOpus(self):
fifeOpus = corpus.parse('miscFolk/americanfifeopus.abc')
fifeOpus.show('lily.png')
def xtestBreve(self):
from music21 import stream, meter
n = note.Note("C5")
n.duration.quarterLength = 8.0
m = stream.Measure()
m.append(meter.TimeSignature('8/4'))
m.append(n)
p = stream.Part()
p.append(m)
s = stream.Score()
s.append(p)
s.show('lily.png')
def testStaffLines(self):
from music21 import stream
s = stream.Score()
p = stream.Part()
p.append(note.Note("B4", type='whole'))
p.staffLines = 1
s.insert(0, p)
p2 = stream.Part()
p2.append(note.Note("B4", type='whole'))
p2.staffLines = 7
s.insert(0, p2)
s.show('lily.png')
#-------------------------------------------------------------------------------
if __name__ == "__main__":
import music21
music21.mainTest(Test, TestExternal)
#music21.mainTest(TestExternal, 'noDocTest')
#------------------------------------------------------------------------------
# eof
|
# This file is part of cloud-init. See LICENSE file for license information.
"""Run the dhclient hook to record network info."""
import argparse
import os
from cloudinit import atomic_helper
from cloudinit import log as logging
from cloudinit import stages
LOG = logging.getLogger(__name__)
NAME = "dhclient-hook"
UP = "up"
DOWN = "down"
EVENTS = (UP, DOWN)
def _get_hooks_dir():
i = stages.Init()
return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
def _filter_env_vals(info):
"""Given info (os.environ), return a dictionary with
lower case keys for each entry starting with DHCP4_ or new_."""
new_info = {}
for k, v in info.items():
if k.startswith("DHCP4_") or k.startswith("new_"):
key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
new_info[key] = v
return new_info
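# A hedged illustration of the filtering above; the keys are invented for
# the example and need not match a real dhclient environment:
#
# >>> env = {'DHCP4_ip_address': '10.0.0.5',
# ... 'new_subnet_mask': '255.255.255.0',
# ... 'PATH': '/usr/bin'}
# >>> _filter_env_vals(env) == {'ip_address': '10.0.0.5',
# ... 'subnet_mask': '255.255.255.0'}
# True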
def run_hook(interface, event, data_d=None, env=None):
if event not in EVENTS:
raise ValueError("Unexpected event '%s'. Expected one of: %s" %
(event, EVENTS))
if data_d is None:
data_d = _get_hooks_dir()
if env is None:
env = os.environ
hook_file = os.path.join(data_d, interface + ".json")
if event == UP:
if not os.path.exists(data_d):
os.makedirs(data_d)
atomic_helper.write_json(hook_file, _filter_env_vals(env))
LOG.debug("Wrote dhclient options in %s", hook_file)
elif event == DOWN:
if os.path.exists(hook_file):
os.remove(hook_file)
LOG.debug("Removed dhclient options file %s", hook_file)
def get_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
"event", help='event taken on the interface', choices=EVENTS)
parser.add_argument(
"interface", help='the network interface being acted upon')
# cloud-init main uses 'action'
parser.set_defaults(action=(NAME, handle_args))
return parser
def handle_args(name, args, data_d=None):
"""Handle the Namespace args.
Takes 'name' as passed by cloud-init main; it is not used here."""
return run_hook(interface=args.interface, event=args.event, data_d=data_d)
if __name__ == '__main__':
import sys
parser = get_parser()
args = parser.parse_args(args=sys.argv[1:])
return_value = handle_args(
NAME, args, data_d=os.environ.get('_CI_DHCP_HOOK_DATA_D'))
if return_value:
sys.exit(return_value)
# vi: ts=4 expandtab
|
import data as mldata
import pdb
class ClassificationBase(object):
def __init__(self, start, length):
self.start = start
self.length = length
self.profiles = {}
self.attempt = {}
self.userlvl = []
self.mindtPr = {}
self.level = -1
def readProfiles(self):
""" Get data for all users + the min in each level for all users """
users = mldata.getUsers()
ud = {}
udcount = {}
for user in users:
dtuser = mldata.UserData(user, self.level)
udcount[user] = dtuser.getUserFeatureLevels()
ud[user] = dtuser.ftlevels #data from all levels and features for one user
minc = 1000000
self.userlvl = []
for user in users:
if mldata.DEBUGL >= 2:
print ("User %s, Level %d -> Length:%d"%(user,self.level,udcount[user]))
cntuserlvl = udcount[user]
if cntuserlvl <= 109:
continue
self.userlvl.append(user)
if cntuserlvl < minc:
minc = cntuserlvl
if minc == 1000000:
minc = 0
# Only get the last portion of the profile
for user in self.userlvl:
for ft in ud[user]:
ud[user][ft] = ud[user][ft][-minc:]
return ud, minc
def readAttempt(self, level, user):
users = mldata.getUsers(is_profile = False)
if user not in users:
return False, False
dtuser = mldata.UserData(user, self.level, is_profile = False)
udcount = dtuser.getUserFeatureLevels()
return dtuser.ftlevels,udcount
def readPAdata(self, level, user=''):
self.level = level
if not self.profiles:
self.profiles,self.mindtPr = self.readProfiles()
if user=='':
return True
self.attempt, tmp = self.readAttempt(level, user)
if tmp < 30:
print "0"
print "0"
print("Not enough data for login. At least 30 rounds of game is needed but %d is provided!"%tmp)
exit(0)
return tmp
def classifyByFeature(self, feature):
levelscores = self.classifyByLevelFeature(self.level, feature)
return levelscores
def classifyUsers(self):
allscores = {}
for level in mldata.levelenum:
allscores[level] = self.classifyByLevel(level)
return allscores
class ClassificationOneD(ClassificationBase):
def __init__(self, start, length):
super(ClassificationOneD, self).__init__(start, length)
def classifyByLevelFeature(self, level, feature):
if not self.readPAdata(level):
return {}
refscores = {}
for ref in self.userlvl:
refscores[ref] = self.classifyByLevelFeatureRef(level, feature)
return refscores
def classifyByLevel(self, level):
featurecores = {}
if not self.readPAdata(level):
return {}
for ft in mldata.enfeatures:
featurecores[ft] = self.classifyByLevelFeature(level, ft)
return featurecores
class ClassificationMultiD(ClassificationBase):
def __init__(self, start, length):
super(ClassificationMultiD, self).__init__(start, length)
def classifyByLevelFeature(self, level, user = ''):
#if not self.readPAdata(level):
# return {}
refscores = {}
if user != '':
return self.classifyByLevelMultiRef(user)
for ref in self.userlvl:
refscores[ref] = self.classifyByLevelMultiRef(ref)
return refscores
def classifyByLevelUser(self, level, user):
cnt = self.readPAdata(level, user)
if mldata.DEBUGL >=2:
print("User login data length: %d"%cnt)
if cnt < 30:
return {}
self.level = level
scores = self.classifyByLevelFeature(level, user)
return scores
def classifyByLevel(self, level):
scores = {}
self.level = level
if not self.profiles:
self.profiles,self.mindtPr = self.readProfiles()
for user in self.userlvl:
sc = self.classifyByLevelUser(level, user)
if len(sc):
scores[user] = sc
return scores
class ClassificationFusion(ClassificationMultiD):
def __init__(self, start, length):
super(ClassificationFusion, self).__init__(start, length)
#weights = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
self.weights = [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125]
def classifyByLevelMultiRef(self, ref):
scores = {}
for ft in mldata.enfeatures:
scores[ft] = self.classifyByLevelFeatureRef(self.level, ft)
finalscores = {}
for user in self.userlvl:
finalscores[user] = 0
for ft in mldata.enfeatures:
finalscores[user] += scores[ft][user] * self.weights[ft]
return finalscores
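# A hedged usage sketch, assuming the mldata module supplies the levels and
# per-user feature data these classes read; the constructor arguments are
# illustrative only:
#
# clf = ClassificationFusion(start=0, length=100)
# allscores = clf.classifyUsers() # nested dict of scores per level/user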
|
I’ve got creativity on the brain again, always looking for more ways to find meaning and connection with the art of making! Last week I wrote about how we can expand our creativity by making a list to narrow down what methods and mediums we want to explore further. For me it was watercoloring. I’m ready to take it to the “next level”. But what does that even mean? Next level can be defined as… doing more of something or doing something better. And I really want to improve my skills, so I’ve been seeking out all kinds of curious and creative ways to watercolor.
I shared a few instagram posts about Viviva Colorsheets, which are such a fun and unique way to watercolor. I love their sheets, and my kids love them too! I follow a few other watercolor artists on instagram who share all kinds of amazing projects and inspiration as well.
JolaPictures – always shares mesmerizing videos with watercolor pictures.
Art By Bianca – love how she breaks down all the steps!
Watercolor Art Therapy – beautiful object paintings, she makes it look so easy.
Nina Hidalgo – who specializes in mini paintings, and uses Quiver Smith’s Handmade Tools for darling watercolor books, soooo cute!
To help take my creativity to the next level (right now I’m focusing on watercoloring), I decided to sign up and purchase a monthly box subscription from Let’s Make Art.
Marie: What is your first memory with creativity? Share a little about what you created and with who!
Marie: When/if you get a “maker’s block”, what are some of your favorite sources of inspiration? Where do you go for it?
Sarah: I have learned that one of the best things I can do when I am feeling uninspired is to find and observe other people doing something they clearly love. I don’t know if you have ever seen “Salt, Fat, Acid, Heat” on Netflix, but that is the perfect example of what I need in order to get creating again. Seeing someone who loves what they do, whether it is painting, or cooking, or dancing, reminds me how much I love to create and it gives me the urge to go and do.
Marie: Besides watercoloring (obvi) what is your next favorite style of crafting, way to be creative, or fave supply/material and why?
Sarah: My next favorite creative outlet would be art journaling. I love being able to combine my love for drawing/doodling with the thoughts and memories I want to preserve, as well as notes or things I need in order to stay organized. It’s freeing to be able to write or draw whatever I feel like without the need to make it pretty or finished, and it’s a great way to preserve these moments that can be so easy to forget.
Marie: What keeps you motivated to keep creating?
Sarah: My absolute favorite thing to see is when people are surprised by how their painting came out. I love thinking of new projects that would get someone excited to actually pick up a paintbrush and feel successful when they are finished. This obviously varies from person to person since we all have our own preferences and skill levels, but I love preparing projects that I think people will be excited to learn about!
Marie: What made you think of creating your box business? I’m assuming you were seeing a need!!
Sarah: We originally did not think about creating kits or a subscription box, but it turned into that through problem solving. At first we were simply selling the supplies used for each project, but then people were spending about $40 for every project! We also didn’t want people to feel like they couldn’t try it due to such a high financial commitment, so we figured that if we could break down the supplies to give them just what they would need, it would seem less intimidating. The box grew from there – the idea being that you will have what you need to paint the projects, with the education, and the flexibility to do it when you have time.
Marie: When you get creatively frustrated, what do you do to work through it? How do you shift that energy?
Sarah: One thing that has been extremely helpful for my mental and creative stability is physical activity. I was a competitive athlete in my youth, so going to the gym and spending an hour or two where I don’t think about work or painting – I just sweat it out – gives me the opportunity to have a clear mind when I am ready to sit down and work. Finding time for this when you run your own business proves difficult, so it sometimes gets put on the back burner, but I have found that I am at my best when I take that time out for me.
Thanks to Sarah for this great interview, and her team at Let’s Make Art. Keep up all your awesome work!
What creative projects are you diving into a little bit deeper to expand your creativity?
|
__author__ = 'frank'
from Certificate import Certificate
from Domain import Domain
from Partner import Partner
from Requestor import process_request
from WalletName import WalletName
class Netki:
"""
General methods for interacting with Netki's Partner API.
:param partner_id: Your Partner ID available in the API Keys section of your My Account page.
:param api_key: API Key available in the API Key section of your My Account page.
:param api_url: https://api.netki.com unless otherwise noted
"""
def __init__(self, api_key, partner_id, api_url='https://api.netki.com'):
self.api_key = api_key
self.api_url = api_url
self.partner_id = partner_id
self._auth_type = 'api_key'
@classmethod
def distributed_api_access(cls, key_signing_key, signed_user_key, user_key, api_url='https://api.netki.com'):
"""
Instantiate the Netki Client for distributed_api_access if your user's clients will communicate directly with
Netki to manage Wallet Names instead of communicating with your servers. More information can be found here:
http://docs.netki.apiary.io/#reference/partner-api
:param key_signing_key:
:param signed_user_key:
:param user_key:
:param api_url: https://api.netki.com unless otherwise noted
:return: Netki client.
"""
client = cls(None, None, api_url)
client.key_signing_key = key_signing_key
client.signed_user_key = signed_user_key
client.user_key = user_key
client._auth_type = 'distributed'
if not client.key_signing_key:
raise ValueError('key_signing_key Required for Distributed API Access')
if not client.signed_user_key:
raise ValueError('signed_user_key Required for Distributed API Access')
if not user_key:
raise ValueError('user_key Required for Distributed API Access')
return client
@classmethod
def certificate_api_access(cls, user_key, partner_id, api_url='https://api.netki.com'):
"""
Instantiate the Netki Client for certificate_api_access in order to manage your users' Digital Identity Certificates
:param user_key:
:param partner_id:
:param api_url: https://api.netki.com unless otherwise noted
:return: Netki client.
"""
client = cls(None, None, api_url)
client.user_key = user_key
client.partner_id = partner_id
client._auth_type = 'certificate'
if not client.user_key:
raise ValueError('user_key Required for Certificate API Access')
if not client.partner_id:
raise ValueError('partner_id Required for Certificate API Access')
return client
# Wallet Name Operations #
def get_wallet_names(self, domain_name=None, external_id=None):
"""
Wallet Name Operation
Retrieve Wallet Names from the Netki API. Four options are available for retrieval:
* Retrieve all Wallet Names associated with your partner_id by not specifying a domain_name or external_id.
* Retrieve all Wallet Names associated with a particular partner domain_name by specifying a domain_name.
* Retrieve all Wallet Names associated with a particular external_id by specifying an external_id.
* Retrieve all Wallet Names associated with a domain_name and external_id by specifying both domain_name
and external_id.
:param domain_name: Domain name to which the requested Wallet Names belong. ``partnerdomain.com``
:param external_id: Your unique customer identifier specified when creating a Wallet Name.
:return: List of WalletName objects.
"""
args = []
if domain_name:
args.append('domain_name=%s' % domain_name)
if external_id:
args.append('external_id=%s' % external_id)
uri = '/v1/partner/walletname'
if args:
uri = uri + '?' + '&'.join(args)
response = process_request(self, uri, 'GET')
if not response.wallet_name_count:
return []
# Assemble and return a list of Wallet Name objects from the response data
all_wallet_names = []
for wn in response.wallet_names:
wallet_name = WalletName(
domain_name=wn.domain_name,
name=wn.name,
external_id=wn.external_id,
id=wn.id
)
for wallet in wn.wallets:
wallet_name.set_currency_address(wallet.currency, wallet.wallet_address)
wallet_name.set_netki_client(self)
all_wallet_names.append(wallet_name)
return all_wallet_names
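# A brief usage sketch of the retrieval options above; the credentials and
# domain below are placeholders:
#
# client = Netki(api_key='my_api_key', partner_id='my_partner_id')
# every_name = client.get_wallet_names()
# domain_only = client.get_wallet_names(domain_name='partnerdomain.com')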
def create_wallet_name(self, domain_name, name, external_id, currency, wallet_address):
"""
Wallet Name Operation
Create a new WalletName object with the required data. Execute save() to commit your changes to the API.
:param domain_name: Domain name to which the new Wallet Name belongs. ``partnerdomain.com``
:param name: Customer's Wallet Name appended to domain_name. ``joe``
:param external_id: Your unique customer identifier for this user's Wallet Name.
:param currency: Digital currency abbreviation noted in Netki API documentation
:param wallet_address: Digital currency address
:return: WalletName object
"""
wallet_name = WalletName(
domain_name=domain_name,
name=name,
external_id=external_id
)
wallet_name.set_currency_address(currency, wallet_address)
wallet_name.set_netki_client(self)
return wallet_name
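# A hedged sketch following the docstring's save() note; the currency
# abbreviation and address below are placeholders:
#
# wallet_name = client.create_wallet_name(
# 'partnerdomain.com', 'joe', 'user-42', 'btc', '1ExampleAddressXXXX')
# wallet_name.save() # commits the new Wallet Name to the API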
# Partner Operations #
def get_partners(self):
"""
Sub-partner Operation
Get all partners (partner and sub-partners) associated with your account.
:return: List containing Partner objects
"""
response = process_request(self, '/v1/admin/partner', 'GET')
partner_objects = list()
for p in response.partners:
partner = Partner(id=p.id, name=p.name)
partner_objects.append(partner)
partner.set_netki_client(self)
return partner_objects
def create_partner(self, partner_name):
"""
Sub-partner Operation
Create a sub-partner.
:param partner_name: Partner Name
:return: Partner object
"""
response = process_request(self, '/v1/admin/partner/' + partner_name, 'POST')
partner = Partner(id=response.partner.id, name=response.partner.name)
partner.set_netki_client(self)
return partner
# Domain Operations #
def get_domains(self, domain_name=None):
"""
Domain Operation
Retrieve all domains associated with your partner_id or a specific domain_name if supplied
:return: List of Domain objects.
"""
response = process_request(self, '/api/domain/%s' % domain_name if domain_name else '/api/domain', 'GET')
if not response.get('domains'):
return []
domain_list = list()
for d in response.domains:
domain = Domain(d.domain_name)
domain.set_netki_client(self)
domain_list.append(domain)
return domain_list
def create_partner_domain(self, domain_name, sub_partner_id=None):
"""
Domain Operation
Create a partner domain used to offer Wallet Names.
:param domain_name: ``partnerdomain.com``
:param (Optional) sub_partner_id: When provided, create a domain_name under the sub_partner_id that you are
managing.
:return: Domain object with status and information required to complete domain setup.
"""
post_data = {'partner_id': sub_partner_id} if sub_partner_id else ''
response = process_request(self, '/v1/partner/domain/' + domain_name, 'POST', post_data)
domain = Domain(response.domain_name)
domain.status = response.status
domain.nameservers = response.nameservers
domain.set_netki_client(self)
return domain
# Certificate Operations #
def create_certificate(self, customer_data, product_id):
"""
Certificate Operation
Create a Certificate object used to apply for a customer's Digital Identity Certificate. Execute save() to submit it to the API.
:param customer_data: Customer personal identity information to be validated and used in the final certificate.
:param product_id: Specific product_id (Certificate type). Product IDs can be retrieved from
get_available_products() below.
:return: Certificate Object
"""
certificate = Certificate(customer_data, product_id)
certificate.set_netki_client(self)
return certificate
def get_certificate(self, id):
"""
Certificate Operation
Retrieve an existing certificate by certificate ID from the API.
:param id: Unique certificate ID issued after successful creation of a certificate object and save() to the API.
:return: Certificate Object
"""
if not id:
raise ValueError('Certificate ID Required')
certificate = Certificate()
certificate.id = id
certificate.set_netki_client(self)
certificate.get_status()
return certificate
def get_available_products(self):
"""
Certificate Operation
Get all available certificate products associated with your account including tier and pricing information.
:return: Dictionary containing product details.
"""
return process_request(self, '/v1/certificate/products', 'GET').get('products')
def get_ca_bundle(self):
"""
Certificate Operation
Download the root bundle used to validate the certificate chain for Netki issued certificates.
:return: Dictionary containing certificate bundle.
"""
return process_request(self, '/v1/certificate/cacert', 'GET').get('cacerts')
def get_account_balance(self):
"""
Certificate Operation
Get available balance for certificate purchases when using Deposit/Retainer billing.
:return: Dictionary containing available balance.
"""
return process_request(self, '/v1/certificate/balance', 'GET').get('available_balance')
|
On 19 November this year, the Innovation Conference “The Role of Innovations in Enhancing the Competitiveness of the National Economy” has opened its work. The event takes place in the period of 19-20 November in the framework of the ISE “Infoinvent” and is organized by the State Agency on Intellectual Property (AGEPI) in collaboration with the Academy of Sciences of Moldova (ASM) and the Agency for Innovation and Technology Transfer (AITT) under the patronage of the Ministry of Economy.
The conference was opened by the AGEPI Director General Lilia Bolocan, who welcomed the participants and stressed the importance of the themes and topics on the conference agenda, encouraging them to engage actively in the debates. Words of greeting were also addressed to the participants by the Deputy Minister of Economy Dumitru Godoroja, Director General of AITT Roman Chirca and European Patent Office representative David Evans, who noted the role and importance of innovation and new technologies for economic growth and for enhancing the competitiveness of the national economy.
On the first day of the conference the participants heard a series of papers presented by the Deputy Minister of Economy Dumitru Godoroja, Deputy Director General of AGEPI Svetlana Munteanu, Director General of AITT Roman Chirca, Deputy Director General of the Patent Information Center, IP Office of China Liu Yanxin, Executive Director of the E-Government Center of the Republic of Moldova Stela Mocanu and Director of the National Institute for Economic Research of ASM Alexandru Stratan.
Talking about current policies in supporting innovation and technology transfer activities to achieve the development goals of the country, Deputy Minister of Economy Dumitru Godoroja presented to the audience the innovational goals of the Innovation Strategy for 2013-2020 “Innovations for Competitiveness”, approved in September this year by the Government of the Republic of Moldova.
In her presentation on the role of intellectual property in the development and use of innovations, Deputy Director General of AGEPI Svetlana Munteanu mentioned that one of the specific goals set out in the National IP Strategy until 2020 is to increase the role of intellectual property and innovation in the economic development of the country.
The conference will continue on 20 November this year, starting at 9.00.
The participants will discuss issues related to the development of the service and innovative product market, funding sources and mechanisms to support innovation and technology transfer activities.
Special attention will be given to best business practices and strategies in the field of exploitation of innovations, including with the presentation of success stories of implementation of innovations in medicine, agriculture and food industry, nanotechnologies and alternative energy sources, information technologies and communications.
|
from fabric.api import sudo
class PackageBase(object):
def update(self):
raise NotImplementedError('update not implemented')
def upgrade(self):
raise NotImplementedError('upgrade not implemented')
def install(self, packages, quiet=False):
raise NotImplementedError('install not implemented')
class PackageOpenBSD(PackageBase):
pass
class PackageYum(PackageBase):
pass
class PackageApt(PackageBase):
def add_repo(self, repo):
self.install('python-software-properties', quiet=True)
sudo('add-apt-repository "%s"' % repo)
self.update()
def update(self):
sudo('apt-get update')
def upgrade(self, update=False, quiet=False):
if update:
self.update()
cmd = "%s apt-get %s upgrade" % (
"DEBIAN_FRONTEND=noninteractive" if quiet else "",
"--yes" if quiet else ""
)
sudo(cmd)
def install(self, packages, quiet=False):
if isinstance(packages, (list, tuple)):
packages = " ".join(packages)
cmd = "%s apt-get %s install %s %s" % (
"DEBIAN_FRONTEND=noninteractive" if quiet else "",
"--yes" if quiet else "",
"--force-yes" if quiet else "",
packages
)
sudo(cmd)
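# A minimal provisioning sketch, assuming fabric's task machinery and a
# Debian/Ubuntu host; the repository and package names are placeholders:
#
# from fabric.api import task
#
# @task
# def provision():
# pkg = PackageApt()
# pkg.add_repo('ppa:nginx/stable') # installs the helper and runs update
# pkg.install(['nginx', 'git'], quiet=True)
# pkg.upgrade(update=False, quiet=True)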
|
Home / Weight Loss / Real Weight Loss Stories / Culinary Nutrition Expert Cindy Santa Ana Reveals Her Weight Loss Journey!
Culinary Nutrition Expert Cindy Santa Ana Reveals Her Weight Loss Journey!
Cindy Santa Ana is an Integrative Nutrition Health Coach, Culinary Nutrition Expert and Author of Unprocessed Living, and she has a wonderful website, www.UnlockBetterHealth.com.
Cindy lost 50 lbs a few years ago by simply changing her diet from typical processed foods (boxed meals, packaged snacks) to healthier whole foods, like grass-fed meats, whole veggies, and healthy fats. She also added daily walking and light weight resistance training.
She went from being a size 16 to a 6. Cindy saw such a profound change in her health (lower cholesterol, no more migraines or sinus infections, and more energy) that she decided to enroll in nutrition school and learn how to share this with more women.
To learn her story, Women Fitness caught up with Nutrition Expert Cindy Santa Ana.
I was 40 and on four medications. I couldn’t make it throughout the day without a nap with my then 2-year-old. I also kept buying pants in larger sizes every year.
My weight loss journey was so inspiring once that decision was made. It was exciting to see the numbers on the scale go down. I hit a couple of plateaus, but I changed things up and moved past them.
3. What was your fitness regime like during this period?
I simply started by walking 2 miles every day. After I lost 25 lbs, I started adding in light weight training with hand weights and kettlebells. I worked out 4-5 times per week, sometimes, 6 days.
4. In a typical day, what was your diet like?
Green smoothie with superfoods for breakfast, Power Salad for lunch (greens, protein like chicken or tuna, seeds, and homemade dressing) and a lean protein with 2 veggies for dinner. Snacks were nuts, seeds, dark chocolate and/or berries.
I wanted to prove to myself that I could do this and also to show my doctor that I didn’t need more pills to help me – that I could rely on whole foods, good nutrition, and lifestyle changes to improve my health and my weight.
I still work out 4-5 times per week and feel off when I don’t, since this is a new habit that has stuck with me. I eat whole foods, cook a lot at home, plan my meals when I’m traveling and pack snacks wherever I go so I’m not tempted by fast food.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Zoltan Bozoky on 2013.03.13.
Under GPL licence.
Module handling a Sparky NMR file without nmrglue or additional NMR processing
or handling software
Purpose:
========
* One program that reads, visualizes multidimensional NMR data, finds peaks,
fit peak shapes, calculate volumes and intensities
"""
import struct
import random
import math
from pylab import plt
# Color handling module
import zcolor
# Fitting module
import GeneralFit as GF
################################################################################
# TODO Make the plot axes ratio to the right N-H-C ratios
################################################################################
def ppm2fid(ppm, spectral_data):
"""
Convert ppm information into fid number
"""
Frequency, MiddlePPM, SpectralWidth, NumberOfPoints = spectral_data
return int((NumberOfPoints/2 - ((ppm-MiddlePPM) * Frequency * NumberOfPoints) / SpectralWidth) % NumberOfPoints)
###########################
def fid2ppm(fid, spectral_data):
"""
Convert fid number into ppm information
"""
Frequency, MiddlePPM, SpectralWidth, NumberOfPoints = spectral_data
return MiddlePPM + (NumberOfPoints*SpectralWidth - 2*fid*SpectralWidth) / (2.0*Frequency*NumberOfPoints)
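# A hedged round-trip example of the two conversions above; the spectral
# parameters (frequency MHz, carrier ppm, spectral width Hz, number of
# points) are invented for illustration:
#
# >>> sd = (600.13, 4.7, 7200.0, 1024)
# >>> fid = ppm2fid(8.0, sd)
# >>> round(fid2ppm(fid, sd), 2)
# 8.0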
###########################
def distance(pos1, pos2):
"""
Calculate Euclidean distance between two points
"""
distance_value = 0.0
for (p1, p2) in zip(pos1, pos2):
distance_value += (p1 - p2)**2
return math.sqrt(distance_value)
###########################
def ceil(number):
"""
Return the closest higher integer to the number
"""
if number - int(number) != 0:
number = int(number) + 1
return int(number)
###########################
def Gauss(Peak_coordinate, Coordinate, Szoras):
""" gaussian peak """
return (1/(Szoras*math.sqrt(2.0*math.pi)))*math.exp(-1*(Peak_coordinate-Coordinate)**2/(2.0*Szoras**2))
def Lorentz(Peak_coordinate, Coordinate, Szoras):
""" lorentzian peak """
return 1/(math.pi*Szoras*(1+((Peak_coordinate-Coordinate)/float(Szoras))**2))
###########################
def parabolic(x, p):
"""
Fit a parabolic to the tip of the peaks
"""
c, b, a = p
y = a*(x-b)**2 + c
return y
###########################
def linewidth_fit(x, p):
"""
Linewidth fit error function
"""
lw, [height, peak_pos] = p
value = height * math.exp(-1 * (peak_pos - x)**2 / (2.0 * lw**2))
print 'param',lw, height, peak_pos, value
return value
def linewidth_fit2(x, p):
"""
Linewidth fit error function 2
"""
height, lw, peak_pos = p
value = height * math.exp(-1 * (peak_pos - x)**2 / (2.0 * lw**2)) #- 38237.4296875
return value
################################################################################
################################################################################
################################################################################
############################
## Sparky file header class
############################
class SparkyFileHeader(object):
"""
Everything about the sparky file header
- the first 180 bytes in the sparky file
"""
def __init__(self, headerinfo):
"""
"""
self._extract_file_header_information(headerinfo)
self._header_info = {}
#
return None
##########
def _extract_file_header_information(self, header):
"""
"""
infos = struct.unpack('>10s 4c 9s 26s 80s 3x l 40s 4x', header)
self._header_info['Sparky ID' ] = str(infos[0]).strip('\x00')
self._header_info['Number of Axis' ] = ord(infos[1]) #
self._header_info['Number of Components'] = ord(infos[2]) # = 1 for real data
self._header_info['Encoding' ] = ord(infos[3])
self._header_info['Version' ] = ord(infos[4]) # = 2 for current format
self._header_info['Owner' ] = str(infos[5]).strip('\x00')
self._header_info['Date' ] = str(infos[6]).strip('\x00')
self._header_info['Comment' ] = str(infos[7]).strip('\x00')
self._header_info['Seek Position' ] = str(infos[8]).strip('\x00')
self._header_info['Scratch' ] = str(infos[9]).strip('\x00')
return None
##########
def _get_number_of_axis(self):
return self._header_info['Number of Axis']
##########
number_of_axis = property(_get_number_of_axis)
##########
################################################################################
################################################################################
################################################################################
############################
## Sparky axis class
############################
class SparkyFileAxis(object):
"""
Everything what axis must know
- 128 bytes for each axis
"""
def __init__(self, axisinfo):
"""
"""
self._extract_axis_information(axisinfo)
return None
##########
def _extract_axis_information(self, axisdata):
"""
"""
infos = struct.unpack('>6s h 3I 6f 84s', axisdata)
self._axis_info = {}
self._axis_info['Nucleus' ] = str(infos[0]).strip('\x00') # nucleus name (1H, 13C, 15N, 31P, ...
self._axis_info['Spectral Shift' ] = infos[1] # to left or right shift
self._axis_info['Number of Points' ] = infos[2] # # of active data points - integer number of data points along this axis
self._axis_info['Size' ] = infos[3] # total size of axis
self._axis_info['BlockSize' ] = infos[4] # # of points per cache block - integer tile size along this axis
self._axis_info['Spectrometer frequency'] = infos[5] # MHz - float spectrometer frequency for this nucleus (MHz)
self._axis_info['Spectral width' ] = infos[6] # Hz - float spectral width
self._axis_info['xmtr frequency' ] = infos[7] # transmitter offset (ppm) - float center of data (ppm)
self._axis_info['Zero order' ] = infos[8] # phase corrections
self._axis_info['First order' ] = infos[9] # phase corrections
self._axis_info['First pt scale' ] = infos[10] # scaling for first point
self._axis_info['Extended' ] = str(infos[11]).strip('\x00') #
#
self._axis_info['Scale'] = []
for fid in range(0, int(self._axis_info['Number of Points']) + 1, 1):
self._axis_info['Scale'].append(fid2ppm(fid, self.frq_carrier_sw_np))
return None
##########
def _get_parameter(self, parameter_name):
return self._axis_info[parameter_name]
##########
def _get_blocksize(self):
return self._get_parameter('BlockSize')
def _get_nucleus(self):
return self.nucleus_info[-1]
def _get_nucleus_info(self):
return self._get_parameter('Nucleus')
def _get_numberofpoints(self):
return self._get_parameter('Number of Points')
def _get_scale(self):
return self._get_parameter('Scale')
def _get_size(self):
return self._get_parameter('Size')
def _get_spectrometer_frequency(self):
return self._get_parameter('Spectrometer frequency')
def _get_spectral_width(self):
return self._get_parameter('Spectral width')
def _get_xmtr_frequency(self):
return self._get_parameter('xmtr frequency')
def _get_infos(self):
return (self.spectrometer_frequency, self.xmtr_frequency,
self.spectral_width, self.number_of_points)
def ppm2index(self, ppm):
index = 0
while (index < self.number_of_points) and (self.scale[index] > ppm):
index += 1
return index
def index2ppm(self, index):
return fid2ppm(index, self.frq_carrier_sw_np)
##########
blocksize = property(_get_blocksize)
nucleus = property(_get_nucleus)
nucleus_info = property(_get_nucleus_info)
number_of_points = property(_get_numberofpoints)
scale = property(_get_scale)
size = property(_get_size)
spectral_width = property(_get_spectral_width)
spectrometer_frequency = property(_get_spectrometer_frequency)
xmtr_frequency = property(_get_xmtr_frequency)
frq_carrier_sw_np = property(_get_infos)
##########
################################################################################
################################################################################
################################################################################
############################
## Sparky spectral object
############################
class SparkySpectrum(object):
"""
"""
def __init__(self, spectralinfo, blocksize_size_for_each_axis, log = True):
"""
Parameters:
===========
* spectralinfo = sparky file content with the spectral information
* blocksize_size_for_each_axis = blocksize,size pairs for the axis
* log = print out file processing information on the fly
"""
self._log = log
self._number_of_dimensions = len(blocksize_size_for_each_axis)
self._d1 = None
self._d2 = None
self._d3 = None
self._Spectrum = []
self._noise_level = None
#
if self._log:
print 'File read has started:',
eval('self._extract_' + str(self.number_of_dimensions) +
'D_data(spectralinfo, blocksize_size_for_each_axis)')
if self._log:
print '100% file read is done.'
return None
##########
def _extract_1D_data(self, Filecontent, Blocksize):
"""
"""
self._Spectrum = list(struct.unpack
('>'+'f'*(len(Filecontent)/4), Filecontent))
return None
##########
def _extract_2D_data(self, Filecontent, Blocksize):
"""
"""
# First dimensional data
FirstDimensionBlockSize = Blocksize[0]['BlockSize']
FirstDimensionSpectralSize = Blocksize[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = Blocksize[1]['BlockSize']
SecondDimensionSpectralSize = Blocksize[1]['Size']
#
Blocksize = FirstDimensionBlockSize * SecondDimensionBlockSize
# The number of blocks needed for a spectral size is
# not necessarily an integer number
NumberOfBlocksInSecondDimension = (
ceil(SecondDimensionSpectralSize / float(SecondDimensionBlockSize)))
#---------------------------------
# Rearrange the data from a list to an array
for i_FirstDimension in range(FirstDimensionSpectralSize):
# Print out info to follow the processing
if self._log and i_FirstDimension % 50 == 0:
print '{0:3.2f}%'.format(100.0 * i_FirstDimension
/ FirstDimensionSpectralSize),
#---------------------------------
BlockNumber = (i_FirstDimension / FirstDimensionBlockSize
* NumberOfBlocksInSecondDimension)
PositionWithinBlock = (i_FirstDimension
% FirstDimensionBlockSize
* SecondDimensionBlockSize)
# Concatenate the block portions in a list
SpectralInfo1D = []
#---------------------------------
# Go through all second dimension protion to get a line
for i_SecondDimension in range(NumberOfBlocksInSecondDimension):
# If this is the last Block in line then the dimension is
# not necessarily the blocksize
if i_SecondDimension < NumberOfBlocksInSecondDimension - 1:
SecondDimension = SecondDimensionBlockSize
else:
SecondDimension = (SecondDimensionSpectralSize
% SecondDimensionBlockSize)
#---------------------------------
# The actual position within the block; 1 float number = 4 bytes
pos = (4 * (Blocksize * (BlockNumber + i_SecondDimension)
+ PositionWithinBlock))
#---------------------------------
# Unpack the data. Note that the coding is big endian ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*SecondDimension,
Filecontent[pos : pos + 4 * SecondDimension]))
#---------------------------------
# Add a line into the spectrum
self._Spectrum.append(SpectralInfo1D)
return None
##########
def _extract_3D_data(self, Filecontent, Blocksize):
"""
"""
# Third dimensional data
ThirdDimensionBlockSize = Blocksize[0]['BlockSize']
ThirdDimensionSpectralSize = Blocksize[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = Blocksize[1]['BlockSize']
SecondDimensionSpectralSize = Blocksize[1]['Size']
# First dimensional data
FirstDimensionBlockSize = Blocksize[2]['BlockSize']
FirstDimensionSpectralSize = Blocksize[2]['Size']
#
Blocksize = (FirstDimensionBlockSize
* SecondDimensionBlockSize
* ThirdDimensionBlockSize)
#---------------------------------
# The number of blocks needed for a spectral size is not necessarily
# an integer number
NumberOfBlocksInFirstDimension = ceil(FirstDimensionSpectralSize
/ float(FirstDimensionBlockSize ))
NumberOfBlocksInSecondDimension = ceil(SecondDimensionSpectralSize
/ float(SecondDimensionBlockSize))
#---------------------------------
# Rearrange the data from a list to an 3D array
for i_ThirdDimension in range(ThirdDimensionSpectralSize):
# Print out log information
if self._log and i_ThirdDimension % 10 == 0:
print '{0:3.2f}%'.format(100.0 * i_ThirdDimension
/ ThirdDimensionSpectralSize),
#---------------------------------
BlockNumberDim3 = ((i_ThirdDimension
/ ThirdDimensionBlockSize)
* NumberOfBlocksInSecondDimension
* NumberOfBlocksInFirstDimension)
PositionWithinBlockDim3 = ((i_ThirdDimension
% ThirdDimensionBlockSize)
* SecondDimensionBlockSize
* FirstDimensionBlockSize)
#---------------------------------
# Collect data of 2D in a variable
SpectralInfo2D = []
# Go through each block in 2D
#for i_SecondDimension in range(SecondDimensionBlockSize * NumberOfBlocksInSecondDimension):
for i_SecondDimension in range(SecondDimensionSpectralSize):
#
BlockNumberDim2 = (BlockNumberDim3
+ (i_SecondDimension / SecondDimensionBlockSize)
* NumberOfBlocksInFirstDimension)
PositionWithinBlockDim2 = (PositionWithinBlockDim3
+ (i_SecondDimension % SecondDimensionBlockSize)
* FirstDimensionBlockSize)
#---------------------------------
# Collect data of 1D in a variable
SpectralInfo1D = []
# Go through each block in 1D
for i_FirstDimension in range(NumberOfBlocksInFirstDimension):
# The last block size might be smaller than a blocksize
if i_FirstDimension < NumberOfBlocksInFirstDimension-1:
FirstDimension = FirstDimensionBlockSize
else:
FirstDimension = FirstDimensionSpectralSize % FirstDimensionBlockSize
#---------------------------------
# Position within block; 1 float number = 4 bytes
pos = 4 * (Blocksize * (BlockNumberDim2 + i_FirstDimension) + PositionWithinBlockDim2)
#---------------------------------
# Unpack the data. NOTE: big endian data storage ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*FirstDimension,Filecontent[pos: pos + 4*FirstDimension]))
#---------------------------------
# Put each 1D slice into the 2D
SpectralInfo2D.append(SpectralInfo1D)
#---------------------------------
# Store a 2D slice into the final array
self._Spectrum.append(SpectralInfo2D)
return None
##########
def intensity(self, position):
"""
Return an intensity value corresponds to the position
"""
data_intensity = 0.0
if self.number_of_dimensions == 1:
data_intensity = (self._Spectrum[position[0] % self.dim1])
if self.number_of_dimensions == 2:
data_intensity = (self._Spectrum[position[1] % self.dim1]
[position[0] % self.dim2])
if self.number_of_dimensions == 3:
data_intensity = (self._Spectrum[position[2] % self.dim1]
[position[1] % self.dim2]
[position[0] % self.dim3])
return data_intensity
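    # Example (hypothetical usage; assumes `spectrum` is an already
    # constructed SparkySpectrum):
    #     value = spectrum.intensity([12, 34])
    # Note that position[0] indexes the last (fastest varying) dimension,
    # and the modulo operators above wrap out-of-range indices around.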
##########
def calculate_noise_level(self, number_of_points = 10000):
"""
"""
noise = 0.0
# calculate the average level on a small subset of data
average = 0.0
pre_set_size = 100
for i in range(pre_set_size):
if self.number_of_dimensions == 1:
average += self.intensity([random.randint(0, self.dim1 - 1)])
if self.number_of_dimensions == 2:
average += self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1)])
if self.number_of_dimensions == 3:
average += self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1),
random.randint(0, self.dim3 - 1)])
average /= float(pre_set_size)
# Calculate the actual noise level
numberofdata = 0
sumofdata = 0.0
highestvalue = 0.0
i = 0
while (i <= number_of_points*2) and (numberofdata <= number_of_points):
if self.number_of_dimensions == 1:
value = abs(self.intensity([random.randint(0, self.dim1 - 1)]))
if self.number_of_dimensions == 2:
value = abs(self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1)]))
if self.number_of_dimensions == 3:
value = abs(self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1),
random.randint(0, self.dim3 - 1)]))
# Only count a value if that is not far from the average
# (= not a peak)
if value < average * 5:
numberofdata += 1
sumofdata += value
average = sumofdata / float(numberofdata)
if value > highestvalue:
highestvalue = value
i += 1
        # Scale back from the highest retained value for a safer estimate
noise = highestvalue / 1.2
# Return the value as well
return noise
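    # Example (hypothetical usage):
    #     noise = spectrum.calculate_noise_level(number_of_points = 5000)
    # or simply read the lazily calculated `noise_level` property below.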
##########
def slice_1d(self, minmax, orderXY):
"""
Return a 1D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 1D
if self.number_of_dimensions == 1:
for x in range(min(minmax['X']), max(minmax['X']), 1):
value = self.intensity([x])
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
#---------------------------------
# 2D
if self.number_of_dimensions == 2:
y = min(minmax['Y'])
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0] == '0':
value = self.intensity([y, x])
else:
value = self.intensity([x, y])
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
#---------------------------------
# 3D
if self.number_of_dimensions == 3:
y = min(minmax['Y'])
z = min(minmax['Z'])
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
return highestvalue, lowestvalue, spectrum
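    # Example (hypothetical 2D usage): extract the row at the lowest Y index
    # between X indices 100 and 200, with axis order '10':
    #     hi, lo, row = spectrum.slice_1d({'X': [100, 200], 'Y': [50, 60]}, '10')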
##########
def slice_2d(self, minmax, orderXY):
"""
Return a 2D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 2D
if self.number_of_dimensions == 2:
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0] == '0':
value = self.intensity([y, x])
else:
value = self.intensity([x, y])
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
# 3D
if self.number_of_dimensions == 3:
z = min(minmax['Z'])
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
return highestvalue, lowestvalue, spectrum
##########
def slice_3d(self, minmax, orderXY):
"""
Return a 3D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 3D
if self.number_of_dimensions == 3:
for z in range(min(minmax['Z']), max(minmax['Z']), 1):
fid2d = []
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
fid2d.append(fid)
spectrum.append(fid2d)
return highestvalue, lowestvalue, spectrum
##########
def _get_dimension1(self):
if not self._d1 and self.number_of_dimensions >= 1:
self._d1 = len(self._Spectrum)
return self._d1
def _get_dimension2(self):
if not self._d2 and self.number_of_dimensions >= 2:
self._d2 = len(self._Spectrum[0])
return self._d2
def _get_dimension3(self):
if not self._d3 and self.number_of_dimensions >= 3:
self._d3 = len(self._Spectrum[0][0])
return self._d3
def _get_dimensions(self):
return self._number_of_dimensions
def _get_noiselevel(self):
if not self._noise_level:
self._noise_level = self.calculate_noise_level()
return self._noise_level
##########
dim1 = property(_get_dimension1)
dim2 = property(_get_dimension2)
dim3 = property(_get_dimension3)
number_of_dimensions = property(_get_dimensions)
noise_level = property(_get_noiselevel)
##########
################################################################################
################################################################################
################################################################################
class ChemicalShift(object):
"""
Storing one chemical shift
"""
def __init__(self):
self._value = None
self._original_value = None
return None
##########
def shift(self, value):
self.chemical_shift = self.chemical_shift + value
return None
##########
def _get_cs(self):
if not self._value:
value = 0.0
else:
value = self._value
return value
def _set_cs(self, newvalue):
self._value = newvalue
if not self._original_value:
self._original_value = newvalue
return None
def _get_original_cs(self):
if not self._original_value:
value = 0.0
else:
value = self._original_value
return value
##########
chemical_shift = property(_get_cs, _set_cs)
original_chemical_shift = property(_get_original_cs)
##########
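# Example (hypothetical usage of the class above):
#     cs = ChemicalShift()
#     cs.chemical_shift = 120.5   # the first assignment also fixes the original
#     cs.shift(0.3)               # chemical_shift is now 120.8
#     print cs.original_chemical_shift   # still 120.5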
class Peak(object):
"""
    Storing all chemical shifts for a peak:
Parameters:
===========
* adjusted
* info
* name
* nucleus
* chemical_shift
* original_chemical_shift
* add_chemical_shift
* shift
"""
def __init__(self):
self.CSs = {}
self._adjusted = False
self._intensity = None
return None
def add_chemical_shift(self, nucleus, chemical_shift):
        if nucleus not in self.CSs:
self.CSs[nucleus] = ChemicalShift()
self.CSs[nucleus].chemical_shift = float(chemical_shift)
return None
def chemical_shift(self, nucleus):
if nucleus in self.CSs:
value = self.CSs[nucleus].chemical_shift
else:
value = 0.0
return value
def original_chemical_shift(self, nucleus):
if nucleus in self.CSs:
value = self.CSs[nucleus].original_chemical_shift
else:
value = 0.0
return value
def shift(self, nucleus, value):
if nucleus in self.CSs:
self.CSs[nucleus].shift(value)
return None
    def set_peak_info(self, line, peaknameinfo):
        columns = line.split()
        self._info = columns[0]
        spins = self.info.split('-')
        self._peakname = eval('spins[0]' + peaknameinfo)
        for i, spin in enumerate(spins):
            self.add_chemical_shift(spin[-1], columns[i + 1])
        return None
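    # Worked example for set_peak_info (hypothetical peaklist line in the
    # format the parser above expects):
    #     set_peak_info('T680N-H   120.1   8.3', '[:-1]')
    # splits the assignment into spins ['T680N', 'H'], derives the peak name
    # 'T680' via eval('spins[0][:-1]'), and stores N = 120.1 and H = 8.3.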
##########
def _get_adjusted(self):
return self._adjusted
def _set_adjusted( self, value):
self._adjusted = value
return None
def _get_info(self):
return self._info
def _set_info(self, value):
self._info = value
return None
def _get_intensity(self):
if not self._intensity:
value = 0.0
else:
value = self._intensity
return value
def _set_intensity(self, value):
self._intensity = value
return None
def _get_name(self):
return self._peakname
def _set_name(self, value):
self._peakname = value
return None
def _get_nucleuses(self):
return self.CSs.keys()
##########
adjusted = property(_get_adjusted, _set_adjusted)
info = property(_get_info, _set_info)
intensity = property(_get_intensity, _set_intensity)
name = property(_get_name, _set_name)
nucleus = property(_get_nucleuses)
##########
class Peaklist(object):
"""
Everything about peaklists
"""
def __init__(self):
self._peaks = {}
self._number_of_peaks = -1
return None
# public procedures
def read_peaklist(self, filename, info):
"""
"""
self.filename = filename
try:
peaklist_file = open(filename, 'r')
except IOError:
print 'Error opening ' + filename + '!!! Please check it!'
exit()
lines = peaklist_file.readlines()
peaklist_file.close()
num = 0
for line in lines:
            if ('Assignment' not in line) and (len(line) > 5):
                line = line.strip()
self._peaks[num] = Peak()
self._peaks[num].set_peak_info(line, info)
num += 1
self._number_of_peaks = num - 1
return None
def print_peaklist(self, filename = None):
"""
"""
if filename:
fil = open(filename,'w')
for i in range(self.number_of_peaks):
nucleus = self._peaks[i].nucleus
nucleus.reverse()
line = self._peaks[i].name
for j, nuc in enumerate(nucleus):
if j == 0:
line = ''.join([line, '_', nuc])
else:
line = ''.join([line, '-', nuc])
for nuc in nucleus:
line = ' '.join([line, str(self._peaks[i].chemical_shift(nuc))])
if filename:
line = ''.join([line, '\n'])
fil.write(line)
else:
print line
if filename:
fil.close()
return None
def add_peak(self, peak_info):
"""
        Needs a hash like {'N':129.3,'H':8.5,'C':178.2}
"""
number = self.number_of_peaks
self._peaks[number] = Peak()
for info in peak_info:
self._peaks[number].add_chemical_shift(info, peak_info[info])
self._peaks[number].info = str(number + 1)
self._peaks[number].name = str(number + 1)
self._number_of_peaks += 1
return None
def adjust(self, num):
self._peaks[num].adjusted = True
return None
def adjusted(self, num):
return self._peaks[num].adjusted
def add_cs(self, num, nucleus, value):
self._peaks[num].add_chemical_shift(nucleus, value)
return None
def cs(self, num, nucleus):
return self._peaks[num].chemical_shift(nucleus)
def add_intensity(self, num, value):
self._peaks[num].intensity = value
return None
def intensity(self, num):
return self._peaks[num].intensity
def original_cs(self, num, nucleus):
return self._peaks[num].original_chemical_shift(nucleus)
def name(self, num):
return self._peaks[num].name
def nucleus(self, num):
return self._peaks[num].nucleus
def info(self, num):
return self._peaks[num].info
def shift(self, num, nucleus, value):
self._peaks[num].shift(nucleus, value)
return None
# private procedures
##########
def _get_number_of_peaks(self):
return self._number_of_peaks + 1
##########
number_of_peaks = property(_get_number_of_peaks)
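    # Example (hypothetical usage):
    #     peaks = Peaklist()
    #     peaks.add_peak({'N': 129.3, 'H': 8.5})
    #     peaks.shift(0, 'H', 0.1)     # move peak 0 by +0.1 ppm in H
    #     peaks.print_peaklist()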
##########
################################################################################
################################################################################
################################################################################
class Sparky_plot(object):
"""
Make a plot of 1d or 2d spectrum
"""
_plot_already = None
def __init__(self):
self._noiselevel = 0.0
self._number_of_contours = 25
self._factor = 1.1
self._colors = []
self._first_plot_on_figure = False
self._plot_negatives = True
#
self.mycolor = zcolor.MyColor()
self.colors = [self.mycolor.series(i, self.number_of_contours, 0, 330, 100.0) for i in range(self.number_of_contours)]
#
if not self._plot_already:
self._plot_already = 1
self.newfigure()
return None
##########
def newfigure(self):
plt.figure()
self._plot_already = 1
self._first_plot_on_figure = True
return None
##########
def plot_1d(self, x_axis_scale, spectral_data, axes_label, color = None):
"""
Plot a 1D slice
"""
if self._first_plot_on_figure:
# plot zero
plt.plot([x_axis_scale[0],x_axis_scale[-1]],[0.0,0.0],'k-')
# plot noise level
plt.plot([x_axis_scale[0],x_axis_scale[-1]],[self.noise_level,self.noise_level],'k--')
#----------------
# color selection
if not color:
try:
plotcolor = self.colors[0]
except IndexError:
plotcolor = 'k'
else:
plotcolor = color
# plot the data
plt.plot(x_axis_scale, spectral_data, color = plotcolor)
# set the x axis limit
plt.xlim(x_axis_scale[0],x_axis_scale[-1])
#
if self._first_plot_on_figure:
plt.xlabel(axes_label[0] + ' (ppm)', size = 15)
plt.ylabel('Intensity', size = 15)
return None
##########
def plot_2d(self, x_axis_scale, y_axis_scale, spectral_data, axes_label, color = None):
"""
Plot a 2D spectrum
"""
# Colors
if not color:
if len(self.colors) < self.number_of_contours:
plotcolors = []
for i in range(self.number_of_contours):
plotcolors.append([0.0, 0.0, 0.0])
else:
plotcolors = self.colors
else:
plotcolors = color
# Contour levels
contourlevels = [self.noise_level * self.factor**i for i in range(self.number_of_contours)]
# plot positive contours
plt.contour(x_axis_scale, y_axis_scale, spectral_data, contourlevels, colors = plotcolors)
if self.plot_negatives:
# plot negatives if needed!
plt.contour(x_axis_scale, y_axis_scale, spectral_data, [-1*i for i in contourlevels], colors = [[0.0,0.0,0.0] for i in range(self.number_of_contours)])
if self._first_plot_on_figure:
# Invert the axis direction
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Put label on axes
plt.xlabel(axes_label[0] + ' (ppm)', size = 15)
plt.ylabel(axes_label[1] + ' (ppm)', size = 15)
return None
##########
def show(self, filename = None):
"""
Show or save the figure depending on whether filename is provided
"""
if not filename:
plt.show()
else:
plt.savefig(filename)
return None
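    # Example (hypothetical usage; the scales and the 2D data array would
    # come from a SparkySpectrum slice):
    #     fig = Sparky_plot()
    #     fig.noise_level = 50000.0
    #     fig.set_factor(highest)
    #     fig.plot_2d(x_scale, y_scale, data2d, ['1H', '15N'])
    #     fig.show('spectrum.png')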
##########
def plot_peaklist_2d(self, peaklist, orderXY):
"""
"""
print 'Peaks on the plot:'
print ' # name cs1 cs2 intensity adjusted'
print '--------------------------------------------------------'
for number, peak in enumerate(peaklist):
#
info = peak
loc_x = peaklist[peak][orderXY[0]]
loc_y = peaklist[peak][orderXY[1]]
adj = peaklist[peak]['Adjusted']
intensity = peaklist[peak]['Intensity']
#
print '{0:3d}. {1:>5s} {2:7.3f} {3:7.3f} {4:14.3f} '.format(number + 1, peak, loc_y, loc_x, intensity),
if adj:
print 'true'
labelcolor = 'black'
else:
print 'false'
labelcolor = 'red'
#
dx = 0.0
dy = 0.2
plt.gca().annotate(info,
xy = (loc_x, loc_y),
color = labelcolor,
xytext = (loc_x - dx,loc_y - dy),
arrowprops = dict(arrowstyle = "-|>",
connectionstyle = "arc3",
facecolor = labelcolor))
print '--------------------------------------------------------'
return None
##########
def set_factor(self, highestvalue):
#
self.factor = math.exp(math.log(highestvalue /float(self.noise_level)) * 1.0/(float(self.number_of_contours)))
return self.factor
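    # The factor above is chosen so that the contour levels
    # noise_level * factor**i reach the highest value at i = number_of_contours:
    # solving highest = noise * factor**n for factor gives
    # factor = exp(log(highest / noise) / n), which is what is computed here.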
##########
def _set_colors(self, levels):
self._colors = levels
return None
def _get_colors(self):
return self._colors
def _set_factor(self, level):
self._factor = level
return None
def _get_factor(self):
return self._factor
def _set_noiselevel(self, level):
self._noiselevel = level
return None
def _get_noiselevel(self):
return self._noiselevel
def _set_number_of_contours(self, level):
self._number_of_contours = level
return None
def _get_number_of_contours(self):
return self._number_of_contours
def _set_plot_negatives(self, level):
self._plot_negatives = level
return None
def _get_plot_negatives(self):
return self._plot_negatives
##########
colors = property(_get_colors, _set_colors)
factor = property(_get_factor, _set_factor)
noise_level = property(_get_noiselevel, _set_noiselevel)
number_of_contours = property(_get_number_of_contours, _set_number_of_contours)
plot_negatives = property(_get_plot_negatives, _set_plot_negatives)
##########
################################################################################
################################################################################
################################################################################
class ZB_spectrum(object):
"""
"""
def __init__(self, filename):
"""
"""
self._peaklist = Peaklist()
#
try:
filehandler = open(filename, 'rb')
except IOError:
print ('ERROR!!!\nPlease check the ' + filename + ' location, '
'because an error happened during the file open...\n')
exit()
#---------------------------------
# Read the file header information
self.header = SparkyFileHeader(filehandler.read(180))
#---------------------------------
# Read the axes information
self.axis = {}
self.axis_order = ''
blocksize_info = []
for i in range(self.header.number_of_axis):
axis = SparkyFileAxis(filehandler.read(128))
self.axis_order += axis.nucleus
self.axis[self.axis_order[-1]] = axis
blocksize_info.append({'BlockSize':axis.blocksize, 'Size':axis.size})
#---------------------------------
# Read the spectral information
self.spectrum = SparkySpectrum(filehandler.read(), blocksize_info)
#---------------------------------
filehandler.close()
#
return None
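    # Example (hypothetical usage; 'example.ucsf' is a placeholder path):
    #     sp = ZB_spectrum('example.ucsf')
    #     sp.read_peaklist('example.list')
    #     sp.plot({'H': [6.5, 10.0], 'N': [100.0, 135.0]}, 'HN')
    #     sp.show()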
##########
def _get_limits(self, limit, nucleus):
if limit[nucleus] == []:
index_min = 0
index_max = self.axis[nucleus].number_of_points - 1
else:
index_min = self.axis[nucleus].ppm2index(max(limit[nucleus]))
index_max = self.axis[nucleus].ppm2index(min(limit[nucleus]))
return index_min, index_max + 1
##########
    def plot1d(self, limits, orderXY = None):
        """
        Parameters:
        ===========
        * limits: a hash with the limits in ppm
        * orderXY: nucleus order, defaults to 'HN'
        example: plot1d({'H':[5.5,9.2],'N':[105,122]})
        """
if not orderXY:
orderXY = 'HN'
# Dealing with the limits
xy_limits = {}
xy_limits['X'] = self._get_limits(limits, orderXY[0])
if self.header.number_of_axis > 1:
xy_limits['Y'] = self._get_limits(limits, orderXY[1])
if self.header.number_of_axis > 2:
xy_limits['Z'] = self._get_limits(limits, orderXY[2])
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
#
highest, lowest, spectral_data = self.spectrum.slice_1d(xy_limits, axes_order)
scale = self.axis[orderXY[0]].scale[xy_limits['X'][0] : xy_limits['X'][1]]
self.figure = Sparky_plot()
self.figure.noise_level = self.spectrum.noise_level
self.figure.plot_1d(scale, spectral_data, self.axis[orderXY[0]].nucleus_info, 'b')
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', self.figure.noise_level
print 'Highest value =', highest
print 'Lowest value =', lowest
print '#############################################'
return None
##########
def plot(self, limits, orderXY = None):
"""
Parameters:
===========
        * limits: a hash with the limits in ppm
        * orderXY: nucleus order, defaults to 'HN'
        example: plot({'H':[5.5,9.2],'N':[105,122]})
"""
if not orderXY:
orderXY = 'HN'
# Dealing with the limits
xy_limits = {}
xy_limits['X'] = self._get_limits(limits, orderXY[0])
xy_limits['Y'] = self._get_limits(limits, orderXY[1])
if self.header.number_of_axis > 2:
xy_limits['Z'] = self._get_limits(limits, orderXY[2])
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
# Axis labels
labels = []
for o in orderXY:
labels.append(self.axis[o].nucleus_info)
#----------------
highest, lowest, spectral_data = self.spectrum.slice_2d(xy_limits, axes_order)
x_scale = self.axis[orderXY[0]].scale[xy_limits['X'][0] : xy_limits['X'][1]]
y_scale = self.axis[orderXY[1]].scale[xy_limits['Y'][0] : xy_limits['Y'][1]]
self.figure = Sparky_plot()
self.figure.noise_level = self.spectrum.noise_level
self.figure.set_factor(highest)
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', self.figure.noise_level
print 'Factor =', self.figure.factor
print 'Highest value =', highest
print 'Lowest value =', lowest
print '#############################################'
self.figure.plot_2d(x_scale, y_scale, spectral_data, labels)
# prepare peaklist
peaklist = {}
for i in range(self._peaklist.number_of_peaks):
within = True
for o in orderXY:
cs = self._peaklist.cs(i, o)
if limits[o] != []:
if (cs < min(limits[o])) or (max(limits[o]) < cs):
within = False
if within:
peaklist[self._peaklist.name(i)] = {}
peaklist[self._peaklist.name(i)][orderXY[0]] = self._peaklist.cs(i, orderXY[0])
peaklist[self._peaklist.name(i)][orderXY[1]] = self._peaklist.cs(i, orderXY[1])
peaklist[self._peaklist.name(i)]['Adjusted'] = self._peaklist.adjusted(i)
peaklist[self._peaklist.name(i)]['Intensity'] = self.spectrum.intensity([
self.axis[orderXY[0]].ppm2index(self._peaklist.cs(i, orderXY[0])),
self.axis[orderXY[1]].ppm2index(self._peaklist.cs(i, orderXY[1]))])
# plot peaklist
self.figure.plot_peaklist_2d(peaklist, orderXY)
return None
###########################
def show(self, filename = ''):
"""
"""
self.figure.show(filename)
return None
###########################
def _extremes_finder(self, position, dimension, axis_order, find_max = True):
"""
find positive and negative extremes on the spectrum
Parameters:
===========
* position = spectrum starting position for the peak finding,
order must be same as in the spectrum
* dimension = find local maximum or minimum in 2D or 3D
* find_max = maximum or minimum finding
Return:
=======
* local extreme
"""
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[ 0, 0,-1],[ 0, 0,+1], # z
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
                     [ 0,-1,-1],[ 0,+1,-1], # yz
                     [ 0,-1,+1],[ 0,+1,+1]] # yz
#
spectral_width = []
for o in axis_order:
            spectral_width.append(getattr(self.spectrum, 'dim' + str(int(o) + 1)))
#spectral_width = [self.spectrum.dim2, self.spectrum.dim1, self.spectrum.dim3]
        # If the spectrum is 2D, check the neighbours in x,y; otherwise in x,y,z
if dimension == 2:
checklist_size = 4
else:
checklist_size = len(checklist)
# minimum or maximum finder
finder_type = [['min','<'],['max','>']][find_max]
# It goes till it finds a local maximum
not_on_an_extreme_value = True
while not_on_an_extreme_value:
# check all values according to the checklist
checked_values = []
for check in checklist[0 : checklist_size]:
checked_values.append(self.spectrum.intensity([pos + ch for (pos, ch) in zip(position[0 : dimension], check[0 : dimension])]))
            # if the position data is the most extreme, then we are done
            most_extreme_in_array = eval(finder_type[0] + '(checked_values)')
            if eval('self.spectrum.intensity(position)' + finder_type[1] + ' most_extreme_in_array'):
not_on_an_extreme_value = False
else:
                # modify the position towards the most extreme neighbour
checked_values_max_index = checked_values.index(most_extreme_in_array)
for i in range(dimension):
position[i] += checklist[checked_values_max_index][i]
position[i] %= spectral_width[i]
return position
###########################
def _find_peak_1d(self, data, noiselevel):
hits = []
direction = True
for i in range(len(data)-1):
if data[i] > data[i+1] and data[i] > noiselevel and direction:
hits.append(i)
direction = False
if data[i] < data[i+1]:
direction = True
        # Disabled debug plot of the detected hits (drop 'and False' to enable)
        if len(hits) > 0 and False:
plt.figure()
plt.plot(range(len(data)),data)
plt.plot(hits,[50000 for j in range(len(hits))], 'k', marker= 'o', linestyle = '')
plt.show()
return hits
###########################
def _find_peak_2d(self, data2d, noiselevel, order):
hits = {}
for i, data1d in enumerate(data2d):
hit1d = self._find_peak_1d(data1d, noiselevel)
for hit in hit1d:
hits[' '.join(str(d) for d in self._extremes_finder([hit, i], 2, order))] = 0
peaks = []
for hit in hits:
peaks.append(hit.split())
return peaks
###########################
def peak_finder_2d(self, orderXY = 'HN', times_noiselevel = 1.5):
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
#
xy = {}
xy['X'] = [0, self.axis[orderXY[0]].number_of_points - 1]
xy['Y'] = [0, self.axis[orderXY[1]].number_of_points - 1]
#
print 'Finding peaks...',
peaklist = {}
for i,peak in enumerate(self._find_peak_2d(self.spectrum.slice_2d(xy, axes_order)[-1],self.spectrum.noise_level*times_noiselevel, axes_order)):
peak_info = {}
for j, o in enumerate(orderXY):
peak_info[o] = self.axis[o].index2ppm(float(peak[j]))
self._peaklist.add_peak(peak_info)
self._peaklist.adjust(self._peaklist.number_of_peaks - 1)
print str(i + 1) + ' peaks found!'
return peaklist
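    # Example (hypothetical usage): pick peaks above 1.5 times the noise
    # level and store them in the internal peaklist:
    #     sp.peak_finder_2d(orderXY = 'HN', times_noiselevel = 1.5)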
###########################
def _one_peak(self, peaknumber, orderXY):
if (0 <= peaknumber) and (peaknumber < self._peaklist.number_of_peaks):
window = {'H':0.08,'N':0.5,'C':0.5}
limit = {}
for o in orderXY:
limit[o] = [self._peaklist.cs(peaknumber, o) - window[o],self._peaklist.cs(peaknumber, o) + window[o]]
self.plot(limit, orderXY)
lim1d = {}
o = orderXY[0]
lim1d[o] = [self._peaklist.cs(peaknumber, o) - window[o], self._peaklist.cs(peaknumber, o) + window[o]]
o = orderXY[1]
lim1d[o] = [self._peaklist.cs(peaknumber, o)]
self.plot1d(lim1d,orderXY)
lim1dd = {}
o = orderXY[1]
lim1dd[o] = [self._peaklist.cs(peaknumber, o) - window[o], self._peaklist.cs(peaknumber, o) + window[o]]
o = orderXY[0]
lim1dd[o] = [self._peaklist.cs(peaknumber, o)]
self.plot1d(lim1dd,orderXY[1]+orderXY[0])
return None
###########################
def _get_spectrum_around_peak(self, axis_order, position):
"""
It returns 1d slices of the spectrum for peak fitting
Parameters:
===========
* axis_order = nucleus order in XYZ format
* position = info as in spectrum
Returns:
========
* One dimensional slices: all, left, right, top
"""
topwindow = 2
permutation = {1 : {0: {'left':[ -1], 'right':[ +1]}},
2 : {0: {'left':[ 0, -1], 'right':[ 0, +1]},
1: {'left':[ -1, 0], 'right':[ +1, 0]}},
3 : {0: {'left':[ 0, 0, -1], 'right':[ 0, 0, +1]},
1: {'left':[ 0, -1, 0], 'right':[ 0, +1, 0]},
2: {'left':[-1, 0, 0], 'right':[+1, 0, 0]}}}
slices = {}
for dimension in axis_order:
slices[dimension] = {}
# Get the left and the right side of the peak separately
for direction in ['left','right']:
                # Start from the original position
pos = []
for p in position:
pos.append(p)
# Collect the data
tomb = []
while self.spectrum.intensity(pos) >= self.spectrum.noise_level:
tomb.append(self.spectrum.intensity(pos))
for j in range(len(pos)):
pos[j] += permutation[len(position)][axis_order.index(dimension)][direction][j]
# Store the data
slices[dimension][direction] = tomb
# extract the whole peak and just the top part
slices[dimension]['all'] = []
slices[dimension]['top'] = []
for i in range(len(slices[dimension]['left'])):
slices[dimension]['all'].append(slices[dimension]['left'][len(slices[dimension]['left']) - i - 1])
if i <= topwindow:
slices[dimension]['top'].append(slices[dimension]['left'][topwindow - i])
for i in range(1,len(slices[dimension]['right'])):
slices[dimension]['all'].append(slices[dimension]['right'][i])
if i <= topwindow:
slices[dimension]['top'].append(slices[dimension]['right'][i])
return slices
###########################
def _fit_one_slice_around_peak(self, spectrum, pos):
"""
        Fit a 1d array with a Gaussian or Lorentzian curve
"""
fit = GF.Fit_general(range(len(spectrum)),
spectrum,
[max(spectrum), len(spectrum)*0.7],
linewidth_fit2,
z = [pos for i in range(len(spectrum))],
Log = False,
NoErrors = 'NoErrors')
print fit.Value, fit.Chi2/len(spectrum)
# a,b = fit.GenerateCurve(0,len(spectrum))
# plt.figure()
# plt.plot(range(len(spectrum)), spectrum,'k',linestyle='',marker='o')
# plt.plot(a,b)
# plt.show()
return fit.Value
###########################
def peakfit(self, peaknumber):
"""
"""
peakposition = []
cs = []
axisorder = ''
for i in range(len(self.axis_order), 0, -1):
ax = self.axis_order[i - 1]
axisorder += ax
cs.append(self._peaklist.cs(peaknumber, ax))
peakposition.append(self.axis[ax].ppm2index(self._peaklist.cs(peaknumber, ax)))
#
slices = self._get_spectrum_around_peak(axisorder, peakposition)
# fitting the tip of the peak
intensity = []
new_index = []
linewidth = {}
for i,ax in enumerate(axisorder):
print ax
linewidth[ax] = []
spectrum = slices[ax]['top']
fit = GF.Fit_general(range(len(spectrum)), spectrum, [max(spectrum), len(spectrum)//2, -1E+5], parabolic, Log = False, NoErrors = 'NoErrors')
intensity.append(fit.Value[0])
new_index.append(fit.Value[1] - len(spectrum)//2)
# a,b = fit.GenerateCurve(0,len(spectrum)-1)
# plt.figure()
# plt.plot(range(len(spectrum)), spectrum,'k',linestyle='',marker='o')
# plt.plot(a,b)
# plt.show()
# fit the sides of the peak
for side in ['left','right','all']:
spectrum = slices[ax][side]
lw = self._fit_one_slice_around_peak(spectrum, spectrum.index(max(spectrum)) + new_index[-1])
linewidth[ax].append(lw[1])
print 'intensity:',sum(intensity) / len(intensity), intensity
for i,ax in enumerate(axisorder):
print 'position:',ax, self.axis[ax].index2ppm(peakposition[i] + new_index[i])
print 'lw:',min(linewidth[ax]),self.axis[ax].index2ppm(min(linewidth[ax]))*self.axis[ax].spectrometer_frequency
print axisorder
print cs
print peakposition
print new_index
        # NOTE: development stop point; everything below this line is
        # unreachable work-in-progress code
        exit()
window = 3
max_window_peak = 8
order = {1:['0'], 2:['10','01'],3:['210','102','021']}
axis = ['X','Y','Z']
nucleuses = self._peaklist.nucleus(peaknumber)
#
index = {}
for nuc in nucleuses:
index[nuc] = self.axis[nuc].ppm2index(self._peaklist.cs(peaknumber, nuc))
for orderXY in order[len(nucleuses)]:
xy = {}
xypeak_left = {}
xypeak_right = {}
for i, o in enumerate(orderXY):
nuc = nucleuses[int(o)]
ax = axis[i]
xy[ax] = [index[nuc]]
xypeak_left[ax] = [index[nuc]]
xypeak_right[ax] = [index[nuc]]
xy['X'] = [xy['X'][0] - window, xy['X'][0] + window + 1]
xypeak_left['X'] = [xypeak_left['X'][0] - max_window_peak, xypeak_left['X'][0]]
xypeak_right['X'] = [xypeak_right['X'][0], xypeak_right['X'][0] + max_window_peak + 1]
rev_order = ''
for o in orderXY:
rev_order = ''.join([o, rev_order])
# Fitting the tip of the peak with a parabolic
spectrum = self.spectrum.slice_1d(xy, rev_order)[2]
spectrum_peak_left = self.spectrum.slice_1d(xypeak_left, rev_order)[2]
spectrum_peak_right = self.spectrum.slice_1d(xypeak_right, rev_order)[2]
fit = GF.Fit_general(range(len(spectrum)), spectrum, [max(spectrum), window, -1E+5], parabolic, Log = False, NoErrors = 'NoErrors')
xaxisnuc = nucleuses[int(orderXY[0])]
index_diff = fit.Value[1] - window
new_index = index[xaxisnuc] + index_diff
ppm = self.axis[xaxisnuc].index2ppm(new_index)
intensity = fit.Value[0]
self._peaklist.add_cs(peaknumber, xaxisnuc, ppm)
if xaxisnuc == 'H':
self._peaklist.add_intensity(peaknumber, intensity)
# Fitting the peak with a gaussian
## fit_left = GF.Fit_general(range(len(spectrum_peak_left)),
## spectrum_peak_left,
## [intensity, 2],
## linewidth_fit2,
## z = [max_window_peak + index_diff for i in range(len(spectrum_peak_left))],
## #z = [(max_window_peak + index_diff, min(spectrum_peak_left)) for i in range(len(spectrum_peak_left))],
## Log = False,
## NoErrors = 'NoErrors')
### fit_left = GF.Fit_general(range(len(spectrum_peak_left)), spectrum_peak_left, [window_peak], linewidth_fit, z = [[intensity, window_peak + index_diff] for i in range(len(spectrum_peak_left))], Log = True, NoErrors = 'NoErrors')
### fit_right = GF.Fit_general(range(len(spectrum_peak_right)), spectrum_peak_right, [window_peak], linewidth_fit, z = [[intensity, index_diff] for i in range(len(spectrum_peak_right))], Log = False, NoErrors = 'NoErrors')
##
## print fit_left.Value
# print fit_right.Value
## a,b = fit_left.GenerateCurve(0,7)
xy = {}
for i, o in enumerate(orderXY):
nuc = nucleuses[int(o)]
ax = axis[i]
xy[ax] = [index[nuc]]
left = []
y = xy['Y'][0]
x = xy['X'][0]
dd = self._get_spectrum_around_peak([y,x], 2)
# print dd
exit()
while self.spectrum.intensity([y,x]) >= self.spectrum.noise_level:
left.append(self.spectrum.intensity([y,x]))
x = x - 1
left.append(self.spectrum.intensity([y,x]))
left.reverse()
print len(left) + index_diff
left_fit = GF.Fit_general(range(len(left)),
left,
[max(left), 1.0],
linewidth_fit2,
z = [len(left) - 1 + index_diff for i in range(len(left))],
Log = True,
NoErrors = 'NoErrors')
e,f = left_fit.GenerateCurve(0,7)
plt.figure()
## plt.plot(range(len(spectrum_peak_left)), spectrum_peak_left,'k',marker = 'o',linestyle= '')
## plt.plot(a,b)
plt.plot(range(len(left)), left,'r',marker = 'o',linestyle= '')
plt.plot(e,f,'r--')
plt.show()
exit()
return None
###########################
def read_peaklist(self, peaklist_filename, info = '[:-2]'):
self._peaklist.read_peaklist(peaklist_filename, info)
return None
###########################
def print_peaklist(self):
self._peaklist.print_peaklist()
return None
###########################
def save_peaklist(self, peaklist_filename):
self._peaklist.print_peaklist(peaklist_filename)
return None
###########################
def _get_noiselevel(self):
return self.spectrum.noise_level
###########################
noise_level = property(_get_noiselevel)
###########################
################################################################################
################################################################################
################################################################################
class SparkyFile(object):
"""
"""
###########################
Plotting_parameters = []
###########################
def __init__(self, filename, log = True):
"""
Parameters:
* filename = A sparky file with path information
* log = True to print out information during processing
"""
        # Information on the dimensionality of the measured data
self._FileHeader_ = {}
# Information on measured axes
self.myaxis = {}
#
self._AxisOrder_ = []
# Spectrum data
self._Spectrum_ = []
# Peaklist information
self._Peaklist_ = {}
# Store the peaklist keys in order of the read in
self._Peaklistkeysorder_ = []
#
self._Peaklistchemicalshiftorder_ = []
#
self._PeaklistDoNotCare_ = []
#
self.Noiselevel = None
#---------------------------------
self.log = log
# Open the sparky file
try:
filehandler = open(filename, 'rb')
except IOError:
print ('ERROR!!!\nPlease check the ' + filename + ' location, '
'because an error happened during the file open...\n')
exit()
#---------------------------------
# Read the file header information
data = filehandler.read(180)
self.GetFileHeaderInformation(data)
#---------------------------------
# Read all axis information
for AxisNumber in range(self._FileHeader_['Number of Axis']):
datax = filehandler.read(128)
self.GetAxisInformation(datax, AxisNumber)
self._AxisOrder_.append(self.myaxis[AxisNumber]['Nucleus'][-1])
# exit()
#---------------------------------
# Only 2D and 3D are ok
        if self.NumberOfAxis not in [2, 3]:
print ('Sorry! The dimension of your spectrum (' +
str(self.NumberOfAxis) +
'D) is not handled by this program...\n')
exit()
#---------------------------------
# Calculate the block size information
Blocksize = 1
for AxisNumber in range(self.NumberOfAxis):
Blocksize *= self.myaxis[AxisNumber]['BlockSize']
#---------------------------------
# Read the data from the file
Filecontent = filehandler.read()
#---------------------------------
# Close the file
filehandler.close()
#---------------------------------
        # Get the actual spectral information
if self.log:
print 'File read has started',
self._Spectrum_ = []
# It can read 2D and 3D datafile
if self.NumberOfAxis in [2,3]:
            getattr(self, '_extract_' + str(self.NumberOfAxis) + 'D_data')(Filecontent, Blocksize)
#---------------------------------
# Calculate a noise level for the spectrum
self.CalculateNoiseLevel()
#---------------------------------
if self.log:
print '100% file read is done.'
return None
###########################
def GetFileHeaderInformation(self, data):
infos = struct.unpack('>10s 4c 9s 26s 80s 3x l 40s 4x',data)
self._FileHeader_['Sparky ID' ] = str(infos[0]).strip('\x00')
self._FileHeader_['Number of Axis' ] = ord(infos[1]) #
self._FileHeader_['Number of Components'] = ord(infos[2]) # = 1 for real data
self._FileHeader_['Encoding' ] = ord(infos[3])
self._FileHeader_['Version' ] = ord(infos[4]) # = 2 for current format
self._FileHeader_['Owner' ] = str(infos[5]).strip('\x00')
self._FileHeader_['Date' ] = str(infos[6]).strip('\x00')
self._FileHeader_['Comment' ] = str(infos[7]).strip('\x00')
self._FileHeader_['Seek Position' ] = str(infos[8]).strip('\x00')
self._FileHeader_['Scratch' ] = str(infos[9]).strip('\x00')
return None
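    # Layout of the 180-byte header unpacked above: 10 bytes ID, 4 single
    # bytes (number of axes, components, encoding, version), 9 + 26 + 80
    # bytes of owner / date / comment strings, 3 pad bytes, a 4-byte long
    # seek position, 40 bytes of scratch and 4 final pad bytes
    # (10 + 4 + 9 + 26 + 80 + 3 + 4 + 40 + 4 = 180).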
###########################
def GetAxisInformation(self, data, Number):
infos = struct.unpack('>6s h 3I 6f 84s',data)
self.myaxis[Number] = {}
self.myaxis[Number]['Nucleus' ] = str(infos[0]).strip('\x00') # nucleus name (1H, 13C, 15N, 31P, ...
self.myaxis[Number]['Spectral Shift' ] = infos[1] # to left or right shift
self.myaxis[Number]['Number of Points' ] = infos[2] # # of active data points - integer number of data points along this axis
self.myaxis[Number]['Size' ] = infos[3] # total size of axis
self.myaxis[Number]['BlockSize' ] = infos[4] # # of points per cache block - integer tile size along this axis
self.myaxis[Number]['Spectrometer frequency'] = infos[5] # MHz - float spectrometer frequency for this nucleus (MHz)
self.myaxis[Number]['Spectral width' ] = infos[6] # Hz - float spectral width
self.myaxis[Number]['xmtr frequency' ] = infos[7] # transmitter offset (ppm) - float center of data (ppm)
self.myaxis[Number]['Zero order' ] = infos[8] # phase corrections
self.myaxis[Number]['First order' ] = infos[9] # phase corrections
self.myaxis[Number]['First pt scale' ] = infos[10] # scaling for first point
self.myaxis[Number]['Extended' ] = str(infos[11]).strip('\x00') #
self.myaxis[Number]['Scale'] = []
for i in range(0, int(self.myaxis[Number]['Number of Points']) + 1, 1):
self.myaxis[Number]['Scale'].append(self._fid2ppm(i, infos[5], infos[7], infos[6], infos[2]))
return None
###########################
def _extract_2D_data(self, Filecontent, Blocksize):
"""
"""
# First dimensional data
FirstDimensionBlockSize = self.myaxis[0]['BlockSize']
FirstDimensionSpectralSize = self.myaxis[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = self.myaxis[1]['BlockSize']
SecondDimensionSpectralSize = self.myaxis[1]['Size']
        # The number of blocks needed for a spectral size is
        # not necessarily an integer
NumberOfBlocksInSecondDimension = (
self._ceil(SecondDimensionSpectralSize /
float(SecondDimensionBlockSize)))
#---------------------------------
# Rearrange the data from a list to an array
for i_FirstDimension in range(FirstDimensionSpectralSize):
# Print out info to follow the processing
if self.log and i_FirstDimension % 50 == 0:
print '{0:3.2f}%'.format(100.0 * i_FirstDimension
/ FirstDimensionSpectralSize),
#---------------------------------
BlockNumber = (i_FirstDimension / FirstDimensionBlockSize
* NumberOfBlocksInSecondDimension)
PositionWithinBlock = (i_FirstDimension
% FirstDimensionBlockSize
* SecondDimensionBlockSize)
# Concatenate the block portions in a list
SpectralInfo1D = []
#---------------------------------
            # Go through all second dimension portions to get a line
            for i_SecondDimension in range(NumberOfBlocksInSecondDimension):
                # If this is the last block in the line then its extent is
                # not necessarily the full blocksize
                if i_SecondDimension < NumberOfBlocksInSecondDimension - 1:
                    SecondDimension = SecondDimensionBlockSize
                else:
                    # fall back to a full block when the spectral size is
                    # an exact multiple of the blocksize
                    SecondDimension = (SecondDimensionSpectralSize
                                       % SecondDimensionBlockSize
                                       or SecondDimensionBlockSize)
#---------------------------------
# The actual position within the block; 1 float number = 4 bytes
pos = (4 * (Blocksize * (BlockNumber + i_SecondDimension)
+ PositionWithinBlock))
#---------------------------------
# Unpack the data. Note that the coding is big endian ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*SecondDimension,
Filecontent[pos : pos + 4 * SecondDimension]))
#---------------------------------
# Add a line into the spectrum
self._Spectrum_.append(SpectralInfo1D)
self.myaxis[0]['Actual size'] = len(self._Spectrum_)
self.myaxis[1]['Actual size'] = len(self._Spectrum_[0])
return None
###########################
def _extract_3D_data(self, Filecontent, Blocksize):
"""
"""
# Third dimensional data
ThirdDimensionBlockSize = self.myaxis[0]['BlockSize']
ThirdDimensionSpectralSize = self.myaxis[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = self.myaxis[1]['BlockSize']
SecondDimensionSpectralSize = self.myaxis[1]['Size']
# First dimensional data
FirstDimensionBlockSize = self.myaxis[2]['BlockSize']
FirstDimensionSpectralSize = self.myaxis[2]['Size']
#---------------------------------
        # The number of blocks needed for a spectral size is not necessarily an integer
NumberOfBlocksInFirstDimension = self._ceil(FirstDimensionSpectralSize /float(FirstDimensionBlockSize ))
NumberOfBlocksInSecondDimension = self._ceil(SecondDimensionSpectralSize/float(SecondDimensionBlockSize))
#---------------------------------
# Rearrange the data from a list to an 3D array
for i_ThirdDimension in range(ThirdDimensionSpectralSize):
# Print out log information
            if self.log and i_ThirdDimension % 10 == 0:
print '{0:3.2f}%'.format(100.0*i_ThirdDimension/ThirdDimensionSpectralSize),
#---------------------------------
BlockNumberDim3 = (i_ThirdDimension / ThirdDimensionBlockSize) * NumberOfBlocksInSecondDimension * NumberOfBlocksInFirstDimension
PositionWithinBlockDim3 = (i_ThirdDimension % ThirdDimensionBlockSize) * SecondDimensionBlockSize * FirstDimensionBlockSize
#---------------------------------
# Collect data of 2D in a variable
SpectralInfo2D = []
# Go through each block in 2D
#for i_SecondDimension in range(SecondDimensionBlockSize * NumberOfBlocksInSecondDimension):
for i_SecondDimension in range(SecondDimensionSpectralSize):
#
BlockNumberDim2 = BlockNumberDim3 + (i_SecondDimension / SecondDimensionBlockSize) * NumberOfBlocksInFirstDimension
PositionWithinBlockDim2 = PositionWithinBlockDim3 + (i_SecondDimension % SecondDimensionBlockSize) * FirstDimensionBlockSize
#---------------------------------
# Collect data of 1D in a variable
SpectralInfo1D = []
# Go through each block in 1D
for i_FirstDimension in range(NumberOfBlocksInFirstDimension):
# The last block size might be smaller than a blocksize
if i_FirstDimension < NumberOfBlocksInFirstDimension-1:
FirstDimension = FirstDimensionBlockSize
else:
FirstDimension = FirstDimensionSpectralSize % FirstDimensionBlockSize
#---------------------------------
# Position within block; 1 float number = 4 bytes
pos = 4 * (Blocksize * (BlockNumberDim2 + i_FirstDimension) + PositionWithinBlockDim2)
#---------------------------------
# Unpack the data. NOTE: big endian data storage ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*FirstDimension,Filecontent[pos: pos + 4*FirstDimension]))
#---------------------------------
# Put each 1D slice into the 2D
SpectralInfo2D.append(SpectralInfo1D)
#---------------------------------
# Store a 2D slice into the final array
self._Spectrum_.append(SpectralInfo2D)
self.myaxis[0]['Actual size'] = len(self._Spectrum_)
self.myaxis[1]['Actual size'] = len(self._Spectrum_[0])
self.myaxis[2]['Actual size'] = len(self._Spectrum_[0][0])
return None
###########################
def DataIntensity(self, position):
if len(position) == 3:
intensity = (self._Spectrum_[position[0] % self.myaxis[0]['Actual size']]
[position[1] % self.myaxis[1]['Actual size']]
[position[2] % self.myaxis[2]['Actual size']])
else:
intensity = (self._Spectrum_[position[0] % self.myaxis[0]['Actual size']]
[position[1] % self.myaxis[1]['Actual size']])
return intensity
###########################
def distance(self, pos1, pos2):
distance_value = 0.0
        for (p1, p2) in zip(pos1, pos2):
distance_value += (p1 - p2)**2
return math.sqrt(distance_value)
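    # Example: distance([0, 0], [3, 4]) returns 5.0, the plain Euclidean
    # distance between two index vectors.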
###########################
def read_peaklist(self, PeaklistFilename, Info='[0:-1]', shifts = [0.0, 0.0, 0.0]):
"""
Reads a sparky peaklist file
"""
try:
pfile = open(PeaklistFilename, 'r')
except IOError:
print 'Error opening ' + PeaklistFilename + '!!! Please check it!'
exit()
lines = pfile.readlines()
pfile.close()
for line in lines:
if (len(line) > 12) and (not 'Assig' in line):
data = line.split()
key = data[0]
self._Peaklistkeysorder_.append(key)
self._Peaklist_[key] = {}
order = key.split('-')
if self.NumberOfAxis == 2:
self._Peaklist_[key]['Info'] = eval('order[0]' + Info)
self._Peaklist_[key][order[-1][-1]] = float(data[2]) + shifts[1]
self._Peaklist_[key][order[-2][-1]] = float(data[1]) + shifts[0]
self._Peaklist_[key]['Adjusted'] = 'red'
#
if not self._Peaklistchemicalshiftorder_:
self._Peaklistchemicalshiftorder_.append(order[-2][-1])
self._Peaklistchemicalshiftorder_.append(order[-1][-1])
else:
self._Peaklist_[key]['Info'] = eval('order[0]'+Info)
self._Peaklist_[key][order[-1][-1]] = float(data[3]) + shifts[2]
self._Peaklist_[key][order[-2][-1]] = float(data[2]) + shifts[1]
self._Peaklist_[key][order[-3][-1]] = float(data[1]) + shifts[0]
self._Peaklist_[key]['Adjusted'] = 'red'
#
if not self._Peaklistchemicalshiftorder_:
self._Peaklistchemicalshiftorder_.append(order[-3][-1])
self._Peaklistchemicalshiftorder_.append(order[-2][-1])
self._Peaklistchemicalshiftorder_.append(order[-1][-1])
return None
###########################
def save_peaklist(self, filename):
pfile = open(filename, 'w')
for peak in self._Peaklistkeysorder_:
line = peak
for axis in self._Peaklistchemicalshiftorder_:
line = ' '.join([line, str(self._Peaklist_[peak][axis])])
line = ' '.join([line, str(self._Peaklist_[peak]['Intensity'])])
line = ' '.join([line, '{0:5.2f}'.format(self._Peaklist_[peak]['Intensity']/self.Noiselevel)])
line = ' '.join([line, '\n'])
pfile.write(line)
pfile.close()
return None
###########################
def extremes_finder(self, position, dimension, find_max = True):
"""
find positive and negative extremes on the spectrum
Parameters:
===========
        * position = spectrum starting position for the peak finding,
order must be same as in the spectrum
* dimension = find local maximum or minimum in 2D or 3D
* find_max = maximum or minimum finding
Return:
=======
* local extreme
"""
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[ 0, 0,-1],[ 0, 0,+1], # z
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
                     [ 0,-1,-1],[ 0,+1,-1], # yz
                     [ 0,-1,+1],[ 0,+1,+1]] # yz
        # If the spectrum is 2D, check the neighbours in x,y; otherwise in x,y,z
if dimension == 2:
checklist_size = 4
else:
checklist_size = len(checklist)
# minimum or maximum finder
finder_type = [['min','<'],['max','>']][find_max]
# It goes till it finds a local maximum
not_on_an_extreme_value = True
while not_on_an_extreme_value:
# check all values according to the checklist
checked_values = []
for check in checklist[0 : checklist_size]:
checked_values.append(self.DataIntensity([pos + ch for (pos, ch) in zip(position[0 : dimension], check[0 : dimension])]))
            # if the position data is the most extreme, then we are done
            most_extreme_in_array = eval(finder_type[0] + '(checked_values)')
            if eval('self.DataIntensity(position)' + finder_type[1] + ' most_extreme_in_array'):
not_on_an_extreme_value = False
else:
                # modify the position towards the most extreme neighbour
checked_values_max_index = checked_values.index(most_extreme_in_array)
for i in range(dimension):
position[i] += checklist[checked_values_max_index][i]
position[i] %= self.myaxis[i]['Actual size']
return position
###########################
def ClimbUpTheHill3D(self,ResidueKey, Modify = False, delta = [0.0,0.0,0.0]):
if ResidueKey in self._Peaklistkeysorder_:
#
p = []
original = []
for i in range(3):
p.append(int(round(delta[i])) + self._FidNumberbyAxis(self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i])))
original.append(int(round(delta[i])) + self._FidNumberbyAxis(self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i])))
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[ 0, 0,-1],[ 0, 0,+1], # z
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
[ 0,-1,-1],[ 0,-1,-1], # yz
[ 0,+1,+1],[ 0,+1,+1]] # yz
Iteration = True
while Iteration:
tomb = []
for ch in checklist:
tomb.append(self.DataIntensity([p[0] + ch[0],p[1] + ch[1],p[2] + ch[2]]))
if self.DataIntensity(p) >= max(tomb):
Iteration = False
else:
ti = tomb.index(max(tomb))
for i in range(3):
p[i] = (p[i] + checklist[ti][i]) % self.myaxis[i]['Size']
if ResidueKey == 'T680_N-C-H':
print 'PPM:',self._PPMNumberbyAxis(p[2],2)
if Modify:
for i in range(3):
self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]] = self._PPMNumberbyAxis(p[i],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i]))
return p,original
###########################
def AdjustAllPeakPositions3D(self):
numberofpeaks = 0
diff = [0.0, 0.0, 0.0]
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
a,b = self.ClimbUpTheHill3D(key)
numberofpeaks += 1
for i in range(3):
diff[i] += (a[i]-b[i])
for i in range(3):
diff[i] /= float(numberofpeaks)
print diff
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
a,b = self.ClimbUpTheHill3D(key, Modify=True, delta= diff)
return None
###########################
def adjust_peaklist_2d(self):
numberofpeaks = 0
diff = [0.0, 0.0, 0.0]
peaks = {}
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
position = [self._FidNumberbyAxis(self._Peaklist_[key]['N'],'N'),
self._FidNumberbyAxis(self._Peaklist_[key]['H'],'H')]
peaks[key] = {}
peaks[key]['original'] = []
peaks[key]['firsthit'] = []
peaks[key]['secondhit'] = []
#
for pos in position:
peaks[key]['original'].append(pos)
#
peaks[key]['firsthit'] = self.extremes_finder(position, 2)
numberofpeaks += 1
for i in range(len(position)):
diff[i] += (peaks[key]['firsthit'][i] - peaks[key]['original'][i])
for i in range(len(diff)):
diff[i] /= numberofpeaks
diff[i] = round(diff[i])
#
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
position = []
for i,pos in enumerate(peaks[key]['original']):
position.append(int(pos + diff[i]))
peaks[key]['secondhit'] = self.extremes_finder(position, 2)
#
for i in range(len(self._Peaklistkeysorder_)):
key = self._Peaklistkeysorder_[i]
if not (key in self._PeaklistDoNotCare_):
multiple = []
j = 0
while j < len(self._Peaklistkeysorder_):
key2 = self._Peaklistkeysorder_[j]
if (peaks[key]['secondhit'] == peaks[key2]['secondhit']) and (i != j):
multiple.append(j)
j += 1
if not multiple:
# Unique peak found
peaks[key]['final'] = peaks[key]['secondhit']
peaks[key]['fit'] = 'black'
else:
# Move the peak which is the closest
closest = True
for j in multiple:
key2 = self._Peaklistkeysorder_[j]
if (self.distance(peaks[key]['original'], peaks[key]['secondhit']) >=
self.distance(peaks[key2]['original'], peaks[key2]['secondhit'])):
closest = False
# if this peak is the most likely
if closest:
peaks[key]['final'] = peaks[key]['secondhit']
peaks[key]['fit'] = 'black'
else:
# If other peaks are closer, than just move to the average
peaks[key]['final'] = []
for (i, o) in enumerate(peaks[key]['original']):
peaks[key]['final'].append(int(o + diff[i]))
peaks[key]['fit'] = 'red'
# print key, peaks[key]['original'], peaks[key]['firsthit'], peaks[key]['secondhit'],multiple, peaks[key]['final']
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
self._Peaklist_[key]['N'] = self._PPMNumberbyAxis(peaks[key]['final'][0],'N')
self._Peaklist_[key]['H'] = self._PPMNumberbyAxis(peaks[key]['final'][1],'H')
self._Peaklist_[key]['Adjusted'] = peaks[key]['fit']
self._Peaklist_[key]['Intensity'] = self.DataIntensity(peaks[key]['final'])
# TODO Fit the tip?
return None
###########################
def find_peak_1d(self, data, noiselevel):
hits = []
direction = True
for i in range(len(data)-1):
if data[i] > data[i+1] and data[i] > noiselevel and direction:
hits.append(i)
direction = False
if data[i] < data[i+1]:
direction = True
return hits
###########################
def find_peak_2d(self, data2d, noiselevel):
hits = {}
for i, data1d in enumerate(data2d):
hit1d = self.find_peak_1d(data1d, noiselevel)
for hit in hit1d:
hits[' '.join(str(d) for d in self.extremes_finder([i, hit], 2))] = 0
peaks = []
for hit in hits:
peaks.append(hit.split())
return peaks
###########################
def peak_finder(self, times_noiselevel):
print 'Finding peaks...',
peaklist = {}
for i,peak in enumerate(self.find_peak_2d(self._Spectrum_,self.Noiselevel*times_noiselevel)):
peaklist[i] = {}
peaklist[i]['Info'] = str(i+1)
peaklist[i]['N'] = self._PPMNumberbyAxis(float(peak[0]),'N')
peaklist[i]['H'] = self._PPMNumberbyAxis(float(peak[1]),'H')
peaklist[i]['Adjusted'] = 'black'
print str(i + 1) + ' peaks found!'
return peaklist
###########################
def Plot1D(self, chemicalshift):
dim = self._ppm2fid(chemicalshift,self.myaxis[0]['Spectrometer frequency'],self.myaxis[0]['xmtr frequency'],self.myaxis[0]['Spectral width'],self.myaxis[0]['Number of Points'])
data = self._Spectrum_[dim]
plt.figure()
plt.plot(data)
plt.show()
return None
###########################
def Plot1Dfid(self, fid):
data = self._Spectrum_[fid]
plt.figure()
plt.plot(data)
plt.show()
return None
###########################
def PPM_to_index(self,ppm,axisnumber):
index = 0
while (index < self.myaxis[axisnumber]['Number of Points']) and (self.myaxis[axisnumber]['Scale'][index] > ppm):
index += 1
return index
###########################
def Limits_to_index(self, limits, axisnumber):
if not limits:
index_min = 0
index_max = self.myaxis[axisnumber]['Number of Points']-1
else:
index_min = self.PPM_to_index(max(limits), axisnumber)
index_max = self.PPM_to_index(min(limits), axisnumber)
if index_max > self.myaxis[axisnumber]['Actual size']:
index_max = self.myaxis[axisnumber]['Actual size']
return index_min, index_max
###########################
def spectrum_2d_slice(self, x_axis_min_index, x_axis_max_index,y_axis_min_index, y_axis_max_index, orderXY):
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 2D
if self.NumberOfAxis == 2:
for y in range(y_axis_min_index, y_axis_max_index, 1):
fid = []
for x in range(x_axis_min_index, x_axis_max_index, 1):
if orderXY[0] == 'H':
value = self._Spectrum_[y][x]
else:
value = self._Spectrum_[x][y]
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
return highestvalue, lowestvalue, spectrum
###########################
def Plot_peaklist(self, Peaklist, x_min, x_max, y_min, y_max, orderXY):
print 'Peaks on the plot:'
number = 0
for k in Peaklist:
loc_x = Peaklist[k][orderXY[-2]]
loc_y = Peaklist[k][orderXY[-1]]
if ((x_min < loc_x) and (loc_x < x_max) and
(y_min < loc_y) and (loc_y < y_max)):
                # TODO make it adjustable
peak_info_pos_x = 0.0
peak_info_pos_y = 0.0
# plt.text(loc_x + peak_info_pos_x, loc_y + peak_info_pos_y, Peaklist[k]['Info'])
number += 1
print '{0:3d}.'.format(number),Peaklist[k]['Info'], loc_y, loc_x,
if Peaklist[k]['Adjusted'] == 'black':
print 'ok'
else:
print ''
# TODO Make the dx,dy to be adjustable
dx = 0.05
dy = 0.2
plt.gca().annotate(Peaklist[k]['Info'],
xy=(loc_x,loc_y),
color = Peaklist[k]['Adjusted'],
xytext=(loc_x,loc_y - dy),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
facecolor = Peaklist[k]['Adjusted']))
#
# plt.plot([loc_x , loc_x + dx],[loc_y , loc_y + dy], 'k-')
# plt.plot([loc_x , loc_x + dx],[loc_y , loc_y - dy], 'k-')
return None
###########################
def Plot(self, limits, orderXY='HN', color = [0, 0, 0], nf = True, peaklist = None):
#
axis_x = self._nucleustype2axisindex(orderXY[0])
axis_y = self._nucleustype2axisindex(orderXY[1])
# Figure out the limits
x_axis_min_index, x_axis_max_index = self.Limits_to_index(limits[0],axis_x)
y_axis_min_index, y_axis_max_index = self.Limits_to_index(limits[1],axis_y)
x_scale = self.myaxis[axis_x]['Scale'][x_axis_min_index : x_axis_max_index]
y_scale = self.myaxis[axis_y]['Scale'][y_axis_min_index : y_axis_max_index]
# 2D
if self.NumberOfAxis == 2:
highestvalue, lowestvalue, spectrum = self.spectrum_2d_slice(x_axis_min_index, x_axis_max_index, y_axis_min_index, y_axis_max_index, orderXY)
#---------------------------------
mc = zcolor.MyColor()
contour_start = self.Noiselevel
contour_number = 25
contour_factor = math.exp(math.log((highestvalue) /float(contour_start)) * 1.0/(float(contour_number)))
contourlevels = [contour_start*contour_factor**i for i in range(contour_number)]
contourcolors = [mc.series(i,contour_number,0,300) for i in range(contour_number)]
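            # The levels form a geometric series from the noise level to the
            # highest intensity: factor = (highestvalue/contour_start)**(1/contour_number).
            # Illustrative numbers only: start = 1e5, highest = 1e7 and 25
            # contours give a factor of about 1.20, i.e. each level sits
            # roughly 20% above the previous one.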
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', contour_start
print 'Factor =', contour_factor
print 'Highest value =', highestvalue
print 'Lowest value =', lowestvalue
print '#############################################'
if nf:
plt.figure()
plt.contour(x_scale, y_scale, spectrum, contourlevels, colors = contourcolors)
# plot negatives if needed!
plt.contour(x_scale, y_scale, spectrum, [-1*i for i in contourlevels], colors = [[0.0,0.0,0.0] for i in range(contour_number)])
if nf:
plt.xlabel(self.myaxis[axis_x]['Nucleus']+' (ppm)',size=15)
plt.ylabel(self.myaxis[axis_y]['Nucleus']+' (ppm)',size=15)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# If peak labels are needed
if self._Peaklist_ or peaklist:
if not peaklist:
self.Plot_peaklist(self._Peaklist_, x_scale[-1], x_scale[0], y_scale[-1], y_scale[0], orderXY)
else:
self.Plot_peaklist(peaklist, x_scale[-1], x_scale[0], y_scale[-1], y_scale[0], orderXY)
# plt.show()
return None
###########################
def Plot_ori(self, limits, orderXY='HN', color = [0, 0, 0], Negatives = False, Peaklist=True, negcolors = 'o', ContourNumber = 15, Factor = 0.0, Noiselevel = 0, linewidth = 1.0, newfigure = True, figuresize=(8,5), figdpi=72, textsize=15):
"""
Parameters:
* limits = an array of arrays with the PPM value limits, empty array means the whole spectral width
* color = one color value in [r,g,b] format eg. [1.0,0.0,0.0]
= array of color values (number must be the same as ContourNumber) eg. [[0.1,0.0,0.0],[0.2,0.0,0.0],...]
= built-in color eg. 'blue-cyan'
= built-in color + lighting info eg. ['g',0.5]
* ContourNumber = Number of contours on the figure
* Factor = factor between each contour level, provide 0.0 to calculate the value
        * Noiselevel = If 0 is provided, the noise level is calculated from the spectrum
* linewidth = contour line width, increase it when the zoom is high eg. 1.5
* newfigure = Boolean depending on the overlay plot option
* figuresize = figuresize in inch
* figdpi = dpi value, use 72 for screen, 300 for prints
* textsize = label size in pt eg. 12
Examples:
* Plot2D([[],[]],color = 'rainbow1')
        * Plot2D([[110,125],[7.2,9.5]],color = ['green',0.5], ContourNumber = 20, Factor = 1.2, Noiselevel = 100000, linewidth = 1.5, newfigure=False, figuresize=(5,5), figdpi=300, textsize=18)
"""
ShowPeakLabelWithinPPM = [0.15,0.15,0.05] #NCH
ShiftLabel = [0.0,0.0,0.0]
#ShiftLabel = [0.05,0.05,0.02]
CrossSize = [0.05,0.05,0.01]
Nucleuses = ['N','C','H']
#---------------------------------
axisorder = []
for ch in orderXY.upper():
o = 0
while (o < self.NumberOfAxis) and self.myaxis[o]['Nucleus'][-1] != ch:
o += 1
if o < self.NumberOfAxis:
axisorder.append(o)
else:
print 'Please check the axes: ',orderXY
exit()
#---------------------------------
# Check the limits to be within the spectrum range
originallimits = limits
lim = []
for i in range(2):
lim.append(self._AxisLimitCheck(axisorder[i],limits[i]))
limits = lim
if len(originallimits) == 3:
limits.append(originallimits[2])
#---------------------------------
areamin = []
areamax = []
for i in range(2):
areamax.append(self._ppm2fid(min(limits[i]),self.myaxis[axisorder[i]]['Spectrometer frequency'],self.myaxis[axisorder[i]]['xmtr frequency'],self.myaxis[axisorder[i]]['Spectral width'],self.myaxis[axisorder[i]]['Number of Points']))
areamin.append(self._ppm2fid(max(limits[i]),self.myaxis[axisorder[i]]['Spectrometer frequency'],self.myaxis[axisorder[i]]['xmtr frequency'],self.myaxis[axisorder[i]]['Spectral width'],self.myaxis[axisorder[i]]['Number of Points']))
#exit()
# Get axis chemical shifts
xscale = []
for i in range(areamin[0],areamax[0]+1,1):
xscale.append(self.myaxis[axisorder[0]]['Scale'][len(self.myaxis[axisorder[0]]['Scale'])-i-1])
# print xscale[0],xscale[-1]
# exit()
yscale = []
for i in range(areamin[1],areamax[1]+1,1):
yscale.append(self.myaxis[axisorder[1]]['Scale'][len(self.myaxis[axisorder[1]]['Scale'])-i-1])
print 'limits = ',areamin[0],areamax[0]
#---------------------------------
# Get the spectral information to plot
highestvalue = 0.0
area = []
#---------------------------------
# 2D
if self.NumberOfAxis == 2:
# Proton is on x
if orderXY[0] == 'H':
#
for y in range(areamin[1],areamax[1]+1,1):
area.append(self._Spectrum_[y][areamin[0]:areamax[0]+1])
#
if max(self._Spectrum_[y][areamin[0]:areamax[0]+1]) > highestvalue:
highestvalue = max(self._Spectrum_[y][areamin[0]:areamax[0]+1])
# Proton is on y
else:
for y in range(areamin[1],areamax[1]+1,1):
data = []
for x in range(areamin[0],areamax[0]+1,1):
value = self._Spectrum_[x][y]
data.append(value)
if value > highestvalue:
highestvalue = value
area.append(data)
#---------------------------------
# 3D
if self.NumberOfAxis == 3:
# Calculate the third dimension fid number
zfid = self._ppm2fid(limits[2][0],self.myaxis[axisorder[2]]['Spectrometer frequency'],self.myaxis[axisorder[2]]['xmtr frequency'],self.myaxis[axisorder[2]]['Spectral width'],self.myaxis[axisorder[2]]['Number of Points'])
# Extract the 2D from the 3D
for y in range(areamin[1],areamax[1]+1,1):
data = []
for x in range(areamin[0],areamax[0]+1,1):
if orderXY[0:2] == 'HN':
value = self._Spectrum_[y][zfid][x]
elif orderXY[0:2] == 'HC':
value = self._Spectrum_[zfid][y][x]
elif orderXY[0:2] == 'NH':
value = self._Spectrum_[x][zfid][y]
elif orderXY[0:2] == 'NC':
value = self._Spectrum_[x][y][zfid]
elif orderXY[0:2] == 'CH':
value = self._Spectrum_[zfid][x][y]
elif orderXY[0:2] == 'CN':
value = self._Spectrum_[y][x][zfid]
else:
value = 0.0
# Store the value
data.append(value)
# Check whether it is the highest
if value > highestvalue:
highestvalue = value
area.append(data)
#---------------------------------
# If the user did not set up a noise level, use the calculated one
if Noiselevel == 0:
contour_start = self.Noiselevel
else:
contour_start = Noiselevel
contour_number = ContourNumber
#---------------------------------
        # If the user does not provide factor information
if Factor == 0.0:
            # Calculate based on the noise level and the highest peak height
try:
contour_factor = math.exp(math.log((highestvalue) /float(contour_start)) * 1.0/(float(contour_number)))
except ValueError:
contour_factor = 0.0
# if the user provided the factor information
else:
contour_factor = Factor
#---------------------------------
# Set the contour levels
contourlevels = [contour_start*contour_factor**i for i in range(contour_number)]
#---------------------------------
# If the user provided a color
contourcolors = self._ColorChoise(color,contour_number)
if Negatives:
# Colors
negcontourcolors = self._ColorChoise(negcolors,contour_number)
# Levels
negcontourlevels = []
for level in contourlevels:
negcontourlevels.append(-1.0*level)
#---------------------------------
print '---------------'
print self.myaxis[axisorder[0]]['Nucleus']+':',min(limits[0]),'-',max(limits[0])
print self.myaxis[axisorder[1]]['Nucleus']+':',min(limits[1]),'-',max(limits[1])
if self.NumberOfAxis == 3:
print self.myaxis[axisorder[2]]['Nucleus']+':',limits[2][0]
print 'Noise level =', contour_start
print 'Factor =', contour_factor
print 'Highest value =', highestvalue
print '---------------'
#---------------------------------
# To be able to plot several figure on each other, the new figure is an option
if newfigure:
plt.figure(figsize=figuresize,dpi=figdpi)
#---------------------------------
# Generate the plot
plt.contour(xscale,yscale,area,contourlevels,colors = contourcolors,linewidths = linewidth)
if Negatives:
plt.contour(xscale,yscale,area,negcontourlevels,colors = negcontourcolors,linewidths = linewidth)
#---------------------------------
# Invert the axes direction
if newfigure:
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
#---------------------------------
# Put on axis label
plt.xlabel(self.myaxis[axisorder[0]]['Nucleus']+' (ppm)',size=textsize)
plt.ylabel(self.myaxis[axisorder[1]]['Nucleus']+' (ppm)',size=textsize)
if self.NumberOfAxis == 3:
plt.title(self.myaxis[axisorder[2]]['Nucleus']+': {0:6.3f} ppm'.format(limits[2][0]),size=textsize)
#---------------------------------
# If peak labels are needed
if Peaklist and (self._Peaklist_ != {}):
print 'Peaks on the plot:'
for k in self._Peaklistkeysorder_:
ItIsOn = True
p = []
for i in range(self.NumberOfAxis):
p.append(self._Peaklist_[k][self.myaxis[axisorder[i]]['Nucleus'][-1]])
i = 0
while (i < 2 ) and ItIsOn:
if (areamin[i] > p[i]) or (p[i] > areamax[i]):
ItIsOn = False
i += 1
if self.NumberOfAxis == 3:
if abs(p[2] - limits[2][0]) > ShowPeakLabelWithinPPM[axisorder[2]]:
ItIsOn = False
if ItIsOn:
print self._Peaklist_[k]['Info'],p[0],p[1],self._Peaklist_[k][Nucleuses[axisorder[2]]]
plt.text(p[0]-ShiftLabel[axisorder[0]],p[1]-ShiftLabel[axisorder[1]],self._Peaklist_[k]['Info'],size=textsize)
# Put on the crosspeak
dx = CrossSize[axisorder[0]]
dy = CrossSize[axisorder[1]]
#
plt.plot([p[0]-dx,p[0]+dx],[p[1]-dy,p[1]+dy],'k-')
plt.plot([p[0]-dx,p[0]+dx],[p[1]+dy,p[1]-dy],'k-')
#
return None
###########################
def Show(self,FileName = ''):
if FileName == '':
plt.show()
else:
plt.savefig(FileName)
return None
###########################
def _AxisTicks(self,limits,number,PPMscale = True):
# Calculate the step size
step = abs(limits[0]-limits[1])/float(number-1)
# Store the scales in data
data = []
for i in range(number):
# if it is a ppm scale, then the values go down
if PPMscale:
value = max(limits)-i*step
# if it is point scale then it goes up
else:
value = i*step
#---------------------------------
            # if the value needs more than 3 decimal digits, format it to 3
if int(value*1000) != value*1000:
value = '{0:6.3f}'.format(value)
data.append(value)
return data
###########################
def _AxisLimitCheck(self,Axisnumber,limits):
# If there is no data provided, use the full spectrum
if limits == []:
            limits = [-9.99E+99,+9.99E+99]
# Store the data
newlimits = []
# Spectrum information
ppmlimit = self.PPM_limit[Axisnumber]
# Lower limit
if min(ppmlimit) > min(limits):
newlimits.append(self.myaxis[Axisnumber]['Scale'][1])
else:
newlimits.append(min(limits))
# Upper limit
if max(ppmlimit) < max(limits):
newlimits.append(max(ppmlimit))
else:
newlimits.append(max(limits))
return newlimits
###########################
def _ppm2fid(self, ppm, Frequency, MiddlePPM, SpectralWidth, NumberOfPoints):
return int((NumberOfPoints/2 - ((ppm-MiddlePPM) * Frequency * NumberOfPoints) / SpectralWidth) % NumberOfPoints)
###########################
def _fid2ppm(self, fid, Frequency, MiddlePPM, SpectralWidth, NumberOfPoints):
return MiddlePPM + (NumberOfPoints*SpectralWidth - 2*fid*SpectralWidth) / (2.0*Frequency*NumberOfPoints)
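    # Round-trip sketch (hypothetical axis parameters): on a 600.0 MHz axis
    # with the xmtr frequency at 4.77 ppm, a 7200.0 Hz spectral width and
    # 2048 points,
    #     fid = self._ppm2fid(8.0, 600.0, 4.77, 7200.0, 2048)
    #     self._fid2ppm(fid, 600.0, 4.77, 7200.0, 2048)  # ~8.0 ppm again
    # (the int() truncation in _ppm2fid introduces sub-point rounding)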
###########################
def _nucleustype2axisindex(self, nucleus):
axis = 0
while (axis < self.NumberOfAxis) and (self.myaxis[axis]['Nucleus'][-1] != nucleus):
axis += 1
return axis
###########################
def _axisindex2nucleustype(self, axisindex):
return self.myaxis[axisindex]['Nucleus'][-1]
###########################
def _FidNumberbyAxis(self, ppm, Axis):
if type(Axis) == type(''):
Axis = self._nucleustype2axisindex(Axis)
return self._ppm2fid(ppm,
self.myaxis[Axis]['Spectrometer frequency'],
self.myaxis[Axis]['xmtr frequency'],
self.myaxis[Axis]['Spectral width'],
self.myaxis[Axis]['Number of Points'])
###########################
def _PPMNumberbyAxis(self, fid, Axis):
if type(Axis) == type(''):
Axis = self._nucleustype2axisindex(Axis)
return self._fid2ppm(fid,
self.myaxis[Axis]['Spectrometer frequency'],
self.myaxis[Axis]['xmtr frequency'],
self.myaxis[Axis]['Spectral width'],
self.myaxis[Axis]['Number of Points'])
###########################
def _ceil(self, number):
if number - int(number) != 0:
number = int(number) + 1
return int(number)
###########################
def CalculateNoiseLevel(self,NumberOfDataPoints = 10000):
Noise = 0.0
# calculate the average level on a small subset of data
average = 0.0
for i in range(100):
# 2D
if self.NumberOfAxis == 2:
average += abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-150)])
# 3D
if self.NumberOfAxis == 3:
average += abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-1)][random.randint(0,self.myaxis[2]['Number of Points']-1)])
average /= 100.0
# Calculate the actual noise level
numberofdata = 0
sumofdata = 0.0
highestvalue = 0.0
i = 0
while (i <= NumberOfDataPoints*2) and (numberofdata <= NumberOfDataPoints):
# 2D
if self.NumberOfAxis == 2:
value = abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-150)])
# 3D
if self.NumberOfAxis == 3:
value = abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-1)][random.randint(0,self.myaxis[2]['Number of Points']-1)])
# Only count a value if that is not far from the average (= not a peak)
if value < average * 5:
numberofdata += 1
sumofdata += value
average = sumofdata / float(numberofdata)
if value > highestvalue:
highestvalue = value
i += 1
# Cut back from the highest to have a bit of noise
Noise = highestvalue / 1.2
# Assign the self.Noise to this value
self.Noiselevel = Noise
# Return the value as well
return Noise
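    # Usage sketch: noise = spectrum.CalculateNoiseLevel(NumberOfDataPoints = 5000)
    # returns the estimate and also stores it on self.Noiselevel, which the
    # plotting methods use as the default lowest contour level.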
###########################
def _ColorChoise(self,color,contour_number):
if (type(color) == type([])) and (len(color) == 3):
contourcolors = [color for _ in range(contour_number)]
# if the user provided all the colors
elif (type(color) == type([])) and (len(color) == contour_number):
contourcolors = color
# if the color is selected and light information is provided as well
elif (type(color) == type([])) and (len(color) == 2):
light = color[1]
            if (light < 0.0) or (light > 1.0):
                light = 1.0
contourcolors = self.ColorSchemer(contour_number,color[0],light)
# if there is no color information or built in colors has been selected
else:
contourcolors = self.ColorSchemer(contour_number,color)
return contourcolors
###########################
def ColorSchemer(self, Number, color, light = 1.0):
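        # Example: ColorSchemer(3, 'kr') returns
        # [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [1.0, 0.0, 0.0]],
        # a black-to-red ramp with step = 1 / (Number - 1) = 0.5.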
data = []
step = 1 / float(Number-1)
for i in range(Number):
element = [0.0,0.0,0.0]
if (color == 'r') or (color == 'red'):
element = [1.0,0.0,0.0]
if (color == 'g') or (color == 'green'):
element = [0.0,1.0,0.0]
if (color == 'b') or (color == 'blue'):
element = [0.0,0.0,1.0]
#---------------------------------
if (color == 'c') or (color == 'cyan'):
element = [0.0,1.0,1.0]
if (color == 'y') or (color == 'yellow'):
element = [1.0,1.0,0.0]
if (color == 'p') or (color == 'purple'):
element = [1.0,0.0,1.0]
#---------------------------------
if (color == 'm') or (color == 'magenta'):
element = [1.0,0.0,0.5]
if (color == 'pi') or (color == 'pink'):
element = [1.0,0.5,0.5]
if (color == 'o') or (color == 'orange'):
element = [1.0,0.5,0.0]
#---------------------------------
if (color == 'g1') or (color == 'grey1'):
element = [0.1 for _ in range(3)]
if (color == 'g2') or (color == 'grey2'):
element = [0.2 for _ in range(3)]
if (color == 'g3') or (color == 'grey3'):
element = [0.3 for _ in range(3)]
if (color == 'g4') or (color == 'grey4'):
element = [0.4 for _ in range(3)]
if (color == 'g5') or (color == 'grey5'):
element = [0.5 for _ in range(3)]
if (color == 'g6') or (color == 'grey6'):
element = [0.6 for _ in range(3)]
if (color == 'g7') or (color == 'grey7'):
element = [0.7 for _ in range(3)]
if (color == 'g8') or (color == 'grey8'):
element = [0.8 for _ in range(3)]
if (color == 'g9') or (color == 'grey9'):
element = [0.9 for _ in range(3)]
#---------------------------------
if (color == 'w') or (color == 'white'):
element = [1.0, 1.0, 1.0]
#---------------------------------
if (color == 'kr') or (color == 'black-red'):
element = [0.0 + i * step, 0.0, 0.0]
if (color == 'kg') or (color == 'black-green'):
element = [0.0, 0.0 + i * step, 0.0]
if (color == 'kb') or (color == 'black-blue'):
element = [0.0, 0.0, 0.0 + i * step]
#---------------------------------
if (color == 'kc') or (color == 'black-cyan'):
element = [0.0, 0.0 + i * step, 0.0 + i * step]
if (color == 'ky') or (color == 'black-yellow'):
element = [0.0 + i * step, 0.0 + i * step, 0.0]
if (color == 'kp') or (color == 'black-purple'):
element = [0.0 + i * step, 0.0, 0.0 + i * step]
#---------------------------------
if (color == 'km') or (color == 'black-magenta'):
element = [0.0 + i * step, 0.0, 0.0 + (i / 2.0) * step]
if (color == 'kpi') or (color == 'black-pink'):
element = [0.0 + i * step, 0.0 + (i / 2.0) * step, 0.0 + (i / 2.0) * step]
if (color == 'ko') or (color == 'black-orange'):
element = [0.0 + i * step, 0.0 +(i / 2.0) * step, 0.0]
#---------------------------------
if (color == 'kw') or (color == 'black-white'):
element = [0.0 + i * step, 0.0 + i * step, 0.0 + i * step]
#---------------------------------
if (color == 'rr') or (color == 'red-ring'):
if i % 5 != 0:
element = [1.0, 0.0, 0.0]
else:
element = [0.0, 0.0, 0.0]
if (color == 'gr') or (color == 'green-ring'):
if i % 5 != 0:
element = [0.0, 1.0, 0.0]
else:
element = [0.0, 0.0, 0.0]
if (color == 'br') or (color == 'blue-ring'):
if i % 5 != 0:
element = [0.0, 0.0, 1.0]
else:
element = [0.0, 0.0, 0.0]
#---------------------------------
if (color == 'red-yellow') or (color == 'rainbow1'):
element = [1.0, 0.0 + i * step, 0.0]
#---------------------------------
if (color == 'blue-cyan') or (color == 'rainbow2'):
element = [0.0, 0.0 + i * step, 1.0]
#---------------------------------
if (color == 'green-red') or (color == 'rainbow3'):
element = [0.0 + i * step, 0.5 - (i / 2.0) * step, 0.0]
#---------------------------------
if type(light) != type(1.0):
light = 1.0
element = [element[c] * light for c in range(3)]
#---------------------------------
data.append(element)
return data
###########################
def _getNumberOfAxis(self):
return len(self.myaxis.keys())
###########################
def _getAxisInfo(self, field):
info = []
for axisnumber in range(self.NumberOfAxis):
info.append(self.myaxis[axisnumber][field])
return info
###########################
def _getNucleus(self):
return self._getAxisInfo('Nucleus')
###########################
def _getFrequency(self):
return self._getAxisInfo('Spectrometer frequency')
###########################
def _getSpectralwidth(self):
return self._getAxisInfo('Spectral width')
###########################
def _getxmtrfreqency(self):
return self._getAxisInfo('xmtr frequency')
###########################
def _getscales(self):
return self._getAxisInfo('Scale')
###########################
def _getnumberofpoints(self):
return self._getAxisInfo('Number of Points')
###########################
def _getlimit(self):
info = []
for axisnumber in range(self.NumberOfAxis):
info.append([self.myaxis[axisnumber]['Scale'][0],self.myaxis[axisnumber]['Scale'][-1]])
return info
###########################
NumberOfAxis = property(_getNumberOfAxis)
Nucleus = property(_getNucleus)
Frequency = property(_getFrequency)
SpectralWidth = property(_getSpectralwidth)
MiddlePPM = property(_getxmtrfreqency)
Scale = property(_getscales)
NumberOfPoints = property(_getnumberofpoints)
PPM_limit = property(_getlimit)
#########################################
myspectrum = ZB_spectrum('13030_tcs_e.fid_1.ucsf')
# estimate and print the noise level
print myspectrum.CalculateNoiseLevel()
# 1D proton trace (8.0 ppm is just an example chemical shift)
myspectrum.Plot1D(8.0)
myspectrum.Show()
# Find peaks and plot a region with peaks and labels
peaks = myspectrum.peak_finder(1.5)
print peaks
myspectrum.Plot([[6.8,10.2],[]], orderXY = 'HN', color = [], peaklist = peaks)
myspectrum.Show()
|
A local in the window?
A post on my personal blog yesterday noted the encouraging success of writer, editor and publisher Colleen Dunn Bates of Pasadena, whose book Hometown Pasadena was the subject of a story this week in the LA Times.
I was disappointed with the story, which I thought could have been much more fun and informative. But Bates saw the blog post and was kind enough to write a note that filled in some of my blanks.
Most important to writers of unpublished books (that would be me) was how Bates managed to get the gatekeepers of a Pasadena Barnes & Noble to stock her book, let alone put it in the window.
The answer: A good book (well written and attractively packaged) really does sell itself, if you talk to the right person.
Once contacted about the product, the chain's regional buyer realized the value of what they were looking at and gave it front-window display space.
To understand how significant that is, it helps to know a bit about the industry. Display placement doesn't just happen. Most authors never see the light of a center table in a bookstore (bargain bins excluded), let alone the front window. The displays shoppers see just inside the front door of corporate bookstores, those tables with copies of the latest book artfully arranged, are often purchased, which means they have nothing to do with what the wise employees of the store think about the book.
Publishers pay fees for prime placement, often what's called a "co-op" (I think the grocery industry calls them "endcaps"). Only the biggest names, or those expected to become big names, can count on a publisher to invest in this kind of play, so for a local writer to make it into the front window on merit is a very big deal.
There's a lot more to the marketing strategy employed by Bates. She obviously did not go into this effort blindly. As a veteran of the New York publishing machine, she knows her way around the engine. That aside, anyone can do what she did. This success wasn't the product of a complicated rebuild, nor was it the result of name-dropping or calling in favors to do the heavy lifting. Rather, it looks to be proof of the benefits of smart and deliberate planning.
Also encouraging was Bates' experience with Borders, yet another chain that signed up to stock her book. Say what you will about the corporate monsters (and I've said a few things about them lately), but Borders provides a budget to buy and sell the work of local authors. They don't get credit for many socially responsible acts, but this one is worthy of it (also worthy of note is the fact that I'm unaware of how big, or small, this local book budget may be).
|
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import re
import socket
import fnmatch
import math
from resource_management.core.logger import Logger
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class LogSearchServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(LogSearchServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
# Always call these methods
self.modifyMastersWithMultipleInstances()
self.modifyCardinalitiesDict()
self.modifyHeapSizeProperties()
self.modifyNotValuableComponents()
self.modifyComponentsNotPreferableOnServer()
self.modifyComponentLayoutSchemes()
def modifyMastersWithMultipleInstances(self):
"""
Modify the set of masters with multiple instances.
    Must be overridden in child class.
"""
# Nothing to do
pass
def modifyCardinalitiesDict(self):
"""
Modify the dictionary of cardinalities.
    Must be overridden in child class.
"""
# Nothing to do
pass
def modifyHeapSizeProperties(self):
"""
Modify the dictionary of heap size properties.
    Must be overridden in child class.
"""
pass
def modifyNotValuableComponents(self):
"""
Modify the set of components whose host assignment is based on other services.
    Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentsNotPreferableOnServer(self):
"""
Modify the set of components that are not preferable on the server.
    Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentLayoutSchemes(self):
"""
Modify layout scheme dictionaries for components.
The scheme dictionary basically maps the number of hosts to
host index where component should exist.
    Must be overridden in child class.
"""
# Nothing to do
pass
def getServiceComponentLayoutValidations(self, services, hosts):
"""
Get a list of errors.
    Must be overridden in child class.
"""
return []
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
putLogSearchProperty = self.putProperty(configurations, "logsearch-properties", services)
putLogSearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
putLogSearchCommonEnvProperty = self.putProperty(configurations, "logsearch-common-env", services)
putLogSearchCommonEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-common-env")
putLogSearchEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-env")
putLogFeederEnvAttribute = self.putPropertyAttribute(configurations, "logfeeder-env")
logSearchServerHosts = self.getComponentHostNames(services, "LOGSEARCH", "LOGSEARCH_SERVER")
# if there is no Log Search server on the cluster, i.e. there is an external server
if logSearchServerHosts is None or len(logSearchServerHosts) == 0:
# hide logsearch specific attributes
for key in services['configurations']['logsearch-env']['properties']:
putLogSearchEnvAttribute(key, 'visible', 'false')
for key in services['configurations']['logsearch-properties']['properties']:
putLogSearchAttribute(key, 'visible', 'false')
for key in services['configurations']['logsearch-audit_logs-solrconfig']['properties']:
self.putPropertyAttribute(configurations, "logsearch-audit_logs-solrconfig")(key, 'visible', 'false')
for key in services['configurations']['logsearch-service_logs-solrconfig']['properties']:
self.putPropertyAttribute(configurations, "logsearch-service_logs-solrconfig")(key, 'visible', 'false')
for key in services['configurations']['logsearch-log4j']['properties']:
self.putPropertyAttribute(configurations, "logsearch-log4j")(key, 'visible', 'false')
for key in services['configurations']['logsearch-admin-json']['properties']:
self.putPropertyAttribute(configurations, "logsearch-admin-json")(key, 'visible', 'false')
# if there is a Log Search server on the cluster
else:
infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
# if there is AMBARI_INFRA, calculate the min/max shards and recommendations based on the number of infra solr hosts
if infraSolrHosts is not None and len(infraSolrHosts) > 0 and "logsearch-properties" in services["configurations"]:
        replicationRecommendFloat = math.log(len(infraSolrHosts), 5)
        recommendedReplicationFactor = int(1 + math.floor(replicationRecommendFloat))
recommendedMinShards = len(infraSolrHosts)
recommendedShards = 2 * len(infraSolrHosts)
recommendedMaxShards = 3 * len(infraSolrHosts)
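        # Illustrative math (assuming 5 INFRA_SOLR hosts): log(5, 5) = 1.0, so
        # the replication factor is int(1 + math.floor(1.0)) = 2, min shards = 5,
        # recommended shards = 10 and max shards = 15.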
# if there is no AMBARI_INFRA (i.e. external solr is used), use default values for min/max shards and recommendations
else:
recommendedReplicationFactor = 2
recommendedMinShards = 1
recommendedShards = 1
recommendedMaxShards = 100
putLogSearchCommonEnvProperty('logsearch_use_external_solr', 'true')
      # recommend number of shards
putLogSearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
putLogSearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
putLogSearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
putLogSearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
putLogSearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
putLogSearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
# recommend replication factor
putLogSearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
putLogSearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
kerberos_authentication_enabled = self.isSecurityEnabled(services)
    # if kerberos is not enabled, hide the kerberos-related properties
if not kerberos_authentication_enabled:
putLogSearchCommonEnvProperty('logsearch_external_solr_kerberos_enabled', 'false')
putLogSearchCommonEnvAttribute('logsearch_external_solr_kerberos_enabled', 'visible', 'false')
putLogSearchEnvAttribute('logsearch_external_solr_kerberos_keytab', 'visible', 'false')
putLogSearchEnvAttribute('logsearch_external_solr_kerberos_principal', 'visible', 'false')
putLogFeederEnvAttribute('logfeeder_external_solr_kerberos_keytab', 'visible', 'false')
putLogFeederEnvAttribute('logfeeder_external_solr_kerberos_principal', 'visible', 'false')
def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
"""
Entry point.
Validate configurations for the service. Return a list of errors.
The code for this function should be the same for each Service Advisor.
"""
#Logger.info("Class: %s, Method: %s. Validating Configurations." %
# (self.__class__.__name__, inspect.stack()[0][3]))
return []
|
Continuing to bring innovations to the field of Gynecology, Dr. K. Warren Volker pushes through societal stigmas to move the industry forward and offer women safer choices for their gynecological care. Dr. Volker and his team leave no stone unturned.
Here is a list of current and most recent research projects. To view a full list, please click here.
Minimally Invasive Hysterectomy – Outcomes of total laparoscopic hysterectomy with complications.
Dysfunctional Uterine Bleeding in Women – (STOP-DUB) Surgical Treatment Outcomes Project. A national randomized controlled trial, federally funded through the Agency of Health Care Policy and Research (AHCPR).
Contraception in Lactating Women – Effects of giving hormone contraception immediately (i.e., within 48 hours of giving birth) and the impact on milk production, menstruation, cardiovascular status, and weight retention.
Comparison of Flexible Hysteroscopy Directed Biopsy, Saline Infusion Sonohysteroscopy and Endometrial Biopsy for Detection of Uterine Pathology. Funding provided by Olympus Instruments, Inc. in the form of instrumentation supplied to us at no cost. This study also incorporates a formal training program to teach 3rd and 4th year resident physicians new operative and diagnostic techniques.
The Effect of Pre-term Pre-mature Rupture of Membranes (PPROM) on Maternal Estriol Levels – Funding provided by Biex Industries, Inc., in the form of laboratory contributions. This project investigates the use of the new SALEST test in determining its usefulness in the management of perinatal patient care.
Development of a Screening Program and Treatment Strategies for Women and Adolescents with von Willebrand Disease – A collaborative study with the Dept. of Hematology/Oncology and Dept. of Ob/Gyn, University of Nevada School of Medicine. This is an on-going project and has the endorsement of the Hemophilia Foundation of Southern Nevada.
|
"""
The documentation for python-tdl. A Pythonic port of
U{libtcod<http://doryen.eptalys.net/libtcod/>}.
You can find the project page on Google Code
U{here<http://code.google.com/p/python-tdl/>}.
Report any bugs or issues to the Google Code issue tracker
U{here<https://code.google.com/p/python-tdl/issues/list>}.
Getting Started
===============
Once the library is imported you can load the font you want to use with
L{tdl.setFont}.
This is optional and when skipped will use a decent default font.
After that you call L{tdl.init} to set the size of the window and get the
root console in return.
This console is the canvas to what will appear on the screen.
Indexing Consoles
=================
For most methods taking a position you can use Python-style negative
indexes to refer to the opposite side of a console with (-1, -1)
starting at the bottom right.
You can also check if a point is part of a console using containment
logic i.e. ((x, y) in console).
You may also iterate over a console using a for statement. This returns
every x,y coordinate available to draw on, but it will be extremely slow
to actually operate on every coordinate individually.
Try to minimize draws by using an offscreen L{Console}, only drawing
what needs to be updated, and using L{Console.blit}.
Drawing
=======
Once you have the root console from L{tdl.init} you can start drawing on
it using a method such as L{Console.drawChar}.
When using this method you can have the char parameter be an integer or a
single character string.
The fgcolor and bgcolor parameters expect a three-item list
[red, green, blue] with integers in the 0-255 range with [0, 0, 0] being
black and [255, 255, 255] being white.
Or instead you can use None in the place of any of the three parameters
to tell the library to not overwrite colors.
After the drawing functions are called a call to L{tdl.flush} will update
the screen.
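A minimal sketch of a session (the window size and the drawn character are
just examples):
    import tdl
    console = tdl.init(80, 25)
    console.drawChar(1, 1, '@', fgcolor=(255, 255, 255), bgcolor=(0, 0, 0))
    tdl.flush()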
"""
import sys
import os
import ctypes
import weakref
import array
import itertools
import textwrap
import struct
import re
import warnings
from . import event, map, noise
from .__tcod import _lib, _Color, _unpackfile
_IS_PYTHON3 = (sys.version_info[0] == 3)
if _IS_PYTHON3: # some type lists to use with isinstance
_INTTYPES = (int,)
_NUMTYPES = (int, float)
_STRTYPES = (str, bytes)
else:
_INTTYPES = (int, long)
_NUMTYPES = (int, long, float)
_STRTYPES = (str,)
def _encodeString(string): # still used for filepaths, and that's about it
"changes string into bytes if running in python 3, for sending to ctypes"
if _IS_PYTHON3 and isinstance(string, str):
return string.encode()
return string
#def _formatString(string):
# pass
def _formatChar(char):
"""Prepares a single characters for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current characters
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return None
#if isinstance(char, _INTTYPES):
# return char
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
return int(char) # conversion faster than type check
#raise TypeError('Expected char parameter to be a single characters string, number, or None, got: %s' % repr(char))
_fontinitialized = False
_rootinitialized = False
_rootConsoleRef = None
# remove dots from common functions
_setchar = _lib.TCOD_console_set_char
_setfore = _lib.TCOD_console_set_char_foreground
_setback = _lib.TCOD_console_set_char_background
_setcharEX = _lib.TCOD_console_put_char_ex
def _verify_colors(*colors):
"""Used internally.
Raise an assertion error if the parameters can not be converted into colors.
"""
for color in colors:
        assert _iscolor(color), 'a color must be a 3-item tuple, web format, or None, received %s' % repr(color)
return True
def _iscolor(color):
"""Used internally.
A debug function to see if an object can be used as a TCOD color struct.
None counts as a parameter to keep the current colors instead.
This function is often part of an inner-loop and can slow a program down.
It has been made to work with assert and can be skipped with the -O flag.
Still it's called often and must be optimized.
"""
if color is None:
return True
if isinstance(color, (tuple, list, _Color)):
return len(color) == 3
if isinstance(color, _INTTYPES):
return True
return False
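    # e.g. _iscolor((255, 0, 0)), _iscolor(0xFF0000) and _iscolor(None) all
    # return True; an unrecognized type such as a plain string returns False.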
## not using this for now
#class Color(object):
#
# def __init__(self, r, g, b):
# self._color = (r, g, b)
# self._ctype = None
#
# def _getCType(self):
# if not self._ctype:
# self._ctype = _Color(*self._color)
# return self._ctype
#
# def __len__(self):
# return 3
# Format the color to ctypes, will preserve None and False
_formatColor = _Color.new
def _getImageSize(filename):
"""Try to get the width and height of a bmp of png image file"""
file = open(filename, 'rb')
if file.read(8) == b'\x89PNG\r\n\x1a\n': # PNG
while 1:
length, = struct.unpack('>i', file.read(4))
chunkID = file.read(4)
            if chunkID == b'': # EOF
return None
if chunkID == b'IHDR':
# return width, height
return struct.unpack('>ii', file.read(8))
file.seek(4 + length, 1)
file.seek(0)
    if file.read(2) == b'BM': # Bitmap ('BM' is a 2-byte magic number)
file.seek(18, 0) # skip to size data
# return width, height
return struct.unpack('<ii', file.read(8))
# return None on error, unknown file
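    # Usage sketch (hypothetical file): _getImageSize('font.png') returns the
    # (width, height) read from the PNG or BMP header, or None for an
    # unrecognized format.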
class TDLError(Exception):
"""
The catch all for most TDL specific errors.
"""
class _MetaConsole(object):
"""
    Contains methods shared by both the L{Console} and L{Window} classes.
"""
__slots__ = ('width', 'height', 'console', '_cursor', '_fgcolor',
'_bgcolor', '_bgblend', '_colorLock', '__weakref__', '__dict__')
def __init__(self):
self._cursor = (0, 0)
self._scrollMode = 'error'
self._fgcolor = _formatColor((255, 255, 255))
self._bgcolor = _formatColor((0, 0, 0))
self._bgblend = 1 # SET
self._colorLock = None # which object sets the ctype color options
def _normalizePoint(self, x, y):
"""Check if a point is in bounds and make minor adjustments.
Respects Pythons negative indexes. -1 starts at the bottom right.
Replaces the _drawable function
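        e.g. on a hypothetical 80x25 console, (-1, -1) normalizes to (79, 24).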
"""
#assert isinstance(x, _INTTYPES), 'x must be an integer, got %s' % repr(x)
#assert isinstance(y, _INTTYPES), 'y must be an integer, got %s' % repr(y)
# force int, always faster than type checking
x = int(x)
y = int(y)
assert (-self.width <= x < self.width) and (-self.height <= y < self.height), \
            ('(%i, %i) is an invalid position on %s' % (x, y, self))
# handle negative indexes
if x < 0:
x += self.width
if y < 0:
y += self.height
return (x, y)
def _normalizeRect(self, x, y, width, height):
"""Check if the rectangle is in bounds and make minor adjustments.
        Raises AssertionError for any problems.
"""
x, y = self._normalizePoint(x, y) # inherit _normalizePoint logic
assert width is None or isinstance(width, _INTTYPES), 'width must be an integer or None, got %s' % repr(width)
assert height is None or isinstance(height, _INTTYPES), 'height must be an integer or None, got %s' % repr(height)
# if width or height are None then extend them to the edge
if width is None:
width = self.width - x
elif width < 0: # handle negative numbers
width += self.width
width = max(0, width) # a 'too big' negative is clamped zero
        if height is None:
            height = self.height - y
        elif height < 0: # handle negative numbers
            height += self.height
        height = max(0, height) # a 'too big' negative is clamped zero
# reduce rect size to bounds
width = min(width, self.width - x)
height = min(height, self.height - y)
return x, y, width, height
def _normalizeCursor(self, x, y):
"""return the normalized the cursor position."""
width, height = self.getSize()
assert width != 0 and height != 0, 'can not print on a console with a width or height of zero'
while x >= width:
x -= width
y += 1
while y >= height:
if self._scrollMode == 'scroll':
y -= 1
self.scroll(0, -1)
elif self._scrollMode == 'error':
# reset the cursor on error
self._cursor = (0, 0)
raise TDLError('Cursor has reached the end of the console')
return (x, y)
def _lockColors(self, forceUpdate=False):
"""Make sure the color options on the root console match ths instance"""
if self.console._lockColors is not self or forceUpdate:
self.console._lockColors = self
_lib.TCOD_console_set_default_background(self.console, self.bgcolor)
_lib.TCOD_console_set_default_foreground(self.console, self.fgcolor)
#
def setMode(self, mode):
"""Configure how this console will react to the cursor writing past the
end if the console.
This is for methods that use the virtual cursor, such as L{printStr}.
@type mode: string
@param mode: Possible settings are:
- 'error' - A TDLError will be raised once the cursor
reaches the end of the console. Everything up until
the error will still be drawn.
This is the default setting.
- 'scroll' - The console will scroll up as stuff is
written to the end.
You can restrict the region with L{tdl.Window} when
doing this.
"""
MODES = ['error', 'scroll']
if mode.lower() not in MODES:
raise TDLError('mode must be one of %s, got %s' % (MODES, repr(mode)))
self._scrollMode = mode.lower()
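        # e.g. console.setMode('scroll') makes methods that use the virtual
        # cursor (such as printStr) scroll instead of raising TDLError.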
def setColors(self, fg=None, bg=None):
"""Sets the colors to be used with the L{printStr} function.
Values of None will only leave the current values unchanged.
"""
if self.console._lockColors is self:
self.console._lockColors = None
if fg is not None:
self._fgcolor = _formatColor(fg)
if bg is not None:
self._bgcolor = _formatColor(bg)
def printStr(self, string):
"""Print a string at the virtual cursor.
Handles special characters such as '\\n' and '\\r'.
        Printing past the bottom of the console will scroll everything upwards
        when the scroll mode is set with L{setMode}.
Colors can be set with L{setColors} and the virtual cursor can be moved
with L{move}.
@type string: string
        @param string: The string to print at the virtual cursor.
"""
x, y = self._cursor
for char in string:
if char == '\n': # line break
x = 0
y += 1
continue
if char == '\r': # return
x = 0
continue
x, y = self._normalizeCursor(x, y)
self.drawChar(x, y, char, self._fgcolor, self._bgcolor)
x += 1
self._cursor = (x, y)
def write(self, string):
"""This method mimics basic file-like behaviour.
Because of this method you can replace sys.stdout or sys.stderr with
a L{Typewriter} instance.
        This is a convoluted process and the behaviour seen now can be expected
        to change in later versions.
@type string: string
"""
# some 'basic' line buffer stuff.
# there must be an easier way to do this. The textwrap module didn't
# help much.
x, y = self._normalizeCursor(*self._cursor)
width, height = self.getSize()
wrapper = textwrap.TextWrapper(initial_indent=(' '*x), width=width)
writeLines = []
for line in string.split('\n'):
if line:
writeLines += wrapper.wrap(line)
wrapper.initial_indent = ''
else:
writeLines.append([])
for line in writeLines:
x, y = self._normalizeCursor(x, y)
self.drawStr(x, y, line[x:], self._fgcolor, self._bgcolor)
y += 1
x = 0
y -= 1
self._cursor = (x, y)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a single characters.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type char: int, string, or None
        @param char: Should be an integer, single-character string, or None.
            You can set the char parameter as None if you only want to change
            the colors of the tile.
@type fgcolor: (r, g, b) or None
        @param fgcolor: For fgcolor and bgcolor you use a 3-item list with
            integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
            You can always use ((x, y) in console) to
check if a tile is drawable.
"""
assert _verify_colors(fgcolor, bgcolor)
x, y = self._normalizePoint(x, y)
x, y = ctypes.c_int(x), ctypes.c_int(y)
self._setChar(x, y, _formatChar(char),
_formatColor(fgcolor), _formatColor(bgcolor))
def drawStr(self, x, y, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a string starting at x and y. Optinally colored.
A string that goes past the right side will wrap around. A string
wraping to below the console will raise a L{TDLError} but will still be
written out. This means you can safely ignore the errors with a
try... except block if you're fine with partily written strings.
\\r and \\n are drawn on the console as normal characters tiles. No
special encoding is done and any string will translate to the characters
table as is.
For a string drawing operation that respects special characters see the
L{Typewriter} class.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type string: string or iterable
@param string: Can be a string or an iterable of numbers.
            Special characters are ignored and rendered as any other
            character.
@type fgcolor: (r, g, b) or None
        @param fgcolor: For fgcolor and bgcolor you use a 3-item list with
            integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
            You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y = self._normalizePoint(x, y)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
width, height = self.getSize()
batch = [] # prepare a batch operation
def _drawStrGen(x=x, y=y, string=string, width=width, height=height):
"""Generator for drawStr
Iterates over ((x, y), ch) data for _setCharBatch, raising an
error if the end of the console is reached.
"""
for char in string:
if y == height:
raise TDLError('End of console reached.')
#batch.append(((x, y), _formatChar(char))) # ((x, y), ch)
yield((x, y), _formatChar(char))
x += 1 # advance cursor
if x == width: # line break
x = 0
y += 1
self._setCharBatch(_drawStrGen(), fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a rectangle starting from x and y and extending to width and height.
If width or height are None then it will extend to the edge of the console.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
            console or can be a negative number to be sized relative
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
        @param string: Should be an integer, single-character string, or None.
            You can set the char parameter as None if you only want
            to change the colors of an area.
@type fgcolor: (r, g, b) or None
        @param fgcolor: For fgcolor and bgcolor you use a 3-item list with
            integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
            You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
# use itertools to make an x,y grid
        # using ctypes here reduces type conversions later
grid = itertools.product((ctypes.c_int(x) for x in range(x, x + width)),
(ctypes.c_int(y) for y in range(y, y + height)))
        # pair every grid coordinate with the (repeated) character
batch = zip(grid, itertools.repeat(char, width * height))
self._setCharBatch(batch, fgcolor, bgcolor, nullChar=(char is None))
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Similar to L{drawRect} but only draws the outline of the rectangle.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
            console or can be a negative number to be sized relative
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
        @param string: Should be an integer, single-character string, or None.
            You can set the char parameter as None if you only want
            to change the colors of an area.
@type fgcolor: (r, g, b) or None
        @param fgcolor: For fgcolor and bgcolor you use a 3-item list with
            integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
            You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
if width == 1 or height == 1: # it's just a single width line here
return self.drawRect(x, y, width, height, char, fgcolor, bgcolor)
# draw sides of frame with drawRect
self.drawRect(x, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y, width, 1, char, fgcolor, bgcolor)
self.drawRect(x + width - 1, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y + height - 1, width, 1, char, fgcolor, bgcolor)
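        # e.g. drawFrame(0, 0, 10, 5, '#') outlines a 10x5 box with '#'
        # tiles and leaves the 8x3 interior untouched.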
def blit(self, source, x=0, y=0, width=None, height=None, srcX=0, srcY=0):
"""Blit another console or Window onto the current console.
By default it blits the entire source to the topleft corner.
@type source: L{Console} or L{Window}
@param source: Source window can be a L{Console} or L{Window} instance.
It can even blit to itself without any problems.
@type x: int
@param x: X coordinate to blit to.
@type y: int
@param y: Y coordinate to blit to.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend as far as possible to the
            bottom right corner of the blit area or can be a negative
            number to be sized relative to the total size of the
B{destination} console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type srcX: int
        @param srcX: The source console's x coordinate to blit from.
@type srcY: int
        @param srcY: The source console's y coordinate to blit from.
"""
# hardcode alpha settings for now
fgalpha=1.0
bgalpha=1.0
        assert isinstance(source, (Console, Window)), "source must be a Window or Console instance"
# handle negative indexes and rects
        # negative width and height will be set relative to the destination
# and will also be clamped to the smallest Console
x, y, width, height = self._normalizeRect(x, y, width, height)
srcX, srcY, width, height = source._normalizeRect(srcX, srcY, width, height)
# translate source and self if any of them are Window instances
srcX, srcY = source._translate(srcX, srcY)
source = source.console
x, y = self._translate(x, y)
self = self.console
if self == source:
# if we are the same console then we need a third console to hold
# onto the data, otherwise it tries to copy into itself and
# starts destroying everything
tmp = Console(width, height)
_lib.TCOD_console_blit(source, srcX, srcY, width, height, tmp, 0, 0, fgalpha, bgalpha)
_lib.TCOD_console_blit(tmp, 0, 0, width, height, self, x, y, fgalpha, bgalpha)
else:
_lib.TCOD_console_blit(source, srcX, srcY, width, height, self, x, y, fgalpha, bgalpha)
def getCursor(self):
"""Return the virtual cursor position.
@rtype: (x, y)
@return: Returns (x, y) a 2-integer tuple containing where the next
L{addChar} or L{addStr} will start at.
This can be changed with the L{move} method."""
x, y = self._cursor
width, height = self.parent.getSize()
while x >= width:
x -= width
y += 1
        if y >= height and self._scrollMode == 'scroll':
y = height - 1
return x, y
def getSize(self):
"""Return the size of the console as (width, height)
@rtype: (width, height)
"""
return self.width, self.height
def __iter__(self):
"""Return an iterator with every possible (x, y) value for this console.
It goes without saying that working on the console this way is a
slow process, especially for Python, and should be minimized.
@rtype: iter((x, y), ...)
"""
return itertools.product(range(self.width), range(self.height))
def move(self, x, y):
"""Move the virtual cursor.
@type x: int
@param x: X position to place the cursor.
@type y: int
@param y: Y position to place the cursor.
"""
self._cursor = self._normalizePoint(x, y)
def scroll(self, x, y):
"""Scroll the contents of the console in the direction of x,y.
Uncovered areas will be cleared.
        Does not move the virtual cursor.
@type x: int
@param x: Distance to scroll along x-axis
@type y: int
@param y: Distance to scroll along y-axis
@rtype: iter((x, y), ...)
@return: Iterates over the (x, y) of any tile uncovered after scrolling.
"""
assert isinstance(x, _INTTYPES), "x must be an integer, got %s" % repr(x)
        assert isinstance(y, _INTTYPES), "y must be an integer, got %s" % repr(y)
def getSlide(x, length):
"""get the parameters needed to scroll the console in the given
direction with x
returns (x, length, srcx)
"""
if x > 0:
srcx = 0
length -= x
elif x < 0:
srcx = abs(x)
x = 0
length -= srcx
else:
srcx = 0
return x, length, srcx
def getCover(x, length):
"""return the (x, width) ranges of what is covered and uncovered"""
cover = (0, length) # everything covered
uncover = None # nothing uncovered
if x > 0: # left side uncovered
cover = (x, length - x)
uncover = (0, x)
elif x < 0: # right side uncovered
x = abs(x)
cover = (0, length - x)
uncover = (length - x, x)
return cover, uncover
width, height = self.getSize()
if abs(x) >= width or abs(y) >= height:
return self.clear() # just clear the console normally
# get the ranges of the areas that will be uncovered
coverX, uncoverX = getCover(x, width)
coverY, uncoverY = getCover(y, height)
# so at this point we know that coverX and coverY makes a rect that
# encases the areas that we end up blitting to. uncoverX/Y makes a
# rect in the corner of the uncovered areas. So we need to combine
# the uncoverX/Y with coverY/X to make what's left of the uncovered
        # areas. Explaining it makes it much easier to do now.
# But first we need to blit.
x, width, srcx = getSlide(x, width)
y, height, srcy = getSlide(y, height)
self.blit(self, x, y, width, height, srcx, srcy)
if uncoverX: # clear sides (0x20 is space)
self.drawRect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 0x20, 0x000000, 0x000000)
if uncoverY: # clear top/bottom
self.drawRect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
if uncoverX and uncoverY: # clear corner
self.drawRect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
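        # e.g. scroll(0, -1) shifts the contents up one row and clears the
        # uncovered bottom row with spaces (0x20).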
def getChar(self, x, y):
"""Return the characters and colors of a tile as (ch, fg, bg)
This method runs very slowly as is not recommended to be called
frequently.
@rtype: (int, (r, g, b), (r, g, b))
@returns: Returns a 3-items tuple. The first items is an integer of the
characters at the position (x, y) the second and third are the
foreground and background colors respectfully.
"""
raise NotImplementedError('Method here only exists for the docstring')
def __contains__(self, position):
"""Use ((x, y) in console) to check if a position is drawable on this console.
"""
x, y = position
return (0 <= x < self.width) and (0 <= y < self.height)
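        # e.g. (0, 0) in console is True, while (console.width, 0) in console
        # is False (valid indexes run from 0 to width-1 / height-1).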
class Console(_MetaConsole):
"""Contains characters and color data and can be drawn to.
The console created by the L{tdl.init} function is the root console and is the
console that is rendered to the screen with L{flush}.
Any console created from the Console class is an off-screen console that
can be drawn on before being L{blit} to the root console.
"""
__slots__ = ('_as_parameter_', '_typewriter')
def __init__(self, width, height):
"""Create a new offscreen console.
@type width: int
@param width: Width of the console in tiles
@type height: int
@param height: Height of the console in tiles
"""
_MetaConsole.__init__(self)
if not _rootinitialized:
            raise TDLError('Can not create Consoles before tdl.init')
self._as_parameter_ = _lib.TCOD_console_new(width, height)
self.console = self
self.width = width
self.height = height
self._typewriter = None # "typewriter lock", makes sure the colors are set to the typewriter
# will be phased out with the Typewriter class
@classmethod
def _newConsole(cls, console):
"""Make a Console instance, from a console ctype"""
self = cls.__new__(cls)
_MetaConsole.__init__(self)
self._as_parameter_ = console
self.console = self
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
self._typewriter = None
return self
def __del__(self):
"""
If the main console is garbage collected then the window will be closed as well
"""
# If this is the root console the window will close when collected
try:
if isinstance(self._as_parameter_, ctypes.c_void_p):
global _rootinitialized, _rootConsoleRef
_rootinitialized = False
_rootConsoleRef = None
_lib.TCOD_console_delete(self)
except StandardError:
pass # I forget why I put this here but I'm too afraid to delete it
def __copy__(self):
# make a new class and blit
clone = self.__class__(self.width, self.height)
clone.blit(self)
return clone
def __getstate__(self):
# save data from getChar
data = [self.getChar(x, y) for x,y in
itertools.product(range(self.width), range(self.height))]
return self.width, self.height, data
def __setstate__(self, state):
# make console from __init__ and unpack a getChar array
width, height, data = state
self.__init__(width, height)
for (x, y), graphic in zip(itertools.product(range(width),
range(height)), data):
self.drawChar(x, y, *graphic)
def _replace(self, console):
"""Used internally
Mostly used just to replace this Console object with the root console
If another Console object is used then they are swapped
"""
if isinstance(console, Console):
self._as_parameter_, console._as_parameter_ = \
console._as_parameter_, self._as_parameter_ # swap tcod consoles
else:
self._as_parameter_ = console
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
return self
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console for this Window
Because this is a Console instead of a Window we return the paramaters
untouched"""
return x, y
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Console.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-item list with integers that range 0-255.
Unlike most other operations you cannot use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self._typewriter = None
_lib.TCOD_console_set_default_background(self, _formatColor(bgcolor))
_lib.TCOD_console_set_default_foreground(self, _formatColor(fgcolor))
_lib.TCOD_console_clear(self)
def _setChar(self, x, y, char, fgcolor=None, bgcolor=None, bgblend=1):
"""
Sets a character.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_formatChar and _formatColor before passing to this."""
# buffer values as ctypes objects
console = self._as_parameter_
if char is not None and fgcolor is not None and bgcolor is not None:
_setcharEX(console, x, y, char, fgcolor, bgcolor)
return
if char is not None:
_setchar(console, x, y, char)
if fgcolor is not None:
_setfore(console, x, y, fgcolor)
if bgcolor is not None:
_setback(console, x, y, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1, nullChar=False):
"""
Try to perform a batch operation otherwise fall back to _setChar.
If fgcolor and bgcolor are defined then this is faster but not by very
much.
batch is an iterable of ((x, y), ch) items
"""
if fgcolor and not nullChar:
# buffer values as ctypes objects
self._typewriter = None # clear the typewriter as colors will be set
console = self._as_parameter_
bgblend = ctypes.c_int(bgblend)
if not bgcolor:
bgblend = 0
else:
_lib.TCOD_console_set_default_background(console, bgcolor)
_lib.TCOD_console_set_default_foreground(console, fgcolor)
_putChar = _lib.TCOD_console_put_char # remove dots and make local
for (x, y), char in batch:
_putChar(console, x, y, char, bgblend)
else:
for (x, y), char in batch:
self._setChar(x, y, char, fgcolor, bgcolor, bgblend)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
char = _lib.TCOD_console_get_char(self, x, y)
bgcolor = _lib.TCOD_console_get_char_background_wrapper(self, x, y)
fgcolor = _lib.TCOD_console_get_char_foreground_wrapper(self, x, y)
return char, tuple(fgcolor), tuple(bgcolor)
def __repr__(self):
return "<Console (Width=%i Height=%i)>" % (self.width, self.height)
class Window(_MetaConsole):
"""A Window contains a small isolated part of a Console.
Drawing on the Window draws on the Console.
Making a Window and setting its width or height to None will extend it to
the edge of the console.
"""
__slots__ = ('parent', 'x', 'y')
def __init__(self, console, x, y, width, height):
"""Isolate part of a L{Console} or L{Window} instance.
@type console: L{Console} or L{Window}
@param console: The parent object which can be a L{Console} or another
L{Window} instance.
@type x: int
@param x: X coordinate to place the Window.
This follows the normal rules for indexing so you can use a
negative integer to place the Window relative to the bottom
right of the parent Console instance.
@type y: int
@param y: Y coordinate to place the Window.
See x.
@type width: int or None
@param width: Width of the Window.
Can be None to extend as far as possible to the
bottom right corner of the parent Console or can be a
negative number to be sized relative to the Console's total
size.
@type height: int or None
@param height: Height of the Window.
See width.
"""
_MetaConsole.__init__(self)
assert isinstance(console, (Console, Window)), 'console parameter must be a Console or Window instance, got %s' % repr(console)
self.parent = console
self.x, self.y, self.width, self.height = console._normalizeRect(x, y, width, height)
if isinstance(console, Console):
self.console = console
else:
self.console = self.parent.console
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console"""
# we add our position relative to our parent and then call then next parent up
return self.parent._translate((x + self.x), (y + self.y))
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Window.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-item list with integers that range 0-255.
Unlike most other operations you cannot use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self.drawRect(0, 0, None, None, 0x20, fgcolor, bgcolor)
def _setChar(self, x, y, char=None, fgcolor=None, bgcolor=None, bgblend=1):
self.parent._setChar((x + self.x), (y + self.y), char, fgcolor, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1):
myX = self.x # remove dots for speed up
myY = self.y
self.parent._setCharBatch((((x + myX, y + myY), ch) for ((x, y), ch) in batch),
fgcolor, bgcolor, bgblend)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y = self._normalizePoint(x, y)
self.parent.drawChar(x + self.x, y + self.y, char, fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawRect(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawFrame(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
return self.console.getChar(*self._translate(x, y))
def __repr__(self):
return "<Window(X=%i Y=%i Width=%i Height=%i)>" % (self.x, self.y,
self.width,
self.height)
def init(width, height, title=None, fullscreen=False, renderer='OPENGL'):
"""Start the main console with the given width and height and return the
root console.
Call the console's drawing functions. Then remember to use L{tdl.flush} to
make what's drawn visible on the console.
@type width: int
@param width: width of the root console (in tiles)
@type height: int
@param height: height of the root console (in tiles)
@type title: string
@param title: Text to display as the window title.
If left None it defaults to the running script's filename.
@type fullscreen: boolean
@param fullscreen: Can be set to True to start in fullscreen mode.
@type renderer: string
@param renderer: Can be one of 'GLSL', 'OPENGL', or 'SDL'.
Due to the way Python works you're unlikely to see much of an
improvement by using 'GLSL' or 'OPENGL' as most of the
time Python is slow interacting with the console and the
rendering itself is pretty fast even on 'SDL'.
@rtype: L{Console}
@return: The root console. Only what is drawn on the root console is
what's visible after a call to L{tdl.flush}.
After the root console is garbage collected, the window made by
this function will close.
"""
RENDERERS = {'GLSL': 0, 'OPENGL': 1, 'SDL': 2}
global _rootinitialized, _rootConsoleRef
if not _fontinitialized: # set the default font to the one that comes with tdl
setFont(_unpackfile('terminal8x8.png'), None, None, True, True)
if renderer.upper() not in RENDERERS:
raise TDLError('No such render type "%s", expected one of "%s"' % (renderer, '", "'.join(RENDERERS)))
renderer = RENDERERS[renderer.upper()]
# If a console already exists then make a clone to replace it
if _rootConsoleRef and _rootConsoleRef():
oldroot = _rootConsoleRef()
rootreplacement = Console(oldroot.width, oldroot.height)
rootreplacement.blit(oldroot)
oldroot._replace(rootreplacement)
del rootreplacement
if title is None: # use a default title
if sys.argv:
# Use the script filename as the title.
title = os.path.basename(sys.argv[0])
else:
title = 'python-tdl'
_lib.TCOD_console_init_root(width, height, _encodeString(title), fullscreen, renderer)
#event.get() # flush the libtcod event queue to fix some issues
# issues may be fixed already
event._eventsflushed = False
_rootinitialized = True
rootconsole = Console._newConsole(ctypes.c_void_p())
_rootConsoleRef = weakref.ref(rootconsole)
return rootconsole
def flush():
"""Make all changes visible and update the screen.
Remember to call this function after drawing operations.
Calls to flush will enforce the frame rate limit set by L{tdl.setFPS}.
This function can only be called after L{tdl.init}
"""
if not _rootinitialized:
raise TDLError('Cannot flush without first initializing with tdl.init')
_lib.TCOD_console_flush()
def setFont(path, columns=None, rows=None, columnFirst=False,
greyscale=False, altLayout=False):
"""Changes the font to be used for this session.
This should be called before L{tdl.init}
If the font specifies its size in its filename (e.g. font_NxN.png) then this
function can auto-detect the tileset formatting and the parameters columns
and rows can be left None.
While it's possible to change the font mid-program, it can sometimes
break in rare circumstances. So use caution when doing this.
@type path: string
@param path: Must be a string filepath where a bmp or png file is found.
@type columns: int
@param columns: Number of columns in the tileset.
Can be left None for auto-detection.
@type rows: int
@param rows: Number of rows in the tileset.
Can be left None for auto-detection.
@type columnFirst: boolean
@param columnFirst: Defines if the character order goes along the rows or
columns.
It should be True if the character codes 0-15 are in the
first column.
And should be False if the characters 0-15
are in the first row.
@type greyscale: boolean
@param greyscale: Creates an anti-aliased font from a greyscale bitmap.
Otherwise it uses the alpha channel for anti-aliasing.
Unless you actually need anti-aliasing from a font you
know uses a smooth greyscale channel you should leave
this on False.
@type altLayout: boolean
@param altLayout: An alternative layout with space in the upper left
corner.
The columns parameter is ignored if this is True;
find examples of this layout in the font/libtcod/
directory included with the python-tdl source.
@raise TDLError: Will be raised if no file is found at path or if auto-
detection fails.
@note: A png file that's been optimized can fail to load correctly on
Mac OS X, creating a garbled mess when rendering.
Don't use a program like optipng, or just use bmp files instead, if
you want your program to work on Macs.
"""
# put up some constants that are only used here
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_LAYOUT_TCOD = 8
global _fontinitialized
_fontinitialized = True
flags = 0
if altLayout:
flags |= FONT_LAYOUT_TCOD
elif columnFirst:
flags |= FONT_LAYOUT_ASCII_INCOL
else:
flags |= FONT_LAYOUT_ASCII_INROW
if greyscale:
flags |= FONT_TYPE_GREYSCALE
if not os.path.exists(path):
raise TDLError('no file exists at: "%s"' % path)
path = os.path.abspath(path)
# and the rest is the auto-detect script
imgSize = _getImageSize(path) # try to find image size
if imgSize:
imgWidth, imgHeight = imgSize
# try to get font size from filename
match = re.match('.*?([0-9]+)[xX]([0-9]+)', os.path.basename(path))
if match:
fontWidth, fontHeight = match.groups()
fontWidth, fontHeight = int(fontWidth), int(fontHeight)
# estimate correct tileset size
estColumns, remC = divmod(imgWidth, fontWidth)
estRows, remR = divmod(imgHeight, fontHeight)
if remC or remR:
warnings.warn("Font may be incorrectly formatted.")
if not columns:
columns = estColumns
if not rows:
rows = estRows
else:
# the font name excluded the font's size
if not (columns and rows):
# no matched font size and no tileset is given
raise TDLError('%s has no font size in filename' % os.path.basename(path))
if match and columns and rows: # fontWidth/fontHeight only exist when the filename matched
# confirm user set options
if (fontWidth * columns != imgWidth or
fontHeight * rows != imgHeight):
warnings.warn("setFont parameters are set as if the image size is (%d, %d) when the detected size is actually (%i, %i)"
% (fontWidth * columns, fontHeight * rows,
imgWidth, imgHeight))
else:
warnings.warn("%s is probably not an image." % os.path.basename(path))
if not (columns and rows):
# didn't auto-detect
raise TDLError('Can not auto-detect the tileset of %s' % os.path.basename(path))
_lib.TCOD_console_set_custom_font(_encodeString(path), flags, columns, rows)
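# Worked example of the auto-detection above: for a 128x64 pixel image
# named "terminal8x8.png", the regex pulls fontWidth=fontHeight=8, so
# columns=16 and rows=8 are inferred when both are left as None.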
def getFullscreen():
"""Returns True if program is fullscreen.
@rtype: boolean
@return: Returns True if the window is in fullscreen mode.
Otherwise returns False.
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
return _lib.TCOD_console_is_fullscreen()
def setFullscreen(fullscreen):
"""Changes the fullscreen state.
@type fullscreen: boolean
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
_lib.TCOD_console_set_fullscreen(fullscreen)
def setTitle(title):
"""Change the window title.
@type title: string
"""
if not _rootinitialized:
raise TDLError('Not initialized. Set title with tdl.init')
_lib.TCOD_console_set_window_title(_encodeString(title))
def screenshot(path=None):
"""Capture the screen and save it as a png file
@type path: string
@param path: The filepath to save the screenshot.
If path is None then the image will be placed in the current
folder with the names:
screenshot001.png, screenshot002.png, ...
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
if isinstance(path, str):
_lib.TCOD_sys_save_screenshot(_encodeString(path))
elif path is None: # save to screenshot001.png, screenshot002.png, ...
filelist = os.listdir('.')
n = 1
filename = 'screenshot%.3i.png' % n
while filename in filelist:
n += 1
filename = 'screenshot%.3i.png' % n
_lib.TCOD_sys_save_screenshot(_encodeString(filename))
else: # assume file like obj
#save to temp file and copy to file-like obj
tmpname = os.tempnam()
_lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
with open(tmpname, 'rb') as tmpfile:
path.write(tmpfile.read())
os.remove(tmpname)
#else:
# raise TypeError('path is an invalid type: %s' % type(path))
def setFPS(frameRate):
"""Set the maximum frame rate.
@type frameRate: int
@param frameRate: Further calls to L{tdl.flush} will limit the speed of
the program to run at <frameRate> frames per second. Can
also be set to 0 to run without a limit.
Defaults to None.
"""
if frameRate is None:
frameRate = 0
assert isinstance(frameRate, _INTTYPES), 'frameRate must be an integer or None, got: %s' % repr(frameRate)
_lib.TCOD_sys_set_fps(frameRate)
def getFPS():
"""Return the current frames per second of the running program set by
L{setFPS}
@rtype: int
@return: Returns the frameRate set by setFPS.
If set to no limit, this will return 0.
"""
return _lib.TCOD_sys_get_fps()
def forceResolution(width, height):
"""Change the fullscreen resoulution
@type width: int
@type height: int
"""
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
__all__ = [_var for _var in locals().keys() if _var[0] != '_' and _var not in
['sys', 'os', 'ctypes', 'array', 'weakref', 'itertools', 'textwrap',
'struct', 're', 'warnings']] # remove modules from __all__
__all__ += ['_MetaConsole'] # keep this object public to show the documentation in epydoc
__license__ = "New BSD License"
__email__ = "[email protected]"
file = open(os.path.join(os.path.dirname(__file__), 'VERSION.txt'), 'r')
__version__ = file.read()
file.close()
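# A minimal usage sketch of the API defined above (illustrative only;
# assumes this module is importable as `tdl` and a font file is available):
#
# import tdl
# console = tdl.init(80, 25, title='demo')
# console.drawChar(1, 1, '@', fgcolor=(255, 255, 255), bgcolor=(0, 0, 0))
# tdl.flush() # nothing is visible until flush is called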
|
Experience The Best Auto Repair Shop and Limousine Service in the South Sound!
We speak Spanish and English for your convenience!
Welcome to Jimmy’s Auto Services. We believe in providing our customers with a clean and friendly shop in a family atmosphere, and we offer a full line of repair services.
Jimmy’s Limousine Service is located in the same building. Jimmy’s Limo Service features the “Party On Wheels” H2 Hummer limousine along with our other Luxury Stretch Limousines.
Jimmy’s Auto Services serves Lacey, Tumwater, Olympia, Thurston County, and indeed all of the southern Puget Sound region of Washington State with high-quality, professional auto services, whether you need your vehicle to get you there or need someone to take you there!
|
import io, csv, json
from datetime import datetime
from sqlalchemy import desc, inspect, func, text
from sqlalchemy.orm import subqueryload
from flask import abort, Markup
from flask_login import current_user
from typeseam.app import db
from .models import (
TypeformResponse,
Typeform, SeamlessDoc,
FormSubmission,
LogEntry
)
from .serializers import (
TypeformResponseSerializer,
FlatResponseSerializer,
TypeformSerializer,
SerializationError,
DeserializationError
)
response_serializer = TypeformResponseSerializer()
flat_response_serializer = FlatResponseSerializer()
typeform_serializer = TypeformSerializer()
def save_new_form_submission(data, county="sanfrancisco"):
submission = FormSubmission(
answers=data,
county=county
)
db.session.add(submission)
db.session.commit()
return submission
def get_submissions(uuids):
query = db.session.query(FormSubmission).filter(
FormSubmission.uuid.in_(uuids))
return query.all()
def get_submission_by_uuid(submission_uuid):
q = db.session.query(FormSubmission).filter(
FormSubmission.uuid == submission_uuid)
return q.first()
def delete_submission_forever(submission_uuid):
q = db.session.query(FormSubmission).filter(
FormSubmission.uuid == submission_uuid)
submission = q.first()
db.session.delete(submission)
db.session.commit()
def get_unopened_submissions():
data = get_submissions_with_logs()
unopened = []
for row in data:
if 'logs' not in row:
unopened.append(row['submission'])
else:
if not row['submission'].was_opened(row['logs']):
unopened.append(row['submission'])
return unopened
def get_latest_logentry():
q = db.session.query(LogEntry).\
filter(LogEntry.source == 'front').\
order_by(desc(LogEntry.datetime))
return q.first()
def save_new_logentries_from_front_events(events=None):
for event in events:
logentry = LogEntry.from_parsed_front_event(event)
db.session.add(logentry)
db.session.commit()
def get_all_submissions():
q = db.session.query(FormSubmission).\
order_by(desc(FormSubmission.date_received))
return q.all()
def get_logentries():
q = db.session.query(LogEntry).\
order_by(desc(LogEntry.datetime))
return q.all()
def save_new_logentry(uuid, event_type):
log = LogEntry(
datetime=datetime.now(),
user=current_user.email,
submission_key=uuid,
event_type=event_type,
source='form_filler'
)
db.session.add(log)
db.session.commit()
def save_multiple_logentries(uuids, event_type):
for uuid in uuids:
log = LogEntry(
datetime=datetime.now(),
user=current_user.email,
submission_key=uuid,
event_type=event_type,
source='form_filler'
)
db.session.add(log)
db.session.commit()
def get_submissions_with_logs():
lookups = {}
submissions = get_all_submissions()
logs = get_logentries()
for submission in submissions:
lookups[submission.uuid] = {'submission': submission}
for log in logs:
uuid = log.submission_key
if uuid in lookups:
if 'logs' not in lookups[uuid]:
lookups[uuid]['logs'] = [log]
else:
lookups[uuid]['logs'].append(log)
results = list(lookups.values())
for row in results:
if 'logs' in row:
row['logs'].sort(key=lambda e: e.datetime, reverse=True)
return sorted(results, key=lambda s: s['submission'].date_received, reverse=True)
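# Shape of the return value: a list of {'submission': <FormSubmission>,
# 'logs': [<LogEntry>, ...]} dicts sorted newest-first by date_received.
# Rows whose submission has no matching log entries carry no 'logs' key
# at all, so callers must test for it (or use .get).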
def get_stats():
base_data = get_submissions_with_logs()
stats = {
'received': len(base_data),
'opened': len([
s for s in base_data
if s['submission'].was_opened(s.get('logs', []))
]),
'days':[]
}
day_lookup = {}
for row in base_data:
for log in row.get('logs', []):
day = log.day()
if day in day_lookup:
day_lookup[day].append(log)
else:
day_lookup[day] = [log]
for day, logs in day_lookup.items():
stats['days'].append({
'date': day,
'received': len([
n for n in logs if n.event_type == 'received']),
'referred': len([
n for n in logs if n.event_type == 'referred']),
'opened': len([
n for n in logs if (
n.event_type == 'opened' and n.user == '[email protected]'
)]),
})
stats['days'].sort(key=lambda d: d['date'])
stats['days'] = Markup(json.dumps(stats['days']))
return stats
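# Shape of the result: {'received': int, 'opened': int, 'days': Markup},
# where 'days' wraps a JSON list of per-day
# {'date', 'received', 'referred', 'opened'} records sorted by date.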
def save_new_typeform_data(data, typeform=None):
if typeform:
data['user_id'] = typeform.user_id
data['typeform_id'] = typeform.id
data['translator'] = typeform.translator
models, errors = response_serializer.load(
data, many=True, session=db.session)
new_responses = []
if errors:
raise DeserializationError(str(errors))
if not models:
return []
for m in models:
if not inspect(m).persistent:
db.session.add(m)
new_responses.append(m)
if new_responses and typeform:
update_typeform_with_new_responses(typeform, new_responses)
db.session.commit()
return new_responses
def update_typeform_with_new_responses(typeform, responses):
latest_date = max(responses, key=lambda r: r.date_received).date_received
typeform.latest_response = latest_date
db.session.add(typeform)
def get_typeforms_for_user(user):
q = db.session.query(Typeform).\
options(subqueryload(Typeform.responses)).\
filter(Typeform.user_id == user.id).\
order_by(desc(Typeform.latest_response))
return typeform_serializer.dump(q.all(), many=True).data
def get_responses_for_typeform(typeform_id):
q = db.session.query(TypeformResponse).\
filter(TypeformResponse.typeform_id == typeform_id).\
order_by(desc(TypeformResponse.date_received))
responses = q.all()
responses_data = response_serializer.dump(responses, many=True).data
return responses_data
def get_responses_csv(user, typeform_key):
typeform = get_typeform(model=True, user_id=user.id, form_key=typeform_key)
# get responses
results = db.session.query(TypeformResponse, Typeform.form_key).\
join(Typeform, TypeformResponse.typeform_id == Typeform.id).\
filter(Typeform.user_id == user.id, Typeform.form_key == typeform_key).\
order_by(desc(TypeformResponse.date_received)).all()
# serialize them
data = flat_response_serializer.dump(results, many=True).data
if len(data) < 1:
abort(404)
# build csv
keys = list(data[0].keys())
keys.sort()
with io.StringIO() as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=keys, quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerows(data)
return csvfile.getvalue()
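# A self-contained sketch of the StringIO/DictWriter pattern used above
# (toy data, purely illustrative):
#
# import io, csv
# rows = [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]
# with io.StringIO() as f:
#     writer = csv.DictWriter(f, fieldnames=['a', 'b'],
#                             quoting=csv.QUOTE_NONNUMERIC)
#     writer.writeheader()
#     writer.writerows(rows)
#     text = f.getvalue() # '"a","b"\r\n1,"x"\r\n2,"y"\r\n'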
def get_seamless_doc_key_for_response(response):
return SeamlessDoc.query.get(response.seamless_id).seamless_key
def get_response_model(response_id):
return TypeformResponse.query.get(int(response_id))
def get_response_detail(user, response_id):
response = get_response_model(response_id)
if user.id != response.user_id:
abort(403)
return response_serializer.dump(response).data
def get_response_count():
return db.session.query(func.count(TypeformResponse.id)).scalar()
def create_typeform(form_key, title, user_id, translator, **kwargs):
params = dict(form_key=form_key, title=title, user_id=user_id)
if not all([form_key, title, user_id, translator]):
raise TypeError(
"Creating a new Typeform requires form_key, title, user_id, and translator arguments")
typeform = db.session.query(Typeform).filter_by(**params).first()
if not typeform:
params.update(dict(translator=translator, **kwargs))
typeform = Typeform(**params)
db.session.add(typeform)
db.session.commit()
return typeform
def get_typeform(model=False, **kwargs):
params = {k: v for k, v in kwargs.items() if v}
if not params:
abort(404)
typeform = db.session.query(Typeform).filter_by(**params).first()
if not typeform:
abort(404)
if model:
return typeform
return typeform_serializer.dump(typeform).data
|
Manillo v. Gorski 54 N.J. 378, 255 A.2d 258, 1969 N.J.
O'Keeffe v. Snyder 83 N.J. 478, 416 A.2d 862, 1980 N.J.
Newman v. Bost 122 N.C. 524, 29 S.E. 848, 1898 N.C.
Citation. K.B. 509 (King’s Bench 1945).
Brief Fact Summary. Plaintiff, who was a soldier staying in the house owned (but not occupied) by Defendant, found a brooch and then gave the brooch to the police who later, after not finding the rightful owner, gave the brooch to Defendant, who then sold the brooch.
Synopsis of Rule of Law. Because Defendant was not physically present in the house at any time, Plaintiff’s find was defensible against all parties except the rightful owner.
Facts. Defendant owned a house, but did not occupy the house. Then, in October of 1939, the house was requisitioned by the government to house soldiers. Defendant was compensated with 250 pounds per month. Plaintiff was housed in the residence in 1940. During his stay he found a brooch. He then alerted Defendant of his find, and took the brooch to the police to find the rightful owner, receiving a receipt. Thereafter, in 1942, the rightful owner was still not located, and the police gave the brooch over to Defendant. Defendant sold the brooch for 66 pounds to a jeweler who resold the brooch for 88 pounds. Plaintiff, through counsel, demanded return of the brooch from Defendant, who refused. The Plaintiff sued claiming return of the brooch, or its value, and damages for its detention.
Issue. Does Plaintiff have a claim of right for finding the brooch against Defendant, who owned the house where the brooch was found but did not ever occupy the house?
|
#!/usr/bin/env python
## $Id: constraint.py,v 1.3 2003/07/03 23:07:42 euske Exp $
##
## constraint.py - Pattern matching / constraint checker
##
import sys, re
import pstring
from regpat import PatternActionSet, PatCounter
from sentence import Sentence, TextTokenizer, SentenceSplitter, POSTagger
from abstfilter import AbstractFeeder, AbstractFilter, AbstractConsumer
from document import HTMLProcessor, TexProcessor, PlainTextProcessor
from unification import Unifier, UnificationError, forall, exists
from postagfix import POSTagFixer
from output import TerminalOutput
from grammarerror import GrammarNounAgreementError, GrammarVerbAgreementError, GrammarNonDeterminerError
def ispos(w, t):
return w.pos_pref == t or (w.pos_pref == None and t in w.pos)
class ParsePatternActionSet(PatternActionSet):
def __init__(self, observer, warntypes, debug_action=False):
self.debug_action = debug_action
PatternActionSet.__init__(self)
self.observer = observer
self.check_determiner = "det" in warntypes
self.check_plural = "plural" in warntypes
return
def compile_item0(self, t):
return lambda w: not w.processed and ispos(w, t)
def compile_item2(self, s):
return lambda w: not w.processed and (not isinstance(w.s, Sentence)) and s.lower() == w.s.lower()
def inherit_prop(self, m, inherit=None):
if inherit:
m.prop = inherit.prop
m.prop.match = m
else:
m.prop = Unifier()
return
c = PatCounter().inc
debug_action = True
def action_wrapper(self, n, pat1, action, m):
print "called:", n, map(str,m.getseq())
action(m)
return
## CONSTRAINTS
##
pat_det_pos = c('DT | DT1 | DTS | WDT | PRP$ | WP$')
def act_det_pos(self, m):
self.inherit_prop(m)
w = m.submatch.item
m.prop["determiner"] = True
if ispos(w, "DT1"):
m.prop["plural"] = False
elif ispos(w, "DTS"):
m.prop["plural"] = True
return
pat_pdts = c('PDT | PDT1 | PDTS')
def act_pdts(self, m):
self.inherit_prop(m)
w = m.submatch.item
if ispos(w, "PDT1"):
m.prop["plural"] = False
elif ispos(w, "PDTS"):
m.prop["plural"] = True
return
pat_modifiers = c('CD | JJ | JJR | JJS | NN | NNR')
pat_ng_3rdsing = c('<det_pos> <pdts>? <modifiers>* (NN | NNR)')
def act_ng_3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = True
return
pat_ng_non3rdsing = c('<det_pos>? <pdts>? <modifiers>* NNS')
def act_ng_non3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = False
return
pat_pron = c('WP | PRP | PRP2 | PRPS')
def act_pron(self, m):
self.inherit_prop(m)
w = m.submatch.item
if ispos(w, "PRP2") or ispos(w, "PRPS"):
m.prop["3rdsing"] = False
elif ispos(w, "PRP"):
m.prop["3rdsing"] = True
return
pat_ng = c('<ng_non3rdsing> | <pron> | <ng_3rdsing> ')
def act_ng(self, m):
self.inherit_prop(m, m.submatch)
return
pat_adv1 = c('RB')
pat_there = c('"there" | "here"')
pat_have1 = c('"have" | "\'ve"')
pat_has1 = c('"has" | "\'s"')
pat_had1 = c('"had" | "\'d"')
pat_is1 = c('"is" | "isn\'t" | "\'s"')
pat_are1 = c('"are" | "aren\'t" | "\'re"')
pat_rel1 = c('"which" | "who" | "whom" | "that"')
pat_vg_ven = c('VBN')
pat_vg_ving = c('VBG | "being" <vg_ven>')
pat_vg_perf = c('<adv1>? <vg_ven> | "been" <adv1>? <vg_ven> | "been" <adv1>? <vg_ving>')
# Verb group infinitive - ignore
pat_vg_inf = c('MD <adv1>? "be" <vg_ving> | MD <adv1>? "be" <vg_ven> | MD <adv1>? VB')
def act_vg_inf(self, m):
self.inherit_prop(m)
return
# Verb group past tense - ignore
pat_vg_past = c('<had1> <vg_perf> | VBD')
act_vg_past = act_vg_inf
pat_vg_non3rdsing = c('<have1> <vg_perf> | <are1> <vg_ving> | VBP')
def act_vg_non3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = False
return
pat_vg_3rdsing = c('<has1> <vg_perf> | <is1> <vg_ving> | VBZ | ' +
'MDZ <adv1>? "be" <vg_ving> | MDZ <adv1>? "be" <vg_ven> | MDZ <adv1>? VB')
def act_vg_3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = True
return
pat_be_non3rdsing = c('"are" | "\'re" | "were" | "weren\'t"')
act_be_non3rdsing = act_vg_non3rdsing
pat_be_3rdsing = c('"is" | "isn\'t" | "\'s" | "was" | "wasn\'t"')
act_be_3rdsing = act_vg_3rdsing
pat_vg_there = c('<there> (<be_non3rdsing> | <be_3rdsing>)')
def act_vg_there(self, m):
self.inherit_prop(m, m.subseq[1].submatch)
return
pat_vg = c('<vg_inf> | <vg_past> | <vg_non3rdsing> | <vg_3rdsing>')
def act_vg(self, m):
self.inherit_prop(m, m.submatch)
return
pat_rel = c('IN? <rel1>')
pat_pp = c('IN <ng>')
pat_sv1_check = c('<ng> <adv1>? <pp>? <rel>? <vg>')
def act_sv1_check(self, m):
self.check_sv(m, m.subseq[0], m.subseq[4])
return
pat_sv2_check = c('<ng> <adv1>? <rel>? <vg>')
def act_sv2_check(self, m):
self.check_sv(m, m.subseq[0], m.subseq[3])
return
pat_sv3_check = c('<vg_there> <ng>')
def act_sv3_check(self, m):
self.check_sv(m, m.subseq[1], m.subseq[0])
return
pat_ng_single = c('(<det_pos>? <pdts>?) (<modifiers>* (NN | NNR))')
def act_ng_single(self, m):
if exists(lambda w: w.processed, m.getseq()):
return
(mdet, mnoun) = (m.subseq[0], m.subseq[1])
if mdet.subseq[0].repseq:
self.inherit_prop(m, mdet.subseq[0].repseq[0]) # inherit <det_pos>
else:
self.inherit_prop(m)
w = mnoun.subseq[1].submatch.item
if ispos(w, "NNR") or w.is_sent:
m.prop["determiner"] = True
if mdet.subseq[1].repseq:
if self.check_ng(m, mdet, mnoun, mdet.subseq[1].repseq[0].prop["plural"]):
return
self.check_ng(m, mdet, mnoun, False)
return
pat_ng_plural = c('(<det_pos>? <pdts>?) (<modifiers>* NNS)')
def act_ng_plural(self, m):
if exists(lambda w: w.processed, m.getseq()):
return
(mdet, mnoun) = (m.subseq[0], m.subseq[1])
if mdet.subseq[0].repseq:
self.inherit_prop(m, mdet.subseq[0].repseq[0]) # inherit <det_pos>
else:
self.inherit_prop(m)
m.prop["determiner"] = True
if mdet.subseq[1].repseq:
if self.check_ng(m, mdet, mnoun, mdet.subseq[1].repseq[0].prop["plural"]):
return
self.check_ng(m, mdet, mnoun, True)
return
pat_ng_check = c('<ng_single> | <ng_plural>')
del c
def check_sv(self, m, ms, mv):
if exists(lambda w: w.processed, m.getseq()):
return
try:
ms.prop.unify(mv.prop)
except UnificationError:
self.observer(GrammarVerbAgreementError(ms, mv))
for w in m.getseq():
w.processed = True
return
def check_ng(self, m, mdet, mnoun, plural):
for w in m.getseq():
w.processed = True
if self.check_plural:
try:
m.prop["plural"] = plural
except UnificationError:
self.observer(GrammarNounAgreementError(mdet, mnoun))
return True
if self.check_determiner and not m.prop["determiner"]:
self.observer(GrammarNonDeterminerError(m))
return True
return False
##
##
class ConstraintChecker(AbstractFilter):
def __init__(self, next_filter, warntypes, debug_action=False):
AbstractFilter.__init__(self, next_filter)
self.actionset = ParsePatternActionSet(self.notify, warntypes, debug_action)
self.warntypes = warntypes
return
def notify(self, e):
self.feed_next((self.sent, e))
return
def feed(self, sent):
if sent.words[0].s == "[[":
return
for w in sent.words:
if w.is_sent:
self.feed(w.s)
self.sent = sent
if "sv1" in self.warntypes:
self.actionset.perform_longest_first("sv1_check", sent.words)
if "sv2" in self.warntypes:
self.actionset.perform_longest_first("sv2_check", sent.words)
if "sv3" in self.warntypes:
self.actionset.perform_longest_first("sv3_check", sent.words)
self.actionset.perform_longest_first("ng_check", sent.words)
return
#
if __name__ == "__main__":
if sys.argv[1] == "-t":
docproc = TexProcessor
elif sys.argv[1] == "-l":
docproc = HTMLProcessor
elif sys.argv[1] == "-p":
docproc = PlainTextProcessor
else:
assert 0
import dictionary
dict = dictionary.Dictionary("LOCAL/dict.txt")
out = TerminalOutput()
pipeline = docproc(TextTokenizer(SentenceSplitter(POSTagger(dict, POSTagFixer(ConstraintChecker(out, ["sv1","sv2","sv3","det","plural"]))))))
pipeline.read(pstring.PFile(sys.stdin))
|
An Assessed Internship is an excellent way to get to know Shell from the inside and immerse yourself in our industry. It will also help you decide if your potential career is right for you. You’ll get full day-to-day involvement in actual projects, selected to match your interests and abilities.
A supervisor and mentor will support you directly and you’ll undertake regular assessments throughout your internship to ensure you get the most from the experience and receive feedback on your performance.
You’ll be evaluated on your capacity for analysis, decision-making and creating workable solutions, as well as your drive and enthusiasm, resilience and confidence.
During an Assessed Internship, you’ll have a formal mid-term review with your mentor and supervisor. Following this, there will be a final review where you’ll be asked to complete an online simulation exercise and written task. You’ll also be asked to prepare a final presentation about your project.
Succeed in your Assessed Internship and you could be offered a place on the Shell Graduate Programme.
Search for internship opportunities below on this page, then apply and upload your CV.
We will review your application before inviting you to complete our online assessment. This starts with a competency-based questionnaire around twenty minutes long. If you successfully complete this part of the assessment, we'll send you an e-invite to part two. This is made up of a decision-making task and a final, timed problem-solving task, which you'll have 18 minutes to complete.
If you make it to the interview stage we will either talk with you over the phone or meet you face-to-face.
Passing the interview could see you moving on to the next step, an Assessed Internship. You'll have a mentor and supervisor who will guide you through the process and help you get a clear sense of what it's like to work for our global company. Halfway through your internship you'll have a mid-term review, followed by a range of exercises that include an online simulation exercise and written task. You'll also prepare a presentation based on the project you've been working on at Shell, before a final review.
We will be in touch to let you know what opportunities there are on the Shell Graduate Programme.
Thank you for considering Shell and good luck!
|
from scipy.optimize import minimize
import pandas as pd, random
import numpy as np, datetime
import scipy.stats
FLAG_BAD_RETURN=-99999.0
CALENDAR_DAYS_IN_YEAR = 365.25
BUSINESS_DAYS_IN_YEAR = 256.0
ROOT_BDAYS_INYEAR = BUSINESS_DAYS_IN_YEAR**.5
WEEKS_IN_YEAR = CALENDAR_DAYS_IN_YEAR / 7.0
ROOT_WEEKS_IN_YEAR = WEEKS_IN_YEAR**.5
MONTHS_IN_YEAR = 12.0
ROOT_MONTHS_IN_YEAR = MONTHS_IN_YEAR**.5
ARBITRARY_START=pd.datetime(1900,1,1)
DEFAULT_CAPITAL = 1.0
DEFAULT_ANN_RISK_TARGET = 0.16
contract_month_codes = ['F', 'G', 'H', 'J', 'K', 'M','N', 'Q', 'U', 'V', 'X', 'Z']
contract_month_dict = dict(zip(contract_month_codes,\
range(1,len(contract_month_codes)+1)))
def shift(lst,empty):
res = lst[:]
temp = res[0]
for index in range(len(lst) - 1): res[index] = res[index + 1]
res[index + 1] = temp
res[-1] = empty
return res
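# Worked example: shift([1, 2, 3], None) -> [2, 3, None]; the list is
# shifted left by one and the final slot is filled with `empty`.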
def stitch_prices(dfs, price_col, dates):
res = []
datesr = list(reversed(dates))
dfsr = list(reversed(dfs))
dfsr_pair = shift(dfsr,pd.DataFrame())
for i,v in enumerate(datesr):
tmp1=float(dfsr[i].ix[v,price_col])
tmp2=float(dfsr_pair[i].ix[v,price_col])
dfsr_pair[i].loc[:,price_col] = dfsr_pair[i][price_col] + tmp1-tmp2
dates.insert(0,'1900-01-01')
dates_end = shift(dates,'2200-01-01')
for i,v in enumerate(dates):
tmp = dfs[i][(dfs[i].index > dates[i]) & (dfs[i].index <= dates_end[i])]
res.append(tmp.Settle)
return pd.concat(res)
def which_contract(contract_list, cycle, offset, expday, expmon):
assert len(contract_list) > 0
start_date = contract_list[contract_list.keys()[0]].head(1).index[0] # first dt of first contract
end_date = contract_list[contract_list.keys()[-1]].tail(1).index[0] # last date of last contract
delta = end_date - start_date
dates = []
for i in range(delta.days + 1):
day = start_date + datetime.timedelta(days=i)
if day.weekday() < 5: dates.append(day)
df = pd.DataFrame(index=dates)
def closest_biz(d): # get closest biz day
diffs = np.abs((d - df.index).days)
return df.index[np.argmin(diffs)]
cycle_d = [contract_month_dict[x] for x in cycle]
df['effcont'] = np.nan
for year in np.unique(df.index.year):
for c in cycle_d:
v = "%d%02d" % (year,c)
exp_d = datetime.datetime(year, c, expday)
if expmon=="prev": exp_d = exp_d - datetime.timedelta(days=30)
df.loc[closest_biz(exp_d),'effcont'] = v
df = df.fillna(method='bfill')
df['effcont'] = df.effcont.shift(-int(offset*2/3 + 3))
return df.fillna(method='ffill')
def create_carry(df, offset, contract_list):
df2 = df.copy()
df2['effcont'] = df2.effcont.astype(str)
def offset_contract(con):
s = pd.to_datetime(con + "15", format='%Y%m%d')
ss = s + datetime.timedelta(days=30*offset)
return "%d%02d" % (int(ss.year), int(ss.month))
df2['carrycont'] = df2.effcont.map(offset_contract)
df2['effprice'] = df2.apply(lambda x: contract_list.get(x.effcont).s.get(x.name) if x.effcont in contract_list else np.nan,axis=1)
df2['carryprice'] = df2.apply(lambda x: contract_list.get(x.carrycont).s.get(x.name) if x.carrycont in contract_list else np.nan,axis=1)
return df2
def ccy_returns(price, forecast):
base_capital = DEFAULT_CAPITAL
daily_risk_capital = DEFAULT_CAPITAL * DEFAULT_ANN_RISK_TARGET / ROOT_BDAYS_INYEAR
ts_capital=pd.Series([DEFAULT_CAPITAL]*len(price), index=price.index)
ann_risk = ts_capital * DEFAULT_ANN_RISK_TARGET
daily_returns_volatility = robust_vol_calc(price.diff())
multiplier = daily_risk_capital * 1.0 * 1.0 / 10.0
numerator = forecast * multiplier
positions = numerator.ffill() / daily_returns_volatility.ffill()
cum_trades = positions.shift(1).ffill()
price_returns = price.diff()
instr_ccy_returns = cum_trades.shift(1)*price_returns
instr_ccy_returns=instr_ccy_returns.cumsum().ffill().reindex(price.index).diff()
return instr_ccy_returns
def skew(price, forecast):
base_capital = DEFAULT_CAPITAL
pct = 100.0 * ccy_returns(price, forecast) / base_capital
return scipy.stats.skew(pct[pd.isnull(pct) == False])
def sharpe(price, forecast):
instr_ccy_returns = ccy_returns(price, forecast)
tval,pval = scipy.stats.ttest_1samp(instr_ccy_returns.dropna(), 0)
mean_return = instr_ccy_returns.mean() * BUSINESS_DAYS_IN_YEAR
vol = instr_ccy_returns.std() * ROOT_BDAYS_INYEAR
return mean_return / vol, tval, pval
def ewma(price, slow, fast):
fast_ewma = pd.ewma(price, span=fast)
slow_ewma = pd.ewma(price, span=slow)
raw_ewmac = fast_ewma - slow_ewma
vol = robust_vol_calc(price.diff())
return raw_ewmac / vol
def bollinger(df,col,lev):
signals = pd.DataFrame(index=df.index)
signals['signal'] = np.nan
middle = pd.rolling_mean(df[col], 40, min_periods=1)
std = pd.rolling_std(df[col], 40, min_periods=1)
df['middle'] = middle
df['top'] = middle+2*std
df['bottom'] = middle-2*std
signals['signal'] = np.where(df[col] > middle+2*std, -1, np.nan)
signals['signal'] = np.where(df[col] < middle-2*std, 1, np.nan)
signals['signal'] = signals['signal'].fillna(method='ffill')
df['ret'] = df[col].pct_change() * signals['signal'].shift(1)
ret = df.ret.dropna() * lev
return ret
def crossover(df,col,lev):
signals = pd.DataFrame(index=df.index)
signals['signal'] = 0
short_ma = pd.rolling_mean(df[col], 40, min_periods=1)
long_ma = pd.rolling_mean(df[col], 100, min_periods=1)
signals['signal'] = np.where(short_ma > long_ma, 1, 0)
df['signal'] = signals['signal'].shift(1)
df['ret'] = df[col].pct_change() * df['signal']
ret = df.ret.dropna() * lev
return ret
def carry(daily_ann_roll, vol, diff_in_years, smooth_days=90):
ann_stdev = vol * ROOT_BDAYS_INYEAR
raw_carry = daily_ann_roll / ann_stdev
smooth_carry = pd.ewma(raw_carry, smooth_days) / diff_in_years
return smooth_carry.fillna(method='ffill')
def estimate_forecast_scalar(x, window=250000, min_periods=500):
target_abs_forecast = 10.
x=x.abs().iloc[:,0]
avg_abs_value=x.mean()
return target_abs_forecast/avg_abs_value
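# Worked example: with target_abs_forecast = 10, a series whose mean
# absolute forecast is 5.0 gets a scalar of 10.0 / 5.0 = 2.0.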
def vol_equaliser(mean_list, stdev_list):
if np.all(np.isnan(stdev_list)):
return (([np.nan]*len(mean_list), [np.nan]*len(stdev_list)))
avg_stdev=np.nanmean(stdev_list)
norm_factor=[asset_stdev/avg_stdev for asset_stdev in stdev_list]
norm_means=[mean_list[i]/norm_factor[i] for (i, notUsed) in enumerate(mean_list)]
norm_stdev=[stdev_list[i]/norm_factor[i] for (i, notUsed) in enumerate(stdev_list)]
return (norm_means, norm_stdev)
def apply_with_min_periods(xcol, my_func=np.nanmean, min_periods=0):
not_nan=sum([not np.isnan(xelement) for xelement in xcol])
if not_nan>=min_periods:
return my_func(xcol)
else:
return np.nan
def vol_estimator(x, using_exponent=True, min_periods=20, ew_lookback=250):
vol=x.apply(apply_with_min_periods,axis=0,min_periods=min_periods, my_func=np.nanstd)
stdev_list=list(vol)
return stdev_list
def mean_estimator(x, using_exponent=True, min_periods=20, ew_lookback=500):
means=x.apply(apply_with_min_periods,axis=0,min_periods=min_periods, my_func=np.nanmean)
mean_list=list(means)
return mean_list
def str2Bool(x):
if type(x) is bool:
return x
return x.lower() in ("t", "true")
def correlation_single_period(data_for_estimate,
using_exponent=True, min_periods=20, ew_lookback=250,
floor_at_zero=True):
## These may come from config as str
using_exponent=str2Bool(using_exponent)
if using_exponent:
## If we stack there will be duplicate dates
## So we massage the span so it's correct
## This assumes the index is at least daily and on same timestamp
## This is an artifact of how we prepare the data
dindex=data_for_estimate.index
dlenadj=float(len(dindex))/len(set(list(dindex)))
## Usual use for IDM, FDM calculation when whole data set is used
corrmat=pd.ewmcorr(data_for_estimate, span=int(ew_lookback*dlenadj), min_periods=min_periods)
## only want the final one
corrmat=corrmat.values[-1]
else:
## Use normal correlation
## Usual use for bootstrapping when only have sub sample
corrmat=data_for_estimate.corr(min_periods=min_periods)
corrmat=corrmat.values
if floor_at_zero:
corrmat[corrmat<0]=0.0
return corrmat
def fix_mus(mean_list):
def _fixit(x):
if np.isnan(x):
return FLAG_BAD_RETURN
else:
return x
mean_list=[_fixit(x) for x in mean_list]
return mean_list
def fix_sigma(sigma):
def _fixit(x):
if np.isnan(x):
return 0.0
else:
return x
sigma=[[_fixit(x) for x in sigma_row] for sigma_row in sigma]
sigma=np.array(sigma)
return sigma
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def neg_SR(weights, sigma, mus):
## Returns minus the Sharpe Ratio (as we're minimising)
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def un_fix_weights(mean_list, weights):
def _unfixit(xmean, xweight):
if xmean==FLAG_BAD_RETURN:
return np.nan
else:
return xweight
fixed_weights=[_unfixit(xmean, xweight) for (xmean, xweight) in zip(mean_list, weights)]
return fixed_weights
def optimise( sigma, mean_list):
## will replace nans with big negatives
mean_list=fix_mus(mean_list)
## replaces nans with zeros
sigma=fix_sigma(sigma)
mus=np.array(mean_list, ndmin=2).transpose()
number_assets=sigma.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
## anything that had a nan will now have a zero weight
weights=ans['x']
## put back the nans
weights=un_fix_weights(mean_list, weights)
return weights
def sigma_from_corr_and_std(stdev_list, corrmatrix):
stdev=np.array(stdev_list, ndmin=2).transpose()
sigma=stdev*corrmatrix*stdev
return sigma
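# A hedged usage sketch of the optimiser above (toy two-asset inputs,
# illustrative only):
#
# corr = np.array([[1.0, 0.5], [0.5, 1.0]])
# sigma = sigma_from_corr_and_std([0.1, 0.1], corr)
# weights = optimise(sigma, [0.05, 0.05])
# # identical assets, so weights come out close to [0.5, 0.5]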
def markosolver(period_subset_data):
mean_list=mean_estimator(period_subset_data)
corrmatrix=correlation_single_period(period_subset_data)
stdev_list=vol_estimator(period_subset_data)
(mean_list, stdev_list)=vol_equaliser(mean_list, stdev_list)
sigma=sigma_from_corr_and_std(stdev_list, corrmatrix)
unclean_weights=optimise( sigma, mean_list)
weights=unclean_weights
diag=dict(raw=(mean_list, stdev_list), sigma=sigma, mean_list=mean_list,
unclean=unclean_weights, weights=weights)
return (weights, diag)
def bootstrap_portfolio(subset_data, monte_runs=100, bootstrap_length=50):
all_results=[bs_one_time(subset_data, bootstrap_length) for unused_index in range(monte_runs)]
### We can take an average here; only because our weights always add
### up to 1. If that isn't true then you will need to do some kind
### of renormalisation
weightlist=np.array([x[0] for x in all_results], ndmin=2)
diaglist=[x[1] for x in all_results]
theweights_mean=list(np.mean(weightlist, axis=0))
diag=dict(bootstraps=diaglist)
return (theweights_mean, diag)
def bs_one_time(subset_data, bootstrap_length):
## choose the data
bs_idx=[int(random.uniform(0,1)*len(subset_data)) for notUsed in range(bootstrap_length)]
returns=subset_data.iloc[bs_idx,:]
(weights, diag)=markosolver(returns)
return (weights, diag)
def robust_vol_calc(x, days=35, min_periods=10, vol_abs_min=0.0000000001, vol_floor=True,
floor_min_quant=0.05, floor_min_periods=100,
floor_days=500):
"""
Robust exponential volatility calculation, assuming daily series of prices
We apply an absolute minimum level of vol (absmin);
and a volfloor based on lowest vol over recent history
:param x: data
:type x: Tx1 pd.Series
:param days: Number of days in lookback (*default* 35)
:type days: int
:param min_periods: The minimum number of observations (*default* 10)
:type min_periods: int
:param vol_abs_min: The size of absolute minimum (*default* =0.0000000001) 0.0= not used
:type vol_abs_min: float or None
:param vol_floor: Apply a floor to volatility (*default* True)
:type vol_floor: bool
:param floor_min_quant: The quantile to use for volatility floor (eg 0.05 means we use 5% vol) (*default 0.05)
:type floor_min_quant: float
:param floor_days: The lookback for calculating volatility floor, in days (*default* 500)
:type floor_days: int
:param floor_min_periods: Minimum observations for floor - until reached floor is zero (*default* 100)
:type floor_min_periods: int
:returns: pd.Series -- volatility measure
"""
# Standard deviation will be nan for first 10 non nan values
vol = pd.ewmstd(x, span=days, min_periods=min_periods)
vol[vol < vol_abs_min] = vol_abs_min
if vol_floor:
# Find the rolling 5% quantile point to set as a minimum
vol_min = pd.rolling_quantile(
vol, floor_days, floor_min_quant, floor_min_periods)
# set this to zero for the first value then propagate forward, ensures
# we always have a value
vol_min.set_value(vol_min.index[0], 0.0)
vol_min = vol_min.ffill()
# apply the vol floor
vol_with_min = pd.concat([vol, vol_min], axis=1)
vol_floored = vol_with_min.max(axis=1, skipna=False)
else:
vol_floored = vol
return vol_floored
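# Usage sketch (matches the legacy pandas API used above, e.g. pd.ewmstd):
# price = pd.Series(np.cumsum(np.random.randn(500)))
# vol = robust_vol_calc(price.diff())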
def ewmac(price, Lfast, Lslow):
price=price.resample("1B", how="last")
fast_ewma = pd.ewma(price, span=Lfast)
slow_ewma = pd.ewma(price, span=Lslow)
raw_ewmac = fast_ewma - slow_ewma
return raw_ewmac.PRICE / robust_vol_calc(price.diff()).vol
|
Mifan Mama supports many orphans around China with cash to help them achieve their educational goals. This is a powerful means of improving their life chances and breaking the cycle of poverty which often goes with life without parents.
Below are pictures of a number of children that need your help today.
You can donate towards their care by clicking on the PayPal button below. Your donation will make a difference to the lives of these children.
|
'''
Provides commands to globally modify the bot's behaviour.
'''
import logging
import pydoc
from .users_plugin import isadmin
from tombot.registry import get_easy_logger, Command, Subscribe, BOT_START
from tombot.registry import COMMAND_DICT, COMMAND_CATEGORIES
from tombot.helper_functions import determine_sender, extract_query, reply_directly
LOGGER = get_easy_logger('plugins.system')
HELP_OVERVIEW = ''
@Command('ping', 'system')
def ping_cb(bot=None, message=None, *args, **kwargs):
''' Return 'pong' to indicate non-deadness. '''
return 'Pong'
@Command('forcelog', 'system', hidden=True)
def forcelog_cb(bot, message, *args, **kwargs):
''' Write a message to the root logger. '''
logging.info('Forcelog from %s: %s', message.getFrom(), message.getBody())
return
@Command(['shutdown', 'halt'], 'system')
def shutdown_cb(bot, message, *args, **kwargs):
''' Shut down the bot. '''
LOGGER.info('Stop message received from %s, content "%s"',
message.getFrom(), message.getBody())
if not isadmin(bot, message):
LOGGER.warning('Unauthorized shutdown attempt from %s',
determine_sender(message))
return 'Not authorized.'
bot.stop()
@Command('restart', 'system')
def restart_cb(bot, message, *args, **kwargs):
''' Restart the bot. '''
LOGGER.info('Restart message received from %s, content "%s"',
message.getFrom(), message.getBody())
if not isadmin(bot, message):
LOGGER.warning('Unauthorized shutdown attempt from %s',
determine_sender(message))
return 'Not authorized.'
bot.stop(True)
@Command('logdebug', 'system')
def logdebug_cb(bot, message=None, *args, **kwargs):
''' Temporarily set the loglevel to debug. '''
if message:
if not isadmin(bot, message):
return 'Not authorized.'
logging.getLogger().setLevel(logging.DEBUG)
return 'Ok.'
@Command('loginfo', 'system')
def loginfo_cb(bot, message=None, *args, **kwargs):
''' Temporarily (re)set the loglevel to info. '''
if message:
if not isadmin(bot, message):
return 'Not authorized.'
logging.getLogger().setLevel(logging.INFO)
return 'Ok.'
@Subscribe(BOT_START)
def build_help_cb(bot, *args, **kwargs):
'''
Build the help overview so it can be cached and poked at from shell.
'''
global HELP_OVERVIEW
HELP_OVERVIEW += 'Available commands:\n'
for category in sorted(COMMAND_CATEGORIES):
if category:
HELP_OVERVIEW += '- {}:\n'.format(category)
for command in sorted(COMMAND_CATEGORIES[category]):
HELP_OVERVIEW += '{}: {}\n'.format(
command[0], pydoc.splitdoc(command[2].__doc__)[0])
@Command(['help', '?'], 'system')
@reply_directly
def help_cb(bot, message, *args, **kwargs):
'''
Give moral and spiritual guidance in using this bot.
When you select one command, a longer text will be sent!
'''
cmd = extract_query(message)
if not cmd:
return HELP_OVERVIEW
else:
try:
return pydoc.getdoc(COMMAND_DICT[cmd.upper()])
except KeyError:
return 'Sorry, that command is not known.'
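# A hedged sketch of registering another command with the same decorator
# (hypothetical 'echo' command, not part of this plugin):
#
# @Command('echo', 'system')
# def echo_cb(bot, message, *args, **kwargs):
#     ''' Echo the query text back to the sender. '''
#     return extract_query(message)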
|
While many students were traveling home for Thanksgiving break Nov. 16, eight members of the University of Miami’s UAstronomy club embarked on a 13-hour drive to the Great Smoky Mountains for a breathtaking, yet challenging, five-day backpacking expedition.
Each participant came face to face with both the trials and joys of the natural world, hiking miles uphill through a nearly untouched woodland. They saw the Smoky Mountains’ iconic rapids and enjoyed sleeping under a sky littered with shimmering stars rather than light pollution.
Because the star-gazing adventurers carried everything they needed to survive on their backs, the journey was no walk in the park. On average, each person’s backpack weighed about 50 pounds.
However, many of the students who went on the trip said the beautiful views made the long days, heavy baggage and freezing temperatures all worthwhile. By the end of their expedition, the hikers had trekked over twenty miles, scaling more than 4,000 feet of elevation along the way.
Sihan Chen, UAstronomy vice president and an international student from China, said this was his first time fully experiencing America’s mountainlands.
Kashuv also said he enjoyed reaching the mountain’s summit.
“It was a very good trip, and I would definitely do it again,” said senior Alexander Berne, who is double majoring in mechanical engineering and physics and minoring in mathematics. “We saw things that we’ve never seen before and had experiences that are pretty hard to come by just casually,” he said.
Berne is currently a Sky Watcher for UAstronomy, keeping an eye out for occurrences in the night sky. He served as president last fall semester, and played a key role in reviving the club four years ago. Since then, UAstronomy has grown to where it is today in part due to his dedication.
This is not the first time UAstronomy has taken its members on an unforgettable journey, and it will likely not be the last. Last year, members flew to Iceland to experience the natural wonders of the country first-hand. There is now talk among board members about traveling to the Atacama Desert in Chile this summer to view a total solar eclipse.
|
# Generated by Django 2.2.3 on 2019-07-14 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ElectionSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created')),
('date_updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated')),
('key', models.CharField(default=None, max_length=30, unique=True, verbose_name='key')),
('value', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='value')),
],
options={
'verbose_name_plural': 'election settings',
'verbose_name': 'election setting',
'ordering': ['key'],
},
),
migrations.AddField(
model_name='batch',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='batch',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddField(
model_name='section',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='section',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddField(
model_name='user',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='user',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddIndex(
model_name='electionsetting',
index=models.Index(fields=['key'], name='core_electi_key_1a53c9_idx'),
),
]
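
# Applying this migration follows Django's usual workflow; a minimal sketch
# (assuming a configured project with this 'core' app installed -- shown for
# illustration only):
#
#     from django.core.management import call_command
#
#     call_command('migrate', 'core')          # runs 0001_initial, then this one
#     call_command('showmigrations', 'core')   # lists applied migrations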
|
Raith officials have advised us that adult admission at Starks Park tomorrow is by cash at the turnstile.
However, concession admission is by ticket only.
These tickets can be purchased at the ticket office window in the Main Stand.
Stewards will point you in the right direction.
|
import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell
class RNNModel(object):
def __init__(self,args,text_data):
self.args = args
self.text_data = text_data
self.input_x = None
self.input_y = None
self.dropout = None
self.losses = None
self.train = None
self.prediction = None
self.accuracy = None
self.build_network()
def build_network(self):
embedding_size = self.args.embedding_size
rnn_cell_size = self.args.rnn_cell_size
batch_size = self.args.batch_size
learning_rate = self.args.learning_rate
max_doc_len = self.args.max_doc_len
label_num = self.text_data.label_num
vocab_size = self.text_data.vocab_size
print('vocab_size: {} label_num: {} max_doc_len: {} batch_size: {} embedding_size: {} rnn_cell_size: {}'.format(vocab_size,label_num,max_doc_len,batch_size,embedding_size,rnn_cell_size))
self.input_x = tf.placeholder(tf.int32,[None,max_doc_len],name='input_x')
self.input_y = tf.placeholder(tf.float32,[None,label_num],name='input_y')
self.dropout = tf.placeholder(tf.float32,name='drop_out')
We = tf.Variable(tf.random_uniform([vocab_size,embedding_size],-1.0,1.0))
embedding_char = tf.nn.embedding_lookup(We,self.input_x)
embedding_char_expand = tf.reshape(embedding_char,[-1,embedding_size])
W_in = tf.Variable(tf.random_uniform([embedding_size,rnn_cell_size]))
b_in = tf.Variable(tf.constant(0.1,dtype=tf.float32,shape=[rnn_cell_size,]))
X_in = tf.matmul(embedding_char_expand,W_in)+b_in
Xs = tf.reshape(X_in,[-1,max_doc_len,rnn_cell_size])
cell = BasicLSTMCell(rnn_cell_size)
init_state = cell.zero_state(batch_size,dtype=tf.float32)
outputs,final_state = tf.nn.dynamic_rnn(cell,Xs,initial_state=init_state)
        # outputs shape: (batch, time_step, rnn_cell_size); keep the last time step
        output = outputs[:, -1, :]
        scores = tf.layers.dense(output, label_num)
self.prediction = tf.argmax(scores, 1)
self.losses = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,logits=scores))
self.train = tf.train.AdamOptimizer(learning_rate).minimize(self.losses)
correct_predictions = tf.equal(self.prediction,tf.argmax(self.input_y,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,tf.float32))
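
# A minimal training-step sketch for this model, assuming TensorFlow 1.x and
# two stand-in objects for `args` and `text_data` (all values below are
# illustrative, not part of the original project):
#
#     from types import SimpleNamespace
#
#     args = SimpleNamespace(embedding_size=128, rnn_cell_size=128,
#                            batch_size=32, learning_rate=1e-3, max_doc_len=100)
#     text_data = SimpleNamespace(label_num=2, vocab_size=10000)
#
#     model = RNNModel(args, text_data)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         # batch_x: (32, 100) int token ids, batch_y: (32, 2) one-hot labels
#         _, loss = sess.run([model.train, model.losses],
#                            feed_dict={model.input_x: batch_x,
#                                       model.input_y: batch_y,
#                                       model.dropout: 0.5})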
|
The Punjab Examination Commission (PEC) announces the Bahawalpur Board 8th class result, together with the 5th class result, at the end of March (the 31st in the year covered here). Students from Bahawalpur, Bahawalnagar, Rahim Yar Khan and the other Punjab districts can check their results online by roll number on the board's official website, and gazette copies of the results are also available for download.
|
import attr
import sqlite3
import logging
from ..entities import Source
from .nodes import NodesProcessor
from ..connectors import BmaConnector
from duniterpy.api import bma, errors
@attr.s
class SourcesProcessor:
"""
:param sakia.data.repositories.SourcesRepo _repo: the repository of the sources
:param sakia.data.connectors.bma.BmaConnector _bma_connector: the bma connector
"""
_repo = attr.ib()
_bma_connector = attr.ib()
_logger = attr.ib(default=attr.Factory(lambda: logging.getLogger('sakia')))
@classmethod
def instanciate(cls, app):
"""
Instanciate a blockchain processor
:param sakia.app.Application app: the app
"""
return cls(app.db.sources_repo,
BmaConnector(NodesProcessor(app.db.nodes_repo), app.parameters))
def commit(self, source):
try:
self._repo.insert(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already known : {0}".format(source.identifier))
def amount(self, currency, pubkey):
"""
Get the amount value of the sources for a given pubkey
:param str currency: the currency of the sources
:param str pubkey: the pubkey owning the sources
:return:
"""
sources = self._repo.get_all(currency=currency, pubkey=pubkey)
return sum([s.amount * (10**s.base) for s in sources])
def available(self, currency, pubkey):
""""
:param str currency: the currency of the sources
:param str pubkey: the owner of the sources
:rtype: list[sakia.data.entities.Source]
"""
return self._repo.get_all(currency=currency, pubkey=pubkey)
def consume(self, sources):
"""
:param currency:
:param sources:
:return:
"""
for s in sources:
self._repo.drop(s)
def insert(self, source):
try:
self._repo.insert(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already exist : {0}".format(source))
def drop(self, source):
try:
self._repo.drop(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already dropped : {0}".format(source))
def drop_all_of(self, currency, pubkey):
self._repo.drop_all(currency=currency, pubkey=pubkey)
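
# A small worked example of how `amount` combines value and base (numbers are
# illustrative): a source with amount=25 and base=2 contributes 25 * 10**2 =
# 2500 units, so together with a second source (amount=3, base=0) the total
# returned for that pubkey would be 2503.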
|
12:45pm lunch around 550-600 i think...lunch was healthy and balanced, but have gas due to not using the bathroom this AM yup...ugh!
4:45 snack before gym: bar and caffeine 100 and earlier was a darn really tasty chocolate from a coworker 50. and an orange 100.
Going to bed sorta annoyed at the day's events.
Progress as of today: 1.4 lbs lost so far, only 3.6 lbs to go!
It was a typical relationship type of thing that happened late in the day with a friend, all is basically ok now.
Work 8am: banana 120, wrap 60 with extra cream cheese 150, turkey pepperoni 100?
Lunch: healthy, lower carb and decided to only have an orange for carbs.. turkey with skin 350 at least, roasted greens 150, orange 100. maybe 650.
dinner: maybe hot chicken and some veg...or at least staying away from my beloved light ice cream...i may have dessert for dinner - canned pumpkin sweetened and with cinnamon, stevia, cooked cranberries, yogurt...we'll see what i feel like..
dinner was high, but, no sugar...around 700. it was all healthy....extra on rice and chick peas and the rest was sauteed greens / cabbage and mock tofu fish....VERY tasty and a GREAT change of pace from my sugary dinners. Trying to improve my skin.
I realize i am eating too much sugar (well have been) and i'm working to be a little healthier and at least clear up my skin.
...anyways, i have the CD's and lyrics....fully prepared to learn the music...feels good to challenge myself and it's fun!
I like a few songs of hers - My Church and 80's Mercedes - I don't listen to country radio too often, I think these are the only two I know of her singing. good luck learning the albums.
I split a bottle of KeVita kombucha with my husband and I can't say I was a fan. I will stick to the green tea.
oh wow! the kevita kombucha is the strongest...i had 1.5 bottles this AM...so tired right now...so so so tired.
That is a fun challenge for yourself. It will make more fun to join in at the concert. Have a great day J.
yes i figure it's a gift to myself :-) good for my mind, great for enjoying the concert...and her music is good - and understandable...and easy to sing with! so it's a plus...she has, i think, a more alto (lower) voice which is what i am...works out quite nicely this time around!
8am breakfast: wrap 60 with turkey pepperoni 70, potato 100, saag 200?, yogurt with cooked cranberries 200: 650..
11am ice cream lol...100, and 30 min later some more almonds 50, tea with monk fruit (friend gave me a sample of this calorie free sweetener) added in.
2pm healthy, larger, lunch: tons of sauteed veggies 300? at least, chicken 100, and chips 150: 550-600.
gym around 3-3:30 till evening. early meal & bed again early tonight.
bigger dinner with little ice cream for dessert (but dinner was healthy sauteed greens and turkey breast cooked with skin on) 600.
total today is still within healthy range and no bingeing: 2150, good.
Plan is to pick up a lot of prepared food to get me thru this week pretty much and later the gym. No other plans, content just staying home and relaxing...body is demanding it...in some ways i should skip gym...but....as long as i get lots of sleep and everything, the gym should be good for me.
We had a snowstorm until like 1am last night. I was supposed to drive to visit my grandma but my dad suggested i don't drive there as roads may not be good...i'll take his advice as I still need to run some errands anyways and pick up groceries for this week...and i need to go to the gym today...Lots to do anyways....but not having to drive to grandma's area will save MAJOR time.
Not that i don't want to see her, but lately i am feeling really tired and sleeping a lot. It's the winter season i'm sure. I slept really well last night again. Lately i just want to sleep - but it seems to be really healthy right now for me to get in these extra z's as it's winter and i'm training harder.
Grandma is not alert, by the way. So, it would be nice for me to go see her, but truth is taking care of my own health is much more important at this time.
Good idea to stay in town. Not worth the risk! Glad to hear that I'm not the only one sleeping more these days. I attribute it to winter as well.
yeah, even the gym by me is more local than visiting my grandma. It's more important right now and thru winter to take care of myself....visiting her is important but not worth the stress today of getting ready early and getting out so early on the roads, etc.
My dad may not understand that - but last night he told me it was ok not to go - so that's what i chose today.
i don't have a lot of wiggle room this winter for stressful events...lol....but relaxation i have extra room for...sorry for such a strange comment back!
I know what you mean the weather is challenging here also. Get that rest you need and take care sound like you have all your food planned out. Have a great evening J.
and i am LOVING the extra rest.
I feel happier and happier.
slept really well :-) yes a sleeping pill, but also yes to treating my body sooo right!
My sleep has improved greatly as I am making way better choices. Slowly also improving my diet, eating, drinking (caffeine) habits too.
12:30pm 300 cal snack..1150 or so.
content on relaxing today, lots of sleeping - fell back to sleep after 12:30pm snack...and doing laundry now finishing it up by 6:30pm = relaxed saturday.!!!
Sounds like a good day, HP.
Love to relax....do it a lot at times!
It's nice to be home & relax...I think my body knows best...and it knows to rest with winter!
Work: wrap 70, banana 130, cream cheese 100, peanut butter 150: 450, and some green tea.
And a processed but high protein bar which i needed, the peanut butter and stuff is good, but not as satisfying as meat in the AM. 220.
total so far: around 670... nice.
1pm Healthy lunch: low protein (dinner will be chicken over a salad...): lots of veggies for lunch (glad i went food shopping for extra food this week, worked out really well with extra & didn't throw a thing out)...a lot...maybe like 750?
I am feeling great...so nice.
My skin on my face is doing MUCH, MUCH better already...just by skipping the coffee and the ice cream...I still had a little of my diet soda and stuff...but quantities have been lessened and skin is definitely healing big time...it's finally clearing up and that feels so wonderful!
Luckily the snow didn't amount to more than a coating. So thankful for that. Now an easier commute along with not needing to shovel the few feet in front of my garage. I may bring the shovel just to get all snow removed so it doesn't freeze over, but it's only a light coating. Yay.
And then in March, when I don't have any days left, I told myself I can take one extra day - no pay - and enjoy one last day off until April when the new days begin again! That's the plan.
One day of no pay isn't going to ruin my bank account and that one day makes me feel so much better about the long month of March.
I'm taking better care of myself than I ever have. It feels really good to do this. I'm not getting sick in the least.
My workouts are good. I keep getting stronger.
My left ankle is getting stronger and so are my legs. I am getting there.
I'm doing really good in my goal to get into the NYPD for this summer! :) Everything is going according to schedule. More walking and getting stronger in my lower body. I feel really good. Better than ever.
Glad your skin is clearing up. You're doing great!
feels good. less salt a bit too as i have been having less meat in the AM (have a lot of salami / pepperoni / salted meats / lox in my wraps)...feels good to take a break.
Sounds like a good plan all the way around. Keep healthy and enjoy J.
Certainly does feel good to be feeling better on inside and out. Lately feeling so good.
Skin is a plus especially helps with self esteem. I'm glad you can feel yourself getting stronger.
Days off are essential, that is why I left my last job because I couldn't earn enough time off for my needs. I have years with the gov't so I get more hours off, it is a blessing.
What was the previous job? not with the gov't?
Everyone deserves time off for their well-being. It's really sad that not everyone can get it.
So you got a little snow, too...We are looking for some...Thanks for your advice...I especially liked the one about drinking a lot of tea or water before eating...hope you have a good day.
1908 6-day average, EXCELLENT!!!!!!!!! To go from almost a 2200 daily avg around New Year's Day to tomorrow around 1900 is pretty good....see!?...stick to your goals...it's manageable :-D !!!!
Office : banana 120, whipped cream cheese 80, wrap 70, peanut butter 160 430 excellent and green tea. not in a meat mood today. but whipped cream cheese and peanut butter was awesome, duh!
lunch: to be healthy... had some basmati rice 150?, some roasted sweet potato 50?, some roasted okra 100, some veggie chicken with a few veggies (all leftovers and tasty!) 250? around 550-600 for lunch. oh and the clementine, calories included.
Home: protein muffin 220 with squash 100 & yogurt 100: 420.. (stevia added to squash).
Congrats on your 1908/6 days avg...that is good. That takes a lot of discipline. Maybe I should take reducing my cals a lot slower instead of expecting to get by on 1700, and end up at 2500 for the day, just go for 1900 a day...that would be a big improvement for me. Have a good evening.
YES...1700 is low. 1200 is too low I think for most women.
...but for me to maintain a low weight (and i really don't have much muscle in lower body) of 115-120lbs, I can eat 1800 and not have a problem with maintenance. Even 1900 seems to be possibly alright to maintain.
You do not need drastic reductions...and...i find that after a binge day, it's hard to eat much lower the following day...work your way into it - as you said - I agree with you.
When I'm at home and counting calories I normally have about 3 days a week at 1200, then the other days are 1300, 1400, or up to 1700.
Yes that's really low and easier to lose weight at those levels...right now mine is maintenance...sometimes i get a hair up in weight bc i like to push my limits with calories, but i've learned my maintenance is in the 1800s or so.
1950 5-day avg and today was a healthy day and far less sugar and caffeine = success.
Here at the office doing my 'thang. Feeling decent despite a lack of sleep. It was the caffeine but i am not worried in the least as tonight I'll get adequate sleep to keep on going.
8:30am breakfast: banana 120, wrap 70 with extra cream cheese 100 and turkey pepperoni 100: 400, decent.
Dinner was totally healthy - but around 700 calories. No added sugars or fake things - except for tofu chicken and some tofu. also included cashews, some white basmati rice and veggies sauteed in some oil - all leftovers.
total today is good - 1850 - success :-D need to get off the quick fix dinners and eat healthier meals which i am starting to do...it helps to have enough already prepared at home. so i will be doing more of this. What i grabbed was one of the lunches i made for this week...so now i must put together another lunch :-D maybe i'll cook some eggs either tonight or tomorrow night to add to friday's lunch since i need a good protein source since i already ate friday's meal tonight! was tasty!
Wax was GREAT. She took long but she did a nice job for half price! NICE! I always tip her $10 which is 50%. Love her!
Feeling very good this week in general - 3-day weekends are NICE! March will be hard for me...but...i did plan for one 3-day weekend so that should really suffice.
There's something in early February i have plans for - on a Thursday early evening it starts in NYC. It's a country concert. I would have liked to take the Thursday & Friday off so that I could get home late that Thursday (which i still will get home late) and sleep in & stuff...but it's a no go.
I need my days (2 left) to last till end of March..so i have to choose which day i want to take off - that Thursday or that Friday the day after the concert.
AND MAYBE I WILL JUST TAKE BOTH THE 2 DAYS AND CALL IT A LONG A$$ WEEKEND...BUT THEN MARCH WILL BE LONGGG. LOL. ONE DAY, YEP, THAT'S ENOUGH.
Feels good to start making better choices.
Home: a lot of ACV but didn’t work bc I ate dinner lil later than expected. Really nice to eat a crab cake as well as half a 6oz filet mignon. Good to get the real protein in me and natural food last night, thanks to my coworker.
8:30am work: banana 120, wrap 70 with cream cheese 100, lox 50: 350., green tea throughout morning, 2 cups, no sugar no equal.
11am very hungry! Snack: very low sugar bar, 230 cal. high protein and decent fat. natural type of bar - i may buy these on Amazon - they are good.
1pm - Lunch: healthy – some starch (few chunks of roasted sweet potato! tasted great!), some meat (tiny portion of around 1.5oz filet mignon leftovers; the other half will be tomorrow!) in a bit of oil: 500? - if I finish it.
1200 after gym...will have something small tonight...maybe even a protein bar on the way home and bed early. tired today. NO ICE CREAM. but i may have some squash i cooked up in a protein muffin - as the total sugar is 10g but i doubt i'll wait till i get home tonight to eat...we'll see. either way, it's early to bed tonight again.
I have no choice but to make the right choice – I am laying off all equal and Monster energy drinks until Friday because my skin is inflamed and breaking out too much in whiteheads which is full-out inflammation. I sorta need to fix this problem since I am an adult….perhaps I should start facing the problem like an adult. Either way, I am not going to drink the Monster Energy drinks but still drinking caffeine in a couple different forms this week. I’m going to LESSEN the bad stuff, see if it helps, versus lay off everything bad which would be a bit of a nuisance as it would really mess with just about everything I eat. Ohhh – I am also not going to have any ice cream this week until Friday. There’s a lot of sugar in it when I eat the whole pint (around 20g) which can be affecting my acne also – in addition to it being a milk product.
So, I will see on Friday if there’s any change in inflammation by not having the Monster drinks, less caffeine, and no Halo Top Ice Cream. I do need to clear up my skin.
I have cut my caffeine down as well. I can relate to the skin issues, I have the worst skin...ever! Hope you have a great day.
Yeah, i gotta do something about my skin bc it's really not improved for a long time. I want cleaner & clearer skin like I should have at 34yrs old...so I guess that can be an additional New Years Resolution to work on :-D i don't want to give up my beloved Monster energy drinks but i maybe can just cut them down...a lot!?
Monster Energy Drinks! I tried my first one a month or so ago when I had the sinus infection and I drank the Rehab one. I loved it! Won't let it be a habit, though and I try to drink 1/2 at a time! Guess what? My daughter is being recruited for the local police force! I don't know if she will pursue it, but she is strongly considering it. I thought of you when she started talking about it. Of course here is very different than in NYC and she has some compelling reasons to consider it - more on that later. Thanks for your comments to me!
yes i want to hear more about it...your daughter...is it because of the good benefits and decent pay? that's also the case in NYC...decent pay & great benefits.
Yeah i love the taste of many of those Monster drinks. It's odd - some flavors I hate and some I love. Laying low on them right now though. Half is the actual serving size...so you are smart...and half is same caffeine as cup of coffee. approx.
Yea, I'd cut way down on the Monster drinks and sugar. Hate zits! I still occasionally get one even at my age. Hardly had any as a teen.
yes, i'm working on cutting those to things down. Not eliminating, bc i feel like i'm being restrained and i binge back on them...but lessening.
Yeah, as a teen i had few in high school though in college i sometimes had bad ones due to late hours and bad eating habits. but the worst acne i ever had was now.
i know i need to eat better...that's a main culprit right now. Already my skin is getting a little better in just a few days of having LESS. Not NONE, but LESS.
I have the day off as a union holiday for New Years Day. If it falls on a Sunday, we get the Monday :-D So nice!
I was up around 8 and decided that since i forgot to put a couple of clothes in the laundry yesterday, that i would do a small load today since it's already built up after yesterday's workout and also added some relatively clean towels in for good measure. haha...i feel better with an empty hamper...less laundry for next week, sorta.
I am reading & relaxing this morning.
Later, I'm meeting up with an older male friend (maybe late 50's), as he lives close and had asked if i wanted to see a movie today if i was free. So we are meeting at the theater (he lives close and i'm driving there) at 1pm to see the Star Wars movie. We are getting an early dinner after that. It's nice to have friends that are close.
...I wouldn't have gotten together with him earlier on say back in my late 20's, but now i have known him several years and i know he is a nice guy. I wouldn't date him, as I'm not attracted like that and the age difference is WAY too much. Also, his health isn't the best. But either way, he's only a few years younger than my dad and no i am totally uninterested in that. But he is a nice person and friend to me.
I was going to also walk today, but i do not care to schedule any exercise in today. I am a bit sore from increases in exercise lately (cardio) and my body could use the day off.
Hope you enjoy your walk and have a good day off. Thank you for your comment about the goal weight.
I am thankful to be part of this select and small group of DDers because of the wonderful support we are! Year after year, we've always had a great group of people here. It's awesome because we aren't Sparkpeople or some large network but a very small and strong group. #underdog?!
11am Breakfast: banana 120, almond milk with 2 cups fiber one 270, dried goldenberries added in 80, later caffeine: 470 great!
2:30pm large lunch: potatoes in a curry type seasoning, but no sauce, 150-200, tofu 100, bamboo shoots and veggies in light seasoning 150, 450, and chips 230. 680 - was getting shaky low blood sugar from such a high carb breakfast....and deciding to increase my carbs right now.
2150 today. A bit high but decent.
Later, in about 1hr, I'm off to my cheap gym next to my job bc it's open normal hours. I've NEVER been there on a weekend. One thing - I KNOW parking will be very simple: the gym is on an industrial road, so fewer residents park there, work isn't open so no workers should be parked there either, and the no-parking street signs aren't in effect on Sundays.
I hope nobody takes offense...it is what it is....and the neighborhoods in Queens, NY vary greatly depending wherever you go!!!
yes, we do. I'd say we are a pretty smart bunch as well!
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.forms import ValidationError, ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User as AuthUser
from mezzanine.conf import settings
from mezzanine.core.forms import DynamicInlineAdminForm
from mezzanine.core.models import (Orderable, SitePermission,
CONTENT_STATUS_PUBLISHED)
from mezzanine.utils.urls import admin_url
if settings.USE_MODELTRANSLATION:
from django.utils.datastructures import SortedDict
from django.utils.translation import activate, get_language
from modeltranslation.admin import (TranslationAdmin,
TranslationInlineModelAdmin)
class BaseTranslationModelAdmin(TranslationAdmin):
"""
Mimic modeltranslation's TabbedTranslationAdmin but uses a
custom tabbed_translation_fields.js
"""
class Media:
js = (
"modeltranslation/js/force_jquery.js",
"mezzanine/js/%s" % settings.JQUERY_UI_FILENAME,
"mezzanine/js/admin/tabbed_translation_fields.js",
)
css = {
"all": ("mezzanine/css/admin/tabbed_translation_fields.css",),
}
else:
class BaseTranslationModelAdmin(admin.ModelAdmin):
"""
Abstract class used to handle the switch between translation
and no-translation class logic. We define the basic structure
for the Media class so we can extend it consistently regardless
of whether or not modeltranslation is used.
"""
class Media:
js = ()
css = {"all": ()}
User = get_user_model()
class DisplayableAdminForm(ModelForm):
def clean_content(form):
status = form.cleaned_data.get("status")
content = form.cleaned_data.get("content")
if status == CONTENT_STATUS_PUBLISHED and not content:
raise ValidationError(_("This field is required if status "
"is set to published."))
return content
class DisplayableAdmin(BaseTranslationModelAdmin):
"""
Admin class for subclasses of the abstract ``Displayable`` model.
"""
list_display = ("title", "status", "admin_link")
list_display_links = ("title",)
list_editable = ("status",)
list_filter = ("status", "keywords__keyword")
date_hierarchy = "publish_date"
radio_fields = {"status": admin.HORIZONTAL}
fieldsets = (
(None, {
"fields": ["title", "status", ("publish_date", "expiry_date")],
}),
(_("Meta data"), {
"fields": ["_meta_title", "slug",
("description", "gen_description"),
"keywords", "in_sitemap"],
"classes": ("collapse-closed",)
}),
)
form = DisplayableAdminForm
def __init__(self, *args, **kwargs):
super(DisplayableAdmin, self).__init__(*args, **kwargs)
try:
self.search_fields = list(set(list(self.search_fields) + list(
self.model.objects.get_search_fields().keys())))
except AttributeError:
pass
def save_model(self, request, obj, form, change):
"""
Save model for every language so that field auto-population
is done for every each of it.
"""
super(DisplayableAdmin, self).save_model(request, obj, form, change)
if settings.USE_MODELTRANSLATION:
lang = get_language()
for code in SortedDict(settings.LANGUAGES):
if code != lang: # Already done
try:
activate(code)
                    except Exception:
pass
else:
obj.save()
activate(lang)
class BaseDynamicInlineAdmin(object):
"""
Admin inline that uses JS to inject an "Add another" link which
when clicked, dynamically reveals another fieldset. Also handles
adding the ``_order`` field and its widget for models that
subclass ``Orderable``.
"""
form = DynamicInlineAdminForm
extra = 20
def get_fields(self, request, obj=None):
fields = super(BaseDynamicInlineAdmin, self).get_fields(request, obj)
if issubclass(self.model, Orderable):
fields = list(fields)
try:
fields.remove("_order")
except ValueError:
pass
fields.append("_order")
return fields
def get_fieldsets(self, request, obj=None):
fieldsets = super(BaseDynamicInlineAdmin, self).get_fieldsets(
request, obj)
if issubclass(self.model, Orderable):
for fieldset in fieldsets:
fields = [f for f in list(fieldset[1]["fields"])
if not hasattr(f, "translated_field")]
try:
fields.remove("_order")
except ValueError:
pass
fieldset[1]["fields"] = fields
fieldsets[-1][1]["fields"].append("_order")
return fieldsets
def get_inline_base_class(cls):
if settings.USE_MODELTRANSLATION:
class InlineBase(TranslationInlineModelAdmin, cls):
"""
Abstract class that mimics django-modeltranslation's
Translation{Tabular,Stacked}Inline. Used as a placeholder
for future improvement.
"""
pass
return InlineBase
return cls
class TabularDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.TabularInline)):
template = "admin/includes/dynamic_inline_tabular.html"
class StackedDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.StackedInline)):
template = "admin/includes/dynamic_inline_stacked.html"
def __init__(self, *args, **kwargs):
"""
Stacked dynamic inlines won't work without grappelli
installed, as the JavaScript in dynamic_inline.js isn't
able to target each of the inlines to set the value of
the order field.
"""
grappelli_name = getattr(settings, "PACKAGE_NAME_GRAPPELLI")
if grappelli_name not in settings.INSTALLED_APPS:
error = "StackedDynamicInlineAdmin requires Grappelli installed."
raise Exception(error)
super(StackedDynamicInlineAdmin, self).__init__(*args, **kwargs)
class OwnableAdmin(admin.ModelAdmin):
"""
Admin class for models that subclass the abstract ``Ownable``
model. Handles limiting the change list to objects owned by the
logged in user, as well as setting the owner of newly created
objects to the logged in user.
Remember that this will include the ``user`` field in the required
fields for the admin change form which may not be desirable. The
best approach to solve this is to define a ``fieldsets`` attribute
    that excludes the ``user`` field or simply add ``user`` to your
admin excludes: ``exclude = ('user',)``
"""
def save_form(self, request, form, change):
"""
Set the object's owner as the logged in user.
"""
obj = form.save(commit=False)
if obj.user_id is None:
obj.user = request.user
return super(OwnableAdmin, self).save_form(request, form, change)
def get_queryset(self, request):
"""
Filter the change list by currently logged in user if not a
superuser. We also skip filtering if the model for this admin
class has been added to the sequence in the setting
``OWNABLE_MODELS_ALL_EDITABLE``, which contains models in the
format ``app_label.object_name``, and allows models subclassing
``Ownable`` to be excluded from filtering, eg: ownership should
not imply permission to edit.
"""
opts = self.model._meta
model_name = ("%s.%s" % (opts.app_label, opts.object_name)).lower()
models_all_editable = settings.OWNABLE_MODELS_ALL_EDITABLE
models_all_editable = [m.lower() for m in models_all_editable]
qs = super(OwnableAdmin, self).get_queryset(request)
if request.user.is_superuser or model_name in models_all_editable:
return qs
return qs.filter(user__id=request.user.id)
class SingletonAdmin(admin.ModelAdmin):
"""
Admin class for models that should only contain a single instance
in the database. Redirect all views to the change view when the
instance exists, and to the add view when it doesn't.
"""
def handle_save(self, request, response):
"""
Handles redirect back to the dashboard when save is clicked
(eg not save and continue editing), by checking for a redirect
response, which only occurs if the form is valid.
"""
form_valid = isinstance(response, HttpResponseRedirect)
if request.POST.get("_save") and form_valid:
return redirect("admin:index")
return response
def add_view(self, *args, **kwargs):
"""
Redirect to the change view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = True
response = super(SingletonAdmin, self).add_view(*args, **kwargs)
return self.handle_save(args[0], response)
return redirect(admin_url(self.model, "change", singleton.id))
def changelist_view(self, *args, **kwargs):
"""
Redirect to the add view if no records exist or the change
view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except self.model.MultipleObjectsReturned:
return super(SingletonAdmin, self).changelist_view(*args, **kwargs)
except self.model.DoesNotExist:
return redirect(admin_url(self.model, "add"))
return redirect(admin_url(self.model, "change", singleton.id))
def change_view(self, *args, **kwargs):
"""
If only the singleton instance exists, pass ``True`` for
``singleton`` into the template which will use CSS to hide
the "save and add another" button.
"""
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = self.model.objects.count() == 1
response = super(SingletonAdmin, self).change_view(*args, **kwargs)
return self.handle_save(args[0], response)
###########################################
# Site Permissions Inlines for User Admin #
###########################################
class SitePermissionInline(admin.TabularInline):
model = SitePermission
max_num = 1
can_delete = False
class SitePermissionUserAdmin(UserAdmin):
inlines = [SitePermissionInline]
# only register if User hasn't been overridden
if User == AuthUser:
admin.site.unregister(User)
admin.site.register(User, SitePermissionUserAdmin)
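
# A minimal registration sketch for SingletonAdmin, assuming a hypothetical
# `SiteSettings` model that should only ever have one row (illustrative only,
# not part of Mezzanine itself):
#
#     from django.contrib import admin
#     from myapp.models import SiteSettings
#
#     @admin.register(SiteSettings)
#     class SiteSettingsAdmin(SingletonAdmin):
#         pass
#
# Once the single instance exists, both the changelist and the add view
# redirect straight to its change form.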
|
Search San Juan Capistrano homes for sale to find the most recent home listings in San Juan Capistrano, CA. Find San Juan Capistrano Realtors to help you buy or sell a house, view San Juan Capistrano real estate listings, and try the Home Sale Maximizer home improvement tool on HomeGain to see top home improvement recommendations that will increase your property values in San Juan Capistrano, CA.
Search Ladera Ranch new homes for sale and Ladera Ranch homes for sale listings.
Search Dana Point new homes for sale and Dana Point homes for sale listings.
Search Lake Forest new homes for sale and Lake Forest homes for sale listings.
|
''' a Triangle
'''
import math
import collections
import itertools
from . import Polygon, Point, Segment, Circle
from .constants import Epsilon, Half_Pi, nearly_eq, Sqrt_3
from .exceptions import *
class Triangle(Polygon):
'''a pythonic Triangle
Implements a Triangle object in the XY plane having three
non-coincident vertices and three intersecting edges.
Vertices are labeled; 'A', 'B' and 'C'.
Edges are labeled; 'AB', 'BC' and 'AC'.
The length of edges opposite each vertex are labeled:
'a' for the side opposite vertex A.
'b' for the side opposite vertex B.
'c' for the side opposite vertex C.
Interior angles in radians are labeled:
'alpha' for CAB
'beta' for ABC
'gamma' for BCA
Usage:
>>> a = Triangle()
>>> b = Triangle(A,B,C) # A,B,C are Points or Point equivalents
>>> c = Triangle([p,q,r]) # p,q,r are Points or Point equivalents
>>> d = Triangle([x,y,z],[x,y,z],[x,y,z])
'''
@classmethod
def withAngles(cls, origin=None, base=1, alpha=None,
beta=None, gamma=None, inDegrees=False):
        '''
        :origin: optional Point
        :base: optional float describing the length of the base side
        :alpha: optional float describing the interior angle at vertex A
        :beta: optional float describing the interior angle at vertex B
        :gamma: optional float describing the interior angle at vertex C
        :inDegrees: optional boolean, angles are given in degrees if True
        :return: Triangle initialized with points comprising the triangle
                 with the specified angles.
        '''
raise NotImplementedError("withAngles")
@classmethod
def withSides(cls, origin=None, a=1, b=1, c=1):
'''
:origin: optional Point
:a: optional float describing length of the side opposite A
:b: optional float describing length of the side opposite B
:c: optional float describing length of the side opposite C
:return: Triangle initialized with points comprising the triangle
with the specified side lengths.
If only 'a' is specified, an equilateral triangle is returned.
'''
raise NotImplementedError("withSides")
@classmethod
def unit(cls,scale=1):
return cls(Point.units(scale))
def __init__(self, *args, **kwds):
'''
:args: iterable of Point or Point equivalents
:kwds: named Points where recognized names are 'A', 'B' and 'C'.
If A is an iterable containing Point or Point equivalent objects
it will be used to initialize up to three points in the triangle.
'''
kwds['defaults'] = Point(),Point(1,0),Point(0,1)
super().__init__(*args,**kwds)
if len(self) != 3:
raise ValueError(len(self))
@property
def AB(self):
return self.pairs('AB')
@AB.setter
def AB(self, iterable):
self.A, self.B = iterable
@property
def BA(self):
return self.pairs('BA')
@BA.setter
def BA(self, iterable):
self.B, self.A = iterable
@property
def BC(self):
return self.pairs('BC')
@BC.setter
def BC(self, iterable):
self.B, self.C = iterable
@property
def CB(self):
return self.pairs('CB')
@CB.setter
def CB(self, iterable):
self.C, self.B = iterable
@property
def AC(self):
return self.pairs('AC')
@AC.setter
def AC(self, iterable):
self.A, self.C = iterable
@property
def CA(self):
return self.pairs('CA')
@CA.setter
def CA(self, iterable):
self.C, self.A = iterable
@property
def ABC(self):
return [self.A, self.B, self.C]
@ABC.setter
def ABC(self, iterable):
self.A, self.B, self.C = iterable
@property
def ccw(self):
'''
Result of A.ccw(B,C), float.
See Point.ccw
'''
return self.A.ccw(self.B, self.C)
@property
def isCCW(self):
'''
True if ABC has a counter-clockwise rotation, boolean.
'''
return self.A.isCCW(self.B,self.C)
@property
def area(self):
'''
Area of the triangle, float.
        Performance note: computed via Triangle.ccw (subtractions,
        multiplications and a division).
'''
return abs(self.ccw) / 2
@property
def heronsArea(self):
'''
        Heron's formula for computing the area of a triangle, float.
Performance note: contains a square root.
'''
s = self.semiperimeter
return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c)))
@property
def inradius(self):
'''
The radius of the triangle's incircle, float.
'''
return (self.area * 2) / self.perimeter
@property
def circumcenter(self):
'''
The intersection of the median perpendicular bisectors, Point.
The center of the circumscribed circle, which is the circle that
passes through all vertices of the triangle.
https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
BUG: only finds the circumcenter in the XY plane
'''
if self.isRight:
return self.hypotenuse.midpoint
if self.A.isOrigin:
t = self
else:
t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
if not t.A.isOrigin:
raise ValueError('failed to translate {} to origin'.format(t))
BmulC = t.B * t.C.yx
d = 2 * (BmulC.x - BmulC.y)
bSqSum = sum((t.B ** 2).xy)
cSqSum = sum((t.C ** 2).xy)
x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
return Point(x, y)
@property
def circumradius(self):
'''
Distance from the circumcenter to all the verticies in
the Triangle, float.
'''
return (self.a * self.b * self.c) / (self.area * 4)
@property
def circumcircle(self):
'''
A circle whose center is equidistant from all the
vertices of the triangle, Circle.
'''
return Circle(self.circumcenter, self.circumradius)
@property
def orthocenter(self):
'''
The intersection of the altitudes of the triangle, Point.
'''
raise NotImplementedError('orthocenter')
@property
def hypotenuse(self):
'''
The longest edge of the triangle, Segment.
'''
return max(self.edges(),key=lambda s:s.length)
@property
def alpha(self):
'''
The angle described by angle CAB in radians, float.
'''
return Segment(self.CA).radiansBetween(Segment(self.BA))
@property
def beta(self):
'''
The angle described by angle ABC in radians, float.
'''
return Segment(self.AB).radiansBetween(Segment(self.CB))
@property
def gamma(self):
'''
The angle described by angle BCA in radians, float.
'''
return Segment(self.BC).radiansBetween(Segment(self.AC))
@property
def angles(self):
'''
A list of the interior angles of the triangle, list of floats.
'''
return [self.alpha, self.beta, self.gamma]
@property
def a(self):
'''
The length of line segment BC, opposite vertex A, float.
'''
return abs(self.B.distance(self.C))
@property
def b(self):
'''
The length of line segment AC, opposite vertex B, float.
'''
return abs(self.A.distance(self.C))
@property
def c(self):
'''
The length of line segment AB, opposite vertex C, float.
'''
return abs(self.A.distance(self.B))
@property
def sides(self):
'''
A list of edge lengths [a, b, c], list of floats.
'''
return [self.a, self.b, self.c]
@property
def altitudes(self):
'''
A list of the altitudes of each vertex [AltA, AltB, AltC], list of
floats.
An altitude is the shortest distance from a vertex to the side
opposite of it.
'''
A = self.area * 2
return [A / self.a, A / self.b, A / self.c]
@property
def isEquilateral(self):
'''
True iff all side lengths are equal, boolean.
'''
return self.a == self.b == self.c
@property
def isIsosceles(self):
'''
True iff two side lengths are equal, boolean.
'''
return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)
@property
def isScalene(self):
'''
True iff all side lengths are unequal, boolean.
'''
        return (self.a != self.b) and (self.b != self.c) and (self.a != self.c)
@property
def isRight(self):
'''
        True iff one angle measures 90 degrees (Pi/2 radians), boolean.
'''
return any([nearly_eq(v,Half_Pi) for v in self.angles])
@property
def isObtuse(self):
'''
        True iff one angle measures greater than 90 degrees (Pi/2 radians),
        boolean.
'''
return any([v > Half_Pi for v in self.angles])
@property
def isAcute(self):
'''
        True iff all angles measure less than 90 degrees (Pi/2 radians),
        boolean.
'''
return all([v < Half_Pi for v in self.angles])
def congruent(self, other):
'''
A congruent B
True iff all angles of 'A' equal angles in 'B' and
all side lengths of 'A' equal all side lengths of 'B', boolean.
'''
        # sets would collapse repeated values (an isosceles triangle has two
        # equal angles and two equal sides), so compare sorted sequences
        return (sorted(self.angles) == sorted(other.angles) and
                sorted(self.sides) == sorted(other.sides))
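
# A short usage sketch (illustrative only), using the 3-4-5 right triangle:
#
#     t = Triangle(Point(), Point(4, 0), Point(0, 3))
#     t.sides            # [a, b, c] -> [5.0, 3.0, 4.0]
#     t.area             # 6.0
#     t.isRight          # True
#     t.circumcenter     # midpoint of the hypotenuse -> (2.0, 1.5)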
|
A funeral director is on-call 24 hours to assist you whenever your need for funeral services arises. Contact us at (734) 453-3333.
We never know when the loss of a loved one will occur. Whenever it happens, it will be a difficult and an emotional time for those involved. The Schrader-Howell Funeral Home will handle immediate details that are involved, so you can concentrate on matters of the family and friends.
Regardless of the time, day or night, we'll be here for you and your family. You can speak with a licensed funeral director from our funeral home by calling (734) 453-3333.
When a death occurs, a few things need to occur in a timely manner depending on the location of the death.
The local police and/or coroner may need to be notified, and any details of the death communicated.
Arrangements must be made to have your loved one taken into our care at the funeral home or the county morgue.
The next of kin should be notified (if not already aware of the death).
Our licensed funeral directors will handle the matter swiftly, calmly and with compassion for you and your loved one.
When a death occurs away from home, our licensed funeral directors will take care of everything, whether the death has occurred in another state or country. If the death has occurred away from home, Schrader-Howell Funeral Home will coordinate with those local services until such time that your loved one can be taken into our care.
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import time
#auxiliary functions
def read_saved_track_names(track_file):
tracks = set()
with open(track_file) as f:
for line in f:
line2 = line.strip()
tracks.add(line2)
return tracks
def save_garmin_tracks(activity_links, track_file, mode):
with open(track_file, mode) as myfile:
for link in activity_links:
link = link.strip()
myfile.write(link+'\n')
def extract_activity_links(browser, new_links, activity_links):
activities_el = browser.find_element_by_id('gridForm:gridList:tb')
for anchor in activities_el.find_elements_by_tag_name('a'):
activity_link = anchor.get_attribute("href")
        if activity_link is not None:
            if '/activity/' in activity_link:
activity_links.add(activity_link)
new_links.add(activity_link)
def move_to_next_page(browser):
footer_el = browser.find_element_by_class_name('resultsFooter')
btn_found = False
for btn in footer_el.find_elements_by_class_name('rich-datascr-button'):
if btn.text == '»':
btn_found = True
btn.click()
break
return btn_found
def select_start_date(browser, n_years):
    # move back one year per loop iteration (n_years - 1 clicks in total)
    for i in range(1, n_years):
calendar1 = browser.find_element_by_id('exploreSearchForm:startDateCalendarPopupButton')
calendar1.click()
time.sleep(1)
calendar_button = browser.find_element_by_class_name('rich-calendar-tool-btn')
calendar_button.click()
time.sleep(1)
#choose date..
date_button = browser.find_element_by_id('exploreSearchForm:startDateCalendarDayCell7')
date_button.click()
time.sleep(2)
def zoom_out_map(browser, n_zooms):
for i in range(1, n_zooms):
mapZoomOut = browser.find_element_by_class_name("map-zoom-out")
mapZoomOut.click()
time.sleep(5)
################################################
# saves the GARMIN activity links for selected
# CITY and the number of the past years
################################################
def save_garmin_activity_links(city, n_years, track_file):
activity_links = read_saved_track_names(track_file)
new_links = set()
browser = webdriver.Firefox()
url = "https://sso.garmin.com/sso/login?service=https%3A%2F%2Fconnect.garmin.com%2FminExplore&webhost=olaxpw-connect00&source=https%3A%2F%2Fconnect.garmin.com%2Fen-US%2Fsignin&redirectAfterAccountLoginUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&redirectAfterAccountCreationUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&gauthHost=https%3A%2F%2Fsso.garmin.com%2Fsso&locale=en_US&id=gauth-widget&cssUrl=https%3A%2F%2Fstatic.garmincdn.com%2Fcom.garmin.connect%2Fui%2Fcss%2Fgauth-custom-v1.1-min.css&clientId=GarminConnect&rememberMeShown=true&rememberMeChecked=false&createAccountShown=true&openCreateAccount=false&usernameShown=false&displayNameShown=false&consumeServiceTicket=false&initialFocus=true&embedWidget=false&generateExtraServiceTicket=false"
browser.get(url)
time.sleep(10)
username = browser.find_element_by_id("username")
password = browser.find_element_by_id("password")
username.send_keys("[email protected]")
password.send_keys("AnnAgnps(v1)")
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
#now show filters..
time.sleep(10)
show_filters = browser.find_element_by_id("showFilters")
show_filters.click()
#select the activity type option
el = browser.find_element_by_id('exploreSearchForm:activityType')
for option in el.find_elements_by_tag_name('option'):
if option.text == 'Cross Country Skiing':
option.click()
break
#select the time period option
time.sleep(2)
time_el = browser.find_element_by_id('exploreSearchForm:timePeriodSelect')
for option in time_el.find_elements_by_tag_name('option'):
if option.text == 'Custom Dates':
option.click()
break
#select the start date (10 years back..)
select_start_date(browser, n_years)
#select the end date (start of current month..)
time.sleep(2)
calendar2 = browser.find_element_by_id('exploreSearchForm:endDateCalendarPopupButton')
calendar2.click()
date_button = browser.find_element_by_id('exploreSearchForm:endDateCalendarDayCell7')
date_button.click()
#now search a new location ..
time.sleep(5)
location = browser.find_element_by_id("exploreSearchForm:location")
location.send_keys(city)
searchButton = browser.find_element_by_id("searchButton")
searchButton.submit()
#find the grid list
next_active = True
while next_active:
time.sleep(10)
len1 = len(new_links)
extract_activity_links(browser, new_links, activity_links)
len2 = len(new_links)
next_active = len2 > len1
time.sleep(2)
move_to_next_page(browser)
save_garmin_tracks(activity_links, track_file, "w")
browser.close()
print(city + ' : ' + str(len(new_links)))
f = "garmin_tracks2.txt"
trk = read_saved_track_names(f)
save_garmin_tracks(trk, f, "w")
trk = []
#save_garmin_activity_links('Brno', 10, f)
#save_garmin_activity_links('Karlovy Vary', 10, f)
#save_garmin_activity_links('Chomutov', 10, f)
#save_garmin_activity_links('Kvilda', 10, f)
#save_garmin_activity_links('Klingenthal', 10, f)
#save_garmin_activity_links('Jablunkov', 10, f)
#save_garmin_activity_links('Svratka', 10, f)
#save_garmin_activity_links('Jilemnice', 10, f)
#save_garmin_activity_links('Trutnov', 10, f)
#save_garmin_activity_links('Mladkov', 10, f)
#save_garmin_activity_links('Mikulovice', 10, f)
#save_garmin_activity_links('Olomouc', 10, f)
#save_garmin_activity_links('Protivanov', 10, f)
#save_garmin_activity_links('Karolinka', 10, f)
#save_garmin_activity_links('Jihlava', 10, f)
#save_garmin_activity_links('Kocelovice', 10, f)
#save_garmin_activity_links('Altenberg', 10, f)
#save_garmin_activity_links('Oberwiesenthal', 10, f)
#save_garmin_activity_links('Zittau', 10, f)
#save_garmin_activity_links('Heroltovice', 10, f)
#save_garmin_activity_links('Rokytno', 10, f)
cities1 = [
'Flossenburg', 'Olbernhau', 'Hora Svateho Sebestiana',
'Kvan', 'Rozmital', 'Ceska Kubice', 'Primda', 'Honezovice',
'Tremosna', 'Cunkov', 'Jistebnice', 'Hartvikov', 'Frymburk',
'Ceske Budejovice', 'Pisek', 'Pribram', 'Havlickuv Brod',
'Hradec Kralove', 'Ceska Trebova', 'Ricany', 'Chotebor',
'Hlinsko', 'Napajedla', 'Zlin', 'Rajnochovice', 'Papajci', 'Orlicke Zahori',
'Zdobnice', 'Sedlonov', 'Krnov', 'Vitkov', 'Mala Moravka', 'Kouty nad Desnou',
'Dolni Morava', 'Kralicky Sneznik', 'Dlouhe Strane', 'Bruntal',
'Moravsky Beroun']
cities2 = ['Sternberk', 'Svaty Kopecek', 'Kralovo Pole',
'Uhersky Brod', 'Uherske Hradiste', 'Hodonin', 'Hartmanice',
'Brcalnik', 'Keply', 'Vimperk', 'Klet', 'Teskov', 'Moravske Budejovice',
'Novy Hojkov', 'Teskov', 'Letohrad','Johanngeorgenstadt','Pernink','Medenec',
'Bublava','Horni Halze', 'Johstadt', 'Vejprty', 'Bolebor']
cities3 = ['Holzhau',
'Moldava', 'Horazdovice','Sedlcany','Neveklov','Rymarov','Hanusovice',
'Sumperk']
cities4 = ['Zelezny Brod', 'Ceska Lipa', 'Novy Bor', 'Varnsdorf',
'Modlibohov','Hodkovice nad Mohelkou', 'Jablonec nad Nisou','Rakovnik']
cities5 = ['Kladno', 'Luhacovice','Vyskov','Vizovice','Roznov pod Radhostem',
'Celadna','Hrcava', 'Rokytnice v Orlickych Horach','Hostinne',
'Vrchlabi','Hejnice']
cities6 = ['Nove Mesto pod Smrkem','Vernerice',
'Zdar nad Sazavou','Nova Bystrice','Kamenice nad Lipou','Telc']
cities7 = ['Bad Brambach','Becov nad Teplou','Rokycany','Stozec','Borova Lada',
'Lam','Zelezna Ruda','Karlstift','Svetla nad Sazavou','Cechtice',
'Policka','Jimramov','Cenkovice','Kraliky','Miedzylesie','Zacler',
'Janske Lazne','Spindleruv Mlyn','Pec pod Snezkou','Horice',
'Dvur Kralove','Strakonice','Kralovice','Strani','Lazy pod Makytou',
'Seiffen','Znojmo','Drahany','Kurim','Decinsky Sneznik','Capartice',
'Rusava','Javornik','Vapenna','Lipova Lazne','Usti nad Orlici',
'Hronov','Police nad Metuji','Mezimesti','Jetrichovice','Dobris',
'Pelhrimov','Sec','Kyjov','Kaplice','Volary','Bayerisch Eisenstein',
'Grosser Arber','Aigen im Muhlkreis','Litschau','Waldmunchen',
'Selb','Auersberg','Sindelova','Nejdek','Marianska','Abertamy']
for city in cities7:
save_garmin_activity_links(city, 10, f)
|
Chase Dorothy all the way down the Yellow Brick Road in this deluxe witch costume for girls! The full length garb is made from high quality polyester and is fitted in the chest, arms and waist. A zipper is located in the back while silver buttons line the front. A full tulle petticoat slips on under the skirt for extra volume. A pointed cone shaped hat with a wire rim is also included. A black tulle sash, which is removable, wraps around the hat and hangs in the back. Get a look straight from the movie when you add our green makeup!
|
class ColorMap:
def __init__ (self, base, bound, num):
self.base = base
self.bound = bound
self.num = float (num)
def index (self, val):
val = float (val)
return self.base.interpolate (self.bound, val / (self.num - 1))
def __iter__ (self):
return ColorMapIter (self)
class ColorMapIter:
def __init__ (self, cm):
self.current = 0
self.cm = cm
def __iter__ (self):
return self
def next (self):
if self.current == self.cm.num:
raise StopIteration ()
r_val = self.cm.index (self.current)
self.current += 1
return r_val
def color_to_css (c):
    # assumes channels on a 0.0-1.0 scale; '%02x' zero-pads each channel
    # correctly (the old hex()-and-slice trick padded on the wrong side
    # for values below 16, e.g. 0x5 became '50' instead of '05')
    r = clampInt (c.red * 255)
    g = clampInt (c.green * 255)
    b = clampInt (c.blue * 255)
    return '#%02x%02x%02x' % (r, g, b)
def hex_to_color (hex):
hex = str (hex)
if hex.startswith ('0x'):
hex = hex[2:]
if len (hex) != 6:
raise RuntimeError (hex + ' is not a hex color')
red = int ('0x' + hex[0:2], 0)
green = int ('0x' + hex[2:4], 0)
blue = int ('0x' + hex[4:6], 0)
return Color (*map (clampInt, [red, green, blue]))
def clampInt (value):
value = int (value)
if value > 255:
return 255
elif value < 0:
return 0
else:
return value
def clampFloat (value):
value = float (value)
if value > 1.0:
return 1.0
elif value < 0.0:
return 0.0
else:
return value
class Color:
def __init__ (self, red, green, blue):
self.red = float (red)
self.green = float (green)
self.blue = float (blue)
def interpolate (self, c, percent):
percent = float (percent)
if percent > 1.0 or percent < 0.0:
            raise RuntimeError ('Cannot interpolate color: percent out of range')
return ((c * percent) + (self * (1.0 - percent)))
def __add__ (self, c):
r = self.red + c.red
g = self.green + c.green
b = self.blue + c.blue
return Color (r, g, b)
def __mul__ (self, scalar):
r = self.red * scalar
g = self.green * scalar
b = self.blue * scalar
return Color (r, g, b)
def __str__ (self):
rgb = 'rgb('
rgb += str(int(self.red))+ ','
rgb += str(int(self.green))+ ','
rgb += str(int(self.blue))+ ')'
return rgb
red = Color (255, 0, 0)
green = Color (0, 255, 0)
blue = Color (0, 0, 255)
black = Color (0, 0, 0)
white = Color (255, 255, 255)
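# A minimal usage sketch, assuming (as color_to_css does) unit-scale
# 0.0-1.0 channel values rather than the 0-255 constants above:
if __name__ == '__main__':
    for c in ColorMap (Color (0.0, 0.0, 1.0), Color (1.0, 0.0, 0.0), 5):
        print color_to_css (c)  # '#0000ff', '#3f00bf', '#7f007f', '#bf003f', '#ff0000'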
|
This paper presents the results of an experimental study of wave run-up on a semi-submersible offshore structure. A series of model tests at a 1:80 scale ratio was carried out in the two-dimensional wave basin of MOERI/KIOST. The experimental model had two columns and one pontoon. The model was fixed, and wave elevations were measured at five points per column. Two draft conditions (operational and survival) and three wave heights were considered under regular waves. First, the nonlinear characteristics of wave run-up are discussed using the time-series data. Then, the measured wave heights are compared with numerical results based on a potential-flow model; the comparison shows fairly good correlation between the experiments and the computations. Finally, wave run-up values under the operational and survival conditions are suggested.
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from hashlib import md5
import logging
import re
import zlib
# 3p
import requests
import simplejson as json
# project
from config import get_version
from utils.proxy import set_no_proxy_settings
set_no_proxy_settings()
# urllib3 logs a bunch of stuff at the info level
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.WARN)
requests_log.propagate = True
# From http://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
control_chars = ''.join(map(unichr, range(0, 32) + range(127, 160)))
control_char_re = re.compile('[%s]' % re.escape(control_chars))
def remove_control_chars(s):
return control_char_re.sub('', s)
def http_emitter(message, log, agentConfig, endpoint):
"Send payload"
url = agentConfig['dd_url']
log.debug('http_emitter: attempting postback to ' + url)
# Post back the data
try:
payload = json.dumps(message)
except UnicodeDecodeError:
message = remove_control_chars(message)
payload = json.dumps(message)
zipped = zlib.compress(payload)
log.debug("payload_size=%d, compressed_size=%d, compression_ratio=%.3f"
% (len(payload), len(zipped), float(len(payload))/float(len(zipped))))
apiKey = message.get('apiKey', None)
if not apiKey:
raise Exception("The http emitter requires an api key")
url = "{0}/intake/{1}?api_key={2}".format(url, endpoint, apiKey)
try:
headers = post_headers(agentConfig, zipped)
r = requests.post(url, data=zipped, timeout=5, headers=headers)
r.raise_for_status()
if r.status_code >= 200 and r.status_code < 205:
log.debug("Payload accepted")
except Exception:
log.exception("Unable to post payload.")
try:
log.error("Received status code: {0}".format(r.status_code))
except Exception:
pass
def post_headers(agentConfig, payload):
return {
'User-Agent': 'Datadog Agent/%s' % agentConfig['version'],
'Content-Type': 'application/json',
'Content-Encoding': 'deflate',
'Accept': 'text/html, */*',
'Content-MD5': md5(payload).hexdigest(),
'DD-Collector-Version': get_version()
}
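# A minimal invocation sketch; the config keys shown and the 'metrics'
# endpoint name are illustrative assumptions rather than documented values.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo_config = {'dd_url': 'https://example.invalid', 'version': '5.0.0'}
    demo_message = {'apiKey': 'REPLACE_ME', 'series': []}
    http_emitter(demo_message, logging.getLogger(__name__), demo_config, 'metrics')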
|
Respecting your privacy is important to Mapmygenome. The purpose of this statement is to inform you of the kinds of information that we may gather when you use our website, how we may use the information, and how we may disclose it to other parties. By using our products and services, you agree that this Privacy Statement is acceptable to you and that Mapmygenome is permitted to use your personal information as stated below.
From visitors to our website, we collect their demographics, IP address, and behavior on our website.
Newsletter subscribers provide their name and email address.
Registered customers provide details of name, email, mobile, and billing and shipping address. This information is securely stored in our CRM.
Customers who purchase genetic tests or counseling service provide their health history through a questionnaire (available offline/online). Once the genetic test is done, their data is analyzed and reports are sent. All this information is stored in Biotracker, our secure Laboratory Information Management System.
We may periodically send promotional emails about new products, special offers, or other information that we think you may find interesting using the email address which you have provided.
A cookie is a small file that asks permission to be placed on your computer's hard drive. Once you agree, the file is added and the cookie helps analyse web traffic or lets you know when you visit a particular site. Cookies allow web applications to respond to you as an individual. The web application can tailor its operations to your needs, likes, and dislikes by gathering and remembering information about your preferences.
If you have previously agreed to us using your personal information for direct marketing purposes, you may change your mind at any time by writing to or emailing us at [email protected].
If you believe that any information we are holding on you is incorrect or incomplete, please write to us at the above address or email us at [email protected] as soon as possible. We will promptly correct any information found to be incorrect.
Your health and genetic information is confidential, and we make sure all information is stored in Biotracker. The primary use of genetic information is to assess your genetic predisposition and report this to you (please note that your health history is not used in this report). Following your genetic counseling, we prepare an action plan to achieve your health goals - this report utilizes your health history and genetic information.
Before your blood/saliva/other sample enters our lab, we make sure it is assigned a unique ID and de-identified, so that your personal information is not accessible to everyone. Only authorised personnel from our team have access to this information. Throughout the processing, your sample is tracked by Biotracker.
Considering the importance of genetic and health information in research, we use this information, without any personal identification, for further analysis with the customer's consent. During sample collection, we send out a consent form to all customers.
We do not share any information with unaffiliated parties, except where required to provide you or your organization the ability to use the product as desired or where required to do so by law. If your sample has come to us from a channel partner or a healthcare provider, they may have access to your reports and other information. Mapmygenome will not be held responsible for any violation of privacy in such cases.
In the future, Mapmygenome may sell business units, product lines, or other assets. In such cases, customer information may be one of the transferred business assets. Also, if Mapmygenome or substantially all of its assets were to be sold, customer information would be one of the transferred assets. In all such cases, Mapmygenome may disclose the customer information prior to the transaction to a prospective purchaser and its representatives in connection with the purchaser's due diligence investigation.
Our website may have links to partner sites that are not under Mapmygenome's control. This Privacy Statement applies only to our site and products and not to the linked sites.
|
"""The tests for the MQTT switch platform."""
import json
from unittest.mock import ANY
from asynctest import patch
import pytest
from homeassistant.components import mqtt, switch
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_coro,
mock_registry,
)
from tests.components.switch import common
@pytest.fixture
def mock_publish(hass):
"""Initialize components."""
yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))
async def test_controlling_state_via_topic(hass, mock_publish):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "0")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_sending_mqtt_commands_and_optimistic(hass, mock_publish):
"""Test the sending MQTT commands in optimistic mode."""
fake_state = ha.State("switch.test", "on")
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=mock_coro(fake_state),
):
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"qos": "2",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "switch.test")
mock_publish.async_publish.assert_called_once_with(
"command-topic", "beer on", 2, False
)
mock_publish.async_publish.reset_mock()
state = hass.states.get("switch.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "switch.test")
mock_publish.async_publish.assert_called_once_with(
"command-topic", "beer off", 2, False
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_controlling_state_via_topic_and_json_message(hass, mock_publish):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"value_template": "{{ value_json.val }}",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer on"}')
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer off"}')
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_default_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"availability_topic": "availability_topic",
"payload_on": 1,
"payload_off": 0,
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "online")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability_topic", "offline")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "online")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async def test_custom_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"availability_topic": "availability_topic",
"payload_on": 1,
"payload_off": 0,
"payload_available": "good",
"payload_not_available": "nogood",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "good")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability_topic", "nogood")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "good")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async def test_custom_state_payload(hass, mock_publish):
"""Test the state payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
"state_on": "HIGH",
"state_off": "LOW",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "HIGH")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "LOW")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("switch.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("switch.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("switch.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
"""Test unique id option only creates one switch per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids()) == 1
async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
"""Test removal of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
async def test_discovery_update_switch(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("switch.milk")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("switch.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("switch.beer")
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT switch device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"state_topic": "test-topic",
"command_topic": "command-topic",
"availability_topic": "avty-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = hass.states.get("switch.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("switch.beer", new_entity_id="switch.milk")
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
state = hass.states.get("switch.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
|
The City of Ormond Beach utilizes several advisory boards to solicit input from citizens and assist the work of the City Commission.
Advisory board volunteers are appointed by the City Commission and serve two-year terms that coincide with the City Commission terms, except for members of the pension boards, who serve varying terms.
If you are interested in serving on an Advisory Board or Committee, please print and complete an Advisory Board Application or Apply Online.
If you have questions concerning the duties and responsibilities of any of the advisory boards, please contact Lisa Dahme, Assistant City Clerk, at (386) 676-3297.
|
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Update
"""
import os
import os.path
import socket
import sys
import types
import unittest
import urllib2
sys.path.append('.')
from bleachbit import Common
from bleachbit.Update import check_updates, update_winapp2, user_agent
class UpdateTestCase(unittest.TestCase):
"""Test case for module Update"""
def test_UpdateCheck(self):
"""Unit tests for class UpdateCheck"""
update_tests = []
wa = '<winapp2 url="http://katana.oooninja.com/bleachbit/winapp2.ini" sha512="ce9e18252f608c8aff28811e372124d29a86404f328d3cd51f1f220578744bb8b15f55549eabfe8f1a80657fc940f6d6deece28e0532b3b0901a4c74110f7ba7"/>'
update_tests.append(
('<updates><stable ver="0.8.4">http://084</stable><beta ver="0.8.5beta">http://085beta</beta>%s</updates>' % wa,
((u'0.8.4', u'http://084'), (u'0.8.5beta', u'http://085beta'))))
update_tests.append(
('<updates><stable ver="0.8.4">http://084</stable>%s</updates>' % wa,
((u'0.8.4', u'http://084'), )))
update_tests.append(
('<updates><beta ver="0.8.5beta">http://085beta</beta>%s</updates>' % wa,
((u'0.8.5beta', u'http://085beta'), )))
update_tests.append(('<updates></updates>', ()))
# fake network
original_open = urllib2.build_opener
xml = ""
class fake_opener:
def add_headers(self):
pass
def read(self):
return xml
def open(self, url):
return self
urllib2.build_opener = fake_opener
for update_test in update_tests:
xml = update_test[0]
updates = check_updates(True, False, None, None)
self.assertEqual(updates, update_test[1])
urllib2.build_opener = original_open
# real network
for update in check_updates(True, False, None, None):
if not update:
continue
ver = update[0]
url = update[1]
self.assert_(isinstance(ver, (type(None), unicode)))
self.assert_(isinstance(url, (type(None), unicode)))
# test failure
Common.update_check_url = "http://localhost/doesnotexist"
self.assertRaises(
urllib2.URLError, check_updates, True, False, None, None)
def test_update_winapp2(self):
from bleachbit.Common import personal_cleaners_dir
fn = os.path.join(personal_cleaners_dir, 'winapp2.ini')
if os.path.exists(fn):
print 'note: deleting %s' % fn
os.unlink(fn)
url = 'http://www.winapp2.com/Winapp2.ini'
def append_text(s):
print s
succeeded = {'r': False} # scope
def on_success():
succeeded['r'] = True
# bad hash
self.assertRaises(RuntimeError, update_winapp2, url, "notahash",
append_text, on_success)
self.assert_(not succeeded['r'])
# blank hash, download file
update_winapp2(url, None, append_text, on_success)
self.assert_(succeeded['r'])
        # blank hash, do not download again
        succeeded['r'] = False
        update_winapp2(url, None, append_text, on_success)
        self.assert_(not succeeded['r'])
def test_user_agent(self):
"""Unit test for method user_agent()"""
agent = user_agent()
print "debug: user agent = '%s'" % (agent, )
self.assert_(isinstance(agent, str))
def suite():
return unittest.makeSuite(UpdateTestCase)
if __name__ == '__main__':
unittest.main()
|
The Amasa Back trail is one of the classic rides of Moab. It ascends from Kane Creek on the Colorado River just west of Moab, climbing up onto an outcropping of rock surrounded on three sides by the Colorado.
The Porcupine Rim Trail is the "other" famous classic ride at Moab. At 15.6 miles, requiring expert technical skills, this ride is for advanced bikers who are in good shape. It's one-way with a shuttle car, or a 34-mile loop for the truly insane (10 uphill miles from Moab to the trailhead, 8 flat miles from Jackass Canyon back to the middle of Moab).
This is the world-famous Slickrock Trail near Moab, Utah. This 10.6 mile loop twists and rolls through Navajo sandstone to a breathtaking view over the Colorado River. The Slickrock Trail is rated high technical difficulty and high aerobic requirement. But many MANY "not-very-good" bikers ride this trail, with a few short hikes over the tough spots.
The Poison Spider Mesa trail is one of the "Must Do" rides of Moab. Combining physically demanding biking challenges with great views, this trail is for expert bikers. The trail can be ridden as an out-and-back, but here I describe a 13-mile loop that includes the infamous Portal Trail as the return-route off the mesa. Altitude gain is 1000 feet.
|
from __future__ import absolute_import
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
from keras.layers import Input, Dense, Dropout, \
RepeatVector, LSTM, concatenate, \
Conv2D, MaxPooling2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from keras import *
from .Config import *
from .AModel import *
class pix2code(AModel):
def __init__(self, input_shape, output_size, output_path):
AModel.__init__(self, input_shape, output_size, output_path)
self.name = "pix2code"
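        # Vision encoder: three conv/pool blocks turn the GUI screenshot into a
        # feature vector, repeated CONTEXT_LENGTH times to align with the text.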
image_model = Sequential()
image_model.add(Conv2D(32, (3, 3), padding='valid', activation='relu', input_shape=input_shape))
image_model.add(Conv2D(32, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Conv2D(64, (3, 3), padding='valid', activation='relu'))
image_model.add(Conv2D(64, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
image_model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Flatten())
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(RepeatVector(CONTEXT_LENGTH))
visual_input = Input(shape=input_shape)
encoded_image = image_model(visual_input)
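        # Language encoder: two stacked LSTMs embed the partial token context.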
language_model = Sequential()
language_model.add(LSTM(128, return_sequences=True, input_shape=(CONTEXT_LENGTH, output_size)))
language_model.add(LSTM(128, return_sequences=True))
textual_input = Input(shape=(CONTEXT_LENGTH, output_size))
encoded_text = language_model(textual_input)
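        # Decoder: fuse the image and text encodings, then predict a softmax
        # distribution over the next token in the vocabulary.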
decoder = concatenate([encoded_image, encoded_text])
decoder = LSTM(512, return_sequences=True)(decoder)
decoder = LSTM(512, return_sequences=False)(decoder)
decoder = Dense(output_size, activation='softmax')(decoder)
self.model = Model(inputs=[visual_input, textual_input], outputs=decoder)
optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def fit(self, images, partial_captions, next_words):
self.model.fit([images, partial_captions], next_words, shuffle=False, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)
self.save()
def fit_generator(self, generator, steps_per_epoch):
self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1)
self.save()
def predict(self, image, partial_caption):
return self.model.predict([image, partial_caption], verbose=0)[0]
def predict_batch(self, images, partial_captions):
return self.model.predict([images, partial_captions], verbose=1)
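# A minimal usage sketch; the input shape and vocabulary size below are
# illustrative assumptions (real values come from Config and the dataset).
if __name__ == '__main__':
    import numpy as np
    demo = pix2code(input_shape=(256, 256, 3), output_size=19, output_path='bin')
    dummy_image = np.zeros((1, 256, 256, 3))
    dummy_context = np.zeros((1, CONTEXT_LENGTH, 19))  # CONTEXT_LENGTH from .Config
    print(demo.predict(dummy_image, dummy_context).shape)  # (19,)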
|
SZS Horizontal Water Tube Gas Fired Boiler / Central Combustion Gas (Oil) Fired Steam Boiler: advanced design philosophy imported from Europe; a central combustion structure is adopted, which has the advantages of a simple structure and reliable, safe operation. A self-developed economizer improves operating efficiency.
This SZS D-type water tube gas/oil boiler structure animation demonstration was designed and made by Zhengzhou Boiler Group. If you need a boiler or pressure vessel, you can contact me by the following means.
(PDF) Steam Boiler – ResearchGate. On Sep 27, 2017, Mohd Parvez published the chapter "Steam Boiler" in a book. A Cochran boiler can be divided into three parts, such as the fire box; it burns coal or oil fuels, or else is used for heat recovery from the exhaust of large diesel engines.
The technology advantages of industrial steam boilers: coke oven gas is a combustible by-product gas obtained, together with coke and tar products, when several kinds of bituminous coking coal undergo high-temperature carbonization in a coke oven.
Qingdao Enneng Machinery Co., Ltd. (Enneng), located in Qingdao, China, specializes in the design, research, and fabrication of environment-friendly boilers. Its products and services include coal-water slurry boilers, biomass boilers, gas and oil fired boilers, and boiler revamping.
SZS Biogas Fired Steam Boiler: 1. Automatic and secure operation: the boiler is equipped with various control and protection devices; combustion and vapor pressure are fully automatically controlled, and the boiler also has programmed control of temperature and time. It is easy to use, economical, and simple to maintain. 2.
The water jacket boiler is a new Zhongding Boiler product. It is a horizontal, internal-combustion shell boiler fired by oil or gas. It has the advantages of high efficiency, energy saving, long working life, safe operation, and convenient maintenance.
|
#
# queue/smtp_foward.py
#
# Copyright (C) 2011 Damien Churchill <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
#
import smtplib
import logging
from vsmtpd.error import DenyError
from vsmtpd.hooks import hook
from vsmtpd.plugins.plugin import PluginBase
log = logging.getLogger(__name__)
class Plugin(PluginBase):
def __init__(self, config):
self.smtp_server = config.get('smtp_server')
self.smtp_port = config.getint('smtp_port') or 25
@hook
def queue(self, transaction):
log.info('forwarding to %s:%d', self.smtp_server, self.smtp_port)
smtp = smtplib.SMTP(self.smtp_server, self.smtp_port)
code, msg = smtp.mail(str(transaction.sender or ''))
if code != 250:
raise DenyError(msg)
for rcpt in transaction.recipients:
code, msg = smtp.rcpt(str(rcpt))
if code != 250:
raise DenyError(msg)
code, msg = smtp.docmd('data')
if code != 354:
raise smtplib.SMTPDataError(code, msg)
msg = transaction.body
header = smtplib.quotedata(msg.headers.as_string())
smtp.send(header)
msg.seek(msg.body_start)
for line in msg:
smtp.send(smtplib.quotedata(line))
smtp.send(smtplib.CRLF + '.' + smtplib.CRLF)
code, msg = smtp.getreply()
if code != 250:
raise DenyError(msg)
code, msg = smtp.quit()
log.info('finished queueing')
return True
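# A minimal configuration sketch; the INI-style section and option names are
# assumptions inferred from the config.get()/config.getint() calls above:
#
#     [smtp_forward]
#     smtp_server = 127.0.0.1
#     smtp_port = 2525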
|
The Kitsap Alliance of Property Owners (Kitsap Alliance) and/or its contributors have made an honest effort to provide clear, accurate, and current information. However, applicable rules, regulations, and laws often change, and you are advised that inadvertent errors can occur. Additionally, Kitsap Alliance and/or its respective contributors may make improvements and/or changes in the products and/or services at any time.
The information on this Web site has been carefully researched and provides some detail on matters of interest to users, however, is intended as general guidelines. The application and impact of rules, regulations, and laws can vary widely based upon the specific or unique facts involved. Thus, the information on this Web site is not intended to serve as legal, accounting, tax, or other professional advice, and you are encouraged to consult with your professional advisors on specific matters.
Kitsap Alliance and/or its contributors disclaim any responsibility for positions taken by readers in individual cases or for any misunderstanding on the part of readers.
Kitsap Alliance and/or its suppliers are providing links to third-party sites to you as a convenience and resource. A link to a non-Kitsap Alliance website does not mean that Kitsap Alliance and/or its suppliers endorse or accept any responsibility for the content, or the use, of such web site. It is up to you to take precautions to ensure that whatever you select for your use is free of computer viruses and other harmful code.
Kitsap Alliance makes no representations whatsoever about any other Web sites which you may access from our site and neither endorses or accepts any responsibility for the content or use of these sites.
Notices regarding documents, software, and services available on this website: Permission to use documents (such as white papers, press releases, data sheets and FAQs) from this server is granted, provided that (1) the below copyright notice appears in all copies and that both the copyright notice and this permission notice appear, and (2) use of such documents from this server is for informational and non-commercial or personal use only. Use for any other purpose is expressly prohibited by law.
By sending Kitsap Alliance any information or material, you grant an unrestricted, irrevocable license to use, reproduce, display, perform, modify, transmit and distribute those materials or information. Kitsap Alliance will not release your name or otherwise publicize the fact that you submitted materials or other information to us unless: (a) we ask your permission to use your name; or (b) we first notify you that the materials or other information you submit to a particular part of this site will be published or otherwise used with your name on it; or (c) we are required to do so by law.
Opinions expressed in these reports are the opinions of the authors, not necessarily those of Kitsap Alliance, and may change without prior notice.
The contents of this website, and any communications from Kitsap Alliance, are intended solely for informational purposes. Statements made on this site by various authors and other contributors do not necessarily reflect the opinions of Kitsap Alliance and should not be construed as an endorsement by Kitsap Alliance, either expressed or implied. Kitsap Alliance is not responsible for typographic errors or other inaccuracies in the content provided on our web site. We believe the information contained here to be accurate and reliable. However, errors may occasionally occur. Therefore, all information and materials are provided “AS IS” without any warranty of any kind.
Kitsap Alliance reserves the right to modify any information contained on our Web site without notifying current or prospective customers. In no event will Kitsap Alliance be held liable for any indirect, special, incidental or consequential damages arising out of the use of, or payments based on, the information contained in our website.
Any comments or materials sent to Kitsap Alliance, including feedback such as questions, comments, suggestions, and any other response, shall be considered non-confidential. Kitsap Alliance and its authors and contributors shall incur no obligation of any kind with respect to such response, and Kitsap Alliance shall be free to use, reproduce, distribute and publicly display such comments and materials without limitation or liability.
|
"""Setup file for iotile-transport-jlink package."""
from setuptools import setup, find_packages
import version
setup(
name="iotile-transport-jlink",
packages=find_packages(exclude=("test",)),
version=version.version,
license="LGPLv3",
install_requires=[
"iotile-core>=5.2",
"pylink-square>=0.10",
"pylibftdi>=0.19"
],
python_requires=">=3.7,<4",
include_package_data=True,
entry_points={'iotile.device_adapter': ['jlink = iotile_transport_jlink.jlink:JLinkAdapter']},
description="IOTile JLINK Transport Plugin",
author="Arch",
author_email="[email protected]",
url="http://github.com/iotile/coretools",
keywords=["iotile", "arch", "embedded", "hardware", "firmware"],
classifiers=[
"Programming Language :: Python",
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules"
],
long_description="""\
IOTile JLink Transport Plugin
-------------------------------
A python plugin into IOTile Coretools that allows for using a JLink adapter to
send RPCs over an IOTile module's SWD interface. The IOTile device needs to be
compiled with support for the SWD RPC interface for this to work.
"""
)
|
We’re a week into December, and what better way to celebrate advent than with 24 of the most frequently asked questions we at Know You More receive on the subject of coaching?
None have a chocolate behind them but they are nevertheless very sweet.
Disclaimer: This post may have broken the world record for most uses of the word ‘coach’.
1. What is virtual coaching?
Virtual coaching is a face-to-face coaching conversation conducted over a video chat service like Zoom, Skype or Facetime.
2. What is the difference between a coach and a mentor?
There is a difference. You can read a previous article on this – here is the link.
3. Do coaches have coaches or do they just coach themselves?
Yes, many coaches have coaches and will also undertake professional development which is known as coaching supervision.
4. What is the biggest benefit of coaching?
For me, it is in the developing of thinking patterns and building your self-knowledge. It helps you see your blind spots, smash through your limiting beliefs and achieve great clarity. That and the space in your life to stop and invest in developing yourself.
5. Is it like sports coaching?
There is a long-standing connection with sports coaching. A great example being Tim Gallwey and his book The Inner Game of Tennis. Sports coaching still tends to be more directive. Professional coaching is probably more akin to sports psychology.
6. Is it like Life Coaching?
The art of coaching is based fundamentally on a conversation. A skilled coach will ask great questions, challenge you and provide feedback to help open up your thinking. Life, executive, leadership and career are the different fields where practitioners ply their trade.
7. What does the ‘professional’ mean in professional coach?
In our eyes, it is a coach that has completed recognised training, achieved a coaching practitioner qualification and coaches to a recognised standard of practice and ethics.
8. Do I need to have a qualification to call myself a professional coach?
No, but much like many vocations, you’re more likely to be employed by those looking for a coach if you have one.
9. How many coaching sessions should I have?
10. What is a coaching contract?
11. What can a coach tell me that I don’t already know?
Coaching is based on the premise that we all know more than we think we know. A coach won’t tell you anything; they’ll just help you get to your own answer (unless you ask them really nicely).
12. Is it like therapy?
Whilst coaching is not therapy, and should not be viewed as therapy, it can provide an alternative to people who have previously considered counselling to resolve a situation. It is normal for coaching to be less remedial and more future focussed.
13. Do coaches just solve your problems for you?
No. Coaching is based on the principle that we are all responsible for our own lives and the results we are getting. If we acknowledge that we are responsible for something, it follows that we have the power and influence over it.
14. How did you get into coaching?
I was really fortunate to have the opportunity to work with a coach within my organisation. It completely opened up my thinking and I thought ‘I want to do this for other people’.
15. How do I become qualified as a coach?
Ah the hat-trick! Another article for you – here is the link.
16. Will coaching help build my confidence?
Yes. We all have internal stories that we tell ourselves, inner critics and limiting beliefs. In seeing these from different perspectives and the actions we then take, we certainly build self-confidence.
17. Can anyone be coached?
I don’t think so. A coachee has to have the ability to be open, to trust and commit to the process. Not everyone can do that.
18. How much does coaching cost?
There is no ‘one size fits all’ price – it will vary depending on the style of coaching, whether it is private or within an organisation and so many other factors. Within an organisational setting, the price point is in the region of £250 per hour. Privately, around £100 per hour.
19. Which is better face to face or virtual coaching?
Historically, face to face has been perceived as the better coaching experience; however, the line is thinning, with people increasingly opting for the convenience and flexibility technology provides.
20. What makes a good coaching relationship?
Much like all good relationships, it is based on building trust and rapport with the other person. It is the cornerstone of a successful coaching relationship.
21. How do I choose a coach?
Research what you would like to achieve. As the majority of coaching relationships are made through referral, it’s good to ask around. Like hiring a tradesman. Meet a few, get some references and go with the one that is the right fit.
22. Do I have to have a goal to begin coaching?
It is of benefit to have something in mind that you want to work on. More importantly is the commitment to the coaching programme. It is normal to start coaching thinking you want to work on one thing and then, through exploring your own thinking, find out your ‘real’ goal was something else.
23. What kind of person works with a coach?
As coaching is just a conversation it can benefit people from all walks of life.
Bill Gates famously opened up his TedTalk with: “Everyone needs a coach”. Whether we are a CEO, leader, teacher, basketball player or bridge player, we all need people who will help us reach our goals and give us feedback.
24. Does Santa have a coach?
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Run from Cryptchat
# python3 -m unittest discover
import unittest
from ..crypto.aes import AESCipher
class testAESCipher(unittest.TestCase):
def test_encrypt_decrypt(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "[TOP SECRET] I like k-pop"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_long(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_unicode(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "『秘密』K-popは好きです"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_128(self):
key = "Ya/C/EvmwW1xWhjM1BgZ/g=="
cipher = AESCipher(key)
m = "Private stuff"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_unicode_128(self):
key = "Ya/C/EvmwW1xWhjM1BgZ/g=="
cipher = AESCipher(key)
m = "『秘密』K-popは好きです"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def main():
unittest.main()
if __name__ == "__main__":
main()
|
Apex Legends has a hidden monster lurking in the map - SKOAR!
Apex Legends has already completed more than a week since its launch, and yet the game is constantly in the news for one reason or another. This time it's an easter egg that no one could have expected so soon. There are around ten Nessy plushies scattered around the map. If players kill those Nessy plushies in a specific order, they are in for a very special visit from the Loch Ness monster herself.
Players have also noticed a special phrase, “A Nessy appears”, in the kill feed whenever a Nessy is nearby. It appears as more of a clue or hint so that players don’t overlook it. A dedicated Apex Legends subreddit covers just that. A Twitch streamer also captured what happens when all ten Nessy plushies are killed in the specified order. Below is the map displaying the locations of all 10 Nessy dolls and the order in which they should be killed to summon the legendary sea monster.
The image itself was first posted by a Reddit user whose account is now mysteriously deleted. However, if you want more details on these head to this subreddit /r/nessysecrets. If you want to have a look at the Loch Ness Monster without having to complete this mission, check out this clip by Twitch user Hewchy. After the 10 Nessy kills, you can see a hazy shape of the legendary sea monster coming out of the water.
Apex Legends has been a massively successful game lately, hitting a whopping 25 million player count within a week and 2 million concurrent players. With this many players in the first week, it was quite likely that someone would soon notice this easter egg. Currently, there’s an Apex Legends tournament on Twitch where 48 top streamers are competing for a prize pool of $100,000. Furthermore, Apex Legends also plans to introduce some Valentine’s cosmetic items.
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii, text_to_repr
def _get_next_position(context):
"""Get the next contribution field position for the event."""
event_id = context.current_parameters['event_id']
res = db.session.query(db.func.max(ContributionField.position)).filter_by(event_id=event_id).one()
return (res[0] or 0) + 1
class ContributionField(db.Model):
__tablename__ = 'contribution_fields'
__table_args__ = (db.UniqueConstraint('event_id', 'legacy_id'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
legacy_id = db.Column(
db.String,
nullable=True
)
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
title = db.Column(
db.String,
nullable=False
)
description = db.Column(
db.Text,
nullable=False,
default=''
)
is_required = db.Column(
db.Boolean,
nullable=False,
default=False
)
is_active = db.Column(
db.Boolean,
nullable=False,
default=True
)
field_type = db.Column(
db.String,
nullable=True
)
field_data = db.Column(
JSON,
nullable=False,
default={}
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'contribution_fields',
order_by=position,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
# relationship backrefs:
# - abstract_values (AbstractFieldValue.contribution_field)
# - contribution_values (ContributionFieldValue.contribution_field)
def _get_field(self, management=False):
from indico.modules.events.contributions import get_contrib_field_types
try:
impl = get_contrib_field_types()[self.field_type]
except KeyError:
return None
return impl(self, management=management)
@property
def field(self):
return self._get_field()
@property
def mgmt_field(self):
return self._get_field(management=True)
@property
def filter_choices(self):
return {x['id']: x['option'] for x in self.field_data.get('options', {})}
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'field_type', is_required=False, is_active=True, _text=self.title)
@locator_property
def locator(self):
return dict(self.event.locator, contrib_field_id=self.id)
class ContributionFieldValueBase(db.Model):
__abstract__ = True
#: The name of the backref on the `ContributionField`
contribution_field_backref_name = None
data = db.Column(
JSON,
nullable=False
)
@declared_attr
def contribution_field_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('events.contribution_fields.id', name='fk_{}_contribution_field'.format(cls.__tablename__)),
primary_key=True,
index=True
)
@declared_attr
def contribution_field(cls):
return db.relationship(
'ContributionField',
lazy=False,
backref=db.backref(
cls.contribution_field_backref_name,
cascade='all, delete-orphan',
lazy=True
)
)
@property
def friendly_data(self):
return self.contribution_field.field.get_friendly_value(self.data)
class ContributionFieldValue(ContributionFieldValueBase):
__tablename__ = 'contribution_field_values'
__table_args__ = {'schema': 'events'}
contribution_field_backref_name = 'contribution_values'
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False,
primary_key=True
)
# relationship backrefs:
# - contribution (Contribution.field_values)
@return_ascii
def __repr__(self):
text = text_to_repr(self.data) if isinstance(self.data, unicode) else self.data
return format_repr(self, 'contribution_id', 'contribution_field_id', _text=text)
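# A minimal usage sketch (assumes an active Indico db session, existing `event`
# and `contribution` objects, and that 'single_choice' is a registered field type):
#
#     field = ContributionField(event=event, title='T-shirt size',
#                               field_type='single_choice',
#                               field_data={'options': [{'id': 's', 'option': 'Small'}]})
#     db.session.flush()  # assigns `position` via _get_next_position()
#     value = ContributionFieldValue(contribution=contribution,
#                                    contribution_field=field, data='s')
#     print value.friendly_data  # rendered by the registered field implementation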
|
Put your finished creation on display! These white, 8" cake circles from Wilton are crafted from corrugated cardboard with a grease-resistant finish. For heavier cakes, stack multiple cake circles together for additional support.
|
__author__ = 'scott'
import numpy as np
# Set the random seed for reproducibility
seed = np.random.randint(2**16)
print "Seed: ", seed
np.random.seed(seed)
import matplotlib.pyplot as plt
from optofit.models.model import Model
from optofit.population.population import Population
from optofit.neuron.neuron import Neuron
from optofit.neuron.compartment import Compartment, CalciumCompartment
from optofit.neuron.channels import LeakChannel, NaChannel, KdrChannel, Ca3KdrChannel, Ca3KaChannel, Ca3NaChannel, Ca3CaChannel, Ca3KahpChannel, Ca3KcChannel
from optofit.simulation.stimulus import PeriodicStepStimulusPattern, DirectCompartmentCurrentInjection
from optofit.simulation.simulate import simulate
from optofit.observation.observable import NewDirectCompartmentVoltage, IndependentObservations, LinearFluorescence
from optofit.plotting.plotting import plot_latent_compartment_state, plot_latent_compartment_V_and_I
from optofit.inference.fitting import fit_mcmc
from optofit.models.hyperparameters import hypers
def make_model():
"""Make a model of a single compartment neuron with a handful of channels and a directly
observable voltage
"""
model = Model()
# The population object doesn't do anything yet, but eventually it could support
# synapses between neurons
population = Population('population', model)
# Explicitly build the neuron
neuron = Neuron('neuron', population)
# The single compartment corresponds to the cell body
body = Compartment('body', neuron)
# body = CalciumCompartment('body', neuron)
# Add a few channels
# body.add_channel(LeakChannel('leak', body))
# body.add_channel(NaChannel('na', body))
body.add_channel(KdrChannel('kdr', body))
# ca3kdr = Ca3KdrChannel('ca3kdr', body)
# ca3ka = Ca3KaChannel('ca3ka', body)
# ca3na = Ca3NaChannel('ca3na', body)
# ca3ca = Ca3CaChannel('ca3ca', body)
# ca3kahp = Ca3KahpChannel('ca3kahp', body)
# ca3kc = Ca3KcChannel('ca3kc', body)
#
#body.add_channel(ca3kdr)
#body.add_channel(ca3ka)
#body.add_channel(ca3na)
#body.add_channel(ca3ca)
#body.add_channel(ca3kahp)
#body.add_channel(ca3kc)
# Now connect all the pieces of the neuron together
neuron.add_compartment(body, None)
population.add_neuron(neuron)
model.add_population(population)
# Create the observation model
observation = IndependentObservations('observations', model)
body_voltage = NewDirectCompartmentVoltage('body voltage', model, body)
# body_fluorescence = LinearFluorescence('body fluorescence' , model, body)
# observation.add_observation(body_fluorescence)
observation.add_observation(body_voltage)
model.add_observation(observation)
return model
# Instantiate the true model
true_model = make_model()
# Create a stimulus for the neuron
# Stimulate the neuron by injecting a current pattern
stim_on = 2.0
stim_off = 50.0
stim_on_dur = .5
stim_off_dur = 1.5
stim_I = 500.0
stim_pattern = PeriodicStepStimulusPattern(stim_on, stim_off, stim_on_dur, stim_off_dur, stim_I)
stim = DirectCompartmentCurrentInjection(true_model.population.neurons[0].compartments[0], stim_pattern)
# Set the recording duration
t_start = 0
t_stop = 0.2
dt = 0.1
t = np.arange(t_start, t_stop, dt)
# Simulate the model to create synthetic data
data_sequence = simulate(true_model, t, stim)
true_model.add_data_sequence(data_sequence)
# Plot the true and observed voltage
plt.ion()
fig = plt.figure(figsize=(8,6))
# axs = plot_latent_compartment_state(t, z, true_model.population.neurons[0].compartments[0])
axs = plot_latent_compartment_V_and_I(t, data_sequence,
true_model.population.neurons[0].compartments[0],
true_model.observation.observations[0],)
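# Mutable dict used as an iteration counter the callbacks below can update in place
# (this Python 2 script has no nonlocal)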
i = {'i' : 0}
# Add a callback to update the plots
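# NOTE: plot_sample indexes channels[0..2] (leak, na, kdr); with only the kdr
# channel added above, use print_g_leak as the callback instead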
def plot_sample(m):
plt.gcf().clf()
# latent = m.data_sequences[0].latent
# plot_latent_compartment_state(t, m.data_sequences[0].latent,
# m.data_sequences[0].states,
# m.population.neurons[0].compartments[0])
axs = plot_latent_compartment_V_and_I(t, m.data_sequences[0],
m.population.neurons[0].compartments[0],
m.observation.observations[0])
print '%d: g_leak: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
print '%d: g_na: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[1].g.value)
print '%d: g_kdr: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[2].g.value)
fig.suptitle('Iteration: %d' % i['i'])
i['i'] += 1
plt.pause(0.001)
def print_g_leak(m):
if np.mod(i['i'], 1) == 0:
# print '%d: g_leak: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
# print '%d: g_na: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[1].g.value)
print '%d: g_kdr: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
i['i'] += 1
# Generic fitting code will enumerate the components of the model and determine
# which MCMC updates to use.
raw_input("Press enter to begin MCMC")
print "Running particle MCMC"
# samples = fit_mcmc(true_model, N_samples=1000, callback=plot_sample, geweke=True)
samples = fit_mcmc(true_model, N_samples=10000, callback=print_g_leak, print_interval=10, geweke=True)
# Plot the results
import scipy.stats
def plot_channel(samples, index, name, a, b, xlim=None):
gs = np.array([m.population.neurons[0].compartments[0].channels[index].g.value for m in samples])
plt.figure()
_,bins,_ = plt.hist(gs, 50, normed=True, alpha=0.5)
if xlim is None:
plt.plot(bins, scipy.stats.gamma.pdf(bins, a, scale=b))
else:
xx = np.linspace(xlim[0], xlim[1])
plt.plot(xx, scipy.stats.gamma.pdf(xx, a, scale=1.0/b))
    plt.title('$g_{%s}$' % name)
# plot_channel(samples, 0, 'leak', hypers['a_g_leak'].value, hypers['b_g_leak'].value, (1e-4,3))
# plot_channel(samples, 1, 'na', hypers['a_g_na'].value, hypers['b_g_na'].value, (1,30))
plot_channel(samples, 0, 'kdr', hypers['a_g_kdr'].value, hypers['b_g_kdr'].value, (1,14))
plt.ioff()
plt.show()
|
Research opportunities, with presentations at local undergraduate and regional conferences.
Internship opportunities at the correctional facilities, community organizations and local schools.
Skills that apply to graduate school or immediate entry into the work force.
Psychology, as the crossroad science of the cultural mind, individual behavior, and the organic brain, prepares you for a wide variety of careers. The major provides you with a unique blend of action, reflection, Christian spiritual development, and intellectual growth.
As a Psychology major, you will cultivate a unique view of human beings. You will become critically aware of the limitations and biases in current knowledge. You will learn to seek out new approaches to understanding others. You will discover how to observe, record, analyze, discuss, interpret, and explain what humans do and why.
What are Psychology majors doing?
To prepare you for a career in psychology, all psychology majors conduct original research and most student projects are presented at regional undergraduate or professional conferences. Recent majors have presented at the Blue Ridge Undergraduate Research Conference, Southeastern Psychological Association, and the Eastern Psychological Association.
Students studying psychology are encouraged to participate in an internship that serves the community where they hope to work. Recent majors have worked in mental health facilities, school systems, crisis prevention organizations, non-profit organizations, local businesses and correctional facilities.
The psychology program is designed to provide students with converging viewpoints addressing the diversity of psychology, the integration of faith and psychology, and practical experiences to enhance skills appropriate for graduate study or an immediate career.
To view all required Daytime courses for a BS in Psychology click here.
To view all required Evening and Weekend courses for a BS in Psychology click here.
To view all required courses for a Minor in Psychology click here.
The Psychology program requires a laptop with Microsoft Office 2010 or later, and wireless and webcam capability.
|
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import random
from decimal import Decimal
from string import ascii_uppercase as alphabet
from mathmakerlib.calculus.unit import LENGTH_UNITS
from mathmaker.lib import shared
from mathmaker.lib.constants import pythagorean
from mathmaker.lib.constants.numeration import (PRECISION, HUNDREDTH, TENTH,
UNIT, THOUSANDTH,
TEN_THOUSANDTH)
from .Q_Structure import Q_Structure
from mathmaker.lib.core.base_calculus import Item, Sum
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.calculus import Equation, Equality
from mathmaker.lib.core.geometry import RightTriangle
AVAILABLE_Q_KIND_VALUES = {'pythagorean_theorem': ['calculate_hypotenuse',
'calculate_one_leg'],
'converse_of_pythagorean_theorem': ['default'],
'contrapositive_of_pythagorean_theorem':
['default'],
'cosinus': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle'],
'sinus': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle'],
'tangente': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle']}
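# Each question kind maps to its allowed subkinds; Q_Structure.__init__()
# receives this table to validate q_kind/q_subkind.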
# ------------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
##
# @class Q_RightTriangle
# @brief All questions about the right triangle
class Q_RightTriangle(Q_Structure):
# --------------------------------------------------------------------------
##
# @brief Constructor.
# @options
# @return One instance of question.Q_RightTriangle
def __init__(self, q_kind='default_nothing', **options):
self.derived = True
# The call to the mother class __init__() method will set the
# fields matching optional arguments which are so far:
# self.q_kind, self.q_subkind
# plus self.options (modified)
Q_Structure.__init__(self,
q_kind, AVAILABLE_Q_KIND_VALUES,
**options)
# The purpose of this next line is to get the possibly modified
# value of **options
options = self.options
# Set the default values of the different options
use_pythagorean_triples = False
if (('use_pythagorean_triples' in options
and options['use_pythagorean_triples'])
or (self.q_kind == 'converse_of_pythagorean_theorem')):
# __
use_pythagorean_triples = True
use_decimals = True
if 'use_decimals' in options and not options['use_decimals']:
use_decimals = False
self.round_to = ""
if 'round_to' in options and options['round_to'] in PRECISION:
self.round_to = options['round_to']
if not use_pythagorean_triples:
if self.round_to == "":
if use_decimals:
self.round_to = HUNDREDTH
else:
self.round_to = TENTH
self.use_pythagorean_triples = use_pythagorean_triples
self.figure_in_the_text = True
if ('figure_in_the_text' in options
and not options['figure_in_the_text']):
# __
self.figure_in_the_text = False
rotation_option = 'no'
if 'rotate_around_barycenter' in options:
rotation_option = options['rotate_around_barycenter']
self.final_unit = ""
if ('final_unit' in options
and options['final_unit'] in LENGTH_UNITS):
# __
self.final_unit = options['final_unit']
sides_units = [self.final_unit,
self.final_unit,
self.final_unit]
# Later, allow to use a different length unit for the sides
# than the final expected unit ; allow different units for different
# sides (for instance giving a list in option 'sides_units')...
# So far we will do with only ONE unit
# if 'sides_units' in options \
# and options['sides_units'] in LENGTH_UNITS:
# # __
# sides_units = options['sides_units']
self.right_triangle = None
self.unknown_side = None
self.known_sides = []
        # Now set some values randomly
letters = [elt for elt in alphabet]
random.shuffle(letters)
vertices_names = (letters.pop(), letters.pop(), letters.pop())
# Here you can begin to write code for the different
# q_kinds & q_subkinds
if self.q_kind == 'pythagorean_theorem':
sides_values = [None, None, None]
if use_pythagorean_triples:
triples = pythagorean.ALL_TRIPLES_5_100
if use_decimals:
triples = pythagorean.ALL_TRIPLES_5_100 \
+ pythagorean.TRIPLES_101_200_WO_TEN_MULTIPLES
sides_values = random.choice(triples)
if use_decimals:
sides_values = \
[Decimal(str(Decimal(sides_values[0]) / 10)),
Decimal(str(Decimal(sides_values[1]) / 10)),
Decimal(str(Decimal(sides_values[2]) / 10))]
if self.q_subkind == 'calculate_hypotenuse':
sides_values[2] = ""
sides_units[2] = ""
else:
# case: self.q_subkind == 'calculate_one_leg'
leg0_or_1 = random.choice([0, 1])
sides_values[leg0_or_1] = ""
sides_units[leg0_or_1] = ""
else:
# NO pythagorean triples.
# The two generated values must NOT match any pythagorean
# triple
if use_decimals:
min_side_value = 5
max_side_value = 200
else:
min_side_value = 5
max_side_value = 100
if self.q_subkind == 'calculate_hypotenuse':
first_leg = random.randint(min_side_value, max_side_value)
# we will take the leg values between
# at least 25% and at most 150% of the length of first leg
# (and smaller than max_side_value)
second_leg_values = []
for i in range(int(first_leg * 1.5)):
if (i + int(first_leg * 0.25) <= 1.5 * first_leg
and i + int(first_leg * 0.25) <= max_side_value):
# __
second_leg_values += [i + int(first_leg * 0.25)]
second_leg_unauthorized_values = \
pythagorean.get_legs_matching_given_leg(first_leg)
second_leg_possible_values = \
list(set(second_leg_values)
- set(second_leg_unauthorized_values))
if random.choice([True, False]):
sides_values = \
[first_leg,
random.choice(second_leg_possible_values),
""]
sides_units[2] = ""
else:
sides_values = \
[random.choice(second_leg_possible_values),
first_leg,
""]
sides_units[2] = ""
else:
# case: self.q_subkind == 'calculate_one_leg'
hypotenuse = random.randint(min_side_value, max_side_value)
# we will take the leg values between
# at least 25% and at most 90% of the length of hypotenuse
# to avoid "weird" cases (with a very subtle difference
# between the given values and the one to calculate)
leg_values = []
for i in range(int(hypotenuse * 0.9)):
if i + int(hypotenuse * 0.25) <= 0.9 * hypotenuse:
leg_values += [i + int(hypotenuse * 0.25)]
leg_unauthorized_values = \
pythagorean\
.get_legs_matching_given_hypotenuse(hypotenuse)
leg_possible_values = list(set(leg_values)
- set(leg_unauthorized_values))
if random.choice([True, False]):
sides_values = ["",
random.choice(leg_possible_values),
hypotenuse]
sides_units[0] = ""
else:
sides_values = [random.choice(leg_possible_values),
"",
hypotenuse]
sides_units[1] = ""
self.right_triangle = \
RightTriangle((vertices_names,
'sketch'),
rotate_around_isobarycenter=rotation_option)
self.right_triangle.leg[0].label = Value(sides_values[0],
unit=sides_units[0])
self.right_triangle.leg[1].label = Value(sides_values[1],
unit=sides_units[1])
self.right_triangle.hypotenuse.label = Value(sides_values[2],
unit=sides_units[2])
for side in self.right_triangle.side:
if side.label.raw_value == "":
self.unknown_side = side.clone()
else:
self.known_sides += [side.clone()]
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
sides_values = [None, None, None]
triples = list(pythagorean.ALL_TRIPLES_5_100)
if use_decimals:
triples += list(pythagorean.TRIPLES_101_200_WO_TEN_MULTIPLES)
sides_values = random.choice(triples)
if self.q_kind == 'contrapositive_of_pythagorean_theorem':
# We'll change exactly one value to be sure the triplet
# is NOT pythagorean
if random.choice([True, False]):
# We will decrease the lowest value
max_delta = int(0.1 * sides_values[0])
min_delta = 1
if min_delta > max_delta:
max_delta = min_delta
chosen_delta = random.choice(
[i + min_delta
for i in range(max_delta - min_delta + 1)])
sides_values = [sides_values[0] - chosen_delta,
sides_values[1],
sides_values[2]]
else:
# We will increase the highest value
max_delta = int(0.1 * sides_values[2])
min_delta = 1
if min_delta > max_delta:
max_delta = min_delta
chosen_delta = random.choice(
[i + min_delta
for i in range(max_delta - min_delta + 1)])
sides_values = [sides_values[0],
sides_values[1],
sides_values[2] + chosen_delta]
if use_decimals:
sides_values = [Decimal(str(Decimal(sides_values[0]) / 10)),
Decimal(str(Decimal(sides_values[1]) / 10)),
Decimal(str(Decimal(sides_values[2]) / 10))]
self.right_triangle = \
RightTriangle((vertices_names,
'sketch'),
rotate_around_isobarycenter=rotation_option)
self.right_triangle.leg[0].label = Value(sides_values[0],
unit=sides_units[0])
self.right_triangle.leg[1].label = Value(sides_values[1],
unit=sides_units[1])
self.right_triangle.hypotenuse.label = Value(sides_values[2],
unit=sides_units[2])
self.right_triangle.right_angle.mark = ""
# --------------------------------------------------------------------------
##
# @brief Returns the text of the question as a str
def text_to_str(self):
PRECISION_IDIOMS = {UNIT: _("to the unit"),
TENTH: _("to the tenth"),
                            HUNDREDTH: _("to the hundredth"),
THOUSANDTH: _("to the thousandth"),
TEN_THOUSANDTH: _("to the ten thousandth")}
M = shared.machine
result = self.displayable_number
if self.q_kind == 'pythagorean_theorem':
if self.figure_in_the_text:
result += M.insert_picture(self.right_triangle)
else:
result += _("The triangle {triangle_name} has a right \
angle in {right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(self.right_triangle.vertex[1]
.name))
result += " " + str(self.known_sides[0].length_name) \
+ " = " \
+ self.known_sides[0].label.into_str(display_unit=True)\
+ ". " \
+ str(self.known_sides[1].length_name) \
+ " = " \
+ self.known_sides[1].label.into_str(display_unit=True)\
+ ". " + M.write_new_line()
result += _("Calculate the length of {this_side}.")\
.format(this_side=self.unknown_side.name)
if self.final_unit != "":
result += " " + _("Give the result in {this_unit}.")\
.format(this_unit=self.final_unit)
if self.round_to != "":
result += " " + _("Round the result {at_this_precision}.")\
.format(at_this_precision=PRECISION_IDIOMS[self.round_to])
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
if self.figure_in_the_text:
result += M.insert_picture(self.right_triangle)
else:
sides_copy = [self.right_triangle.side[0].clone(),
self.right_triangle.side[1].clone(),
self.right_triangle.side[2].clone()]
random.shuffle(sides_copy)
side0, side1, side2 = sides_copy
result += _("{triangle_name} is a triangle such as "
"{side_length0} = {nb0}, {side_length1} = {nb1} "
"and {side_length2} = {nb2}")\
.format(triangle_name=str(self.right_triangle.name),
side_length0=str(side0.length_name),
nb0=side0.label.into_str(display_unit=True),
side_length1=str(side1.length_name),
nb1=side1.label.into_str(display_unit=True),
side_length2=str(side2.length_name),
nb2=side2.label.into_str(display_unit=True))
result += _("Is it a right triangle ? Prove your answer and if "
"the triangle is right, give the name of the right "
"angle.")
result += M.write_new_line()
return result + M.write_new_line()
# --------------------------------------------------------------------------
##
# @brief Returns the answer of the question as a str
def answer_to_str(self):
M = shared.machine
if self.q_kind == 'pythagorean_theorem':
            # Resolution (the part with the figure will be dealt with later)
result = _("The triangle {triangle_name} has a right angle in "
"{right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(self.right_triangle.vertex[1].name))
result += M.write_new_line()
result += _("Then by Pythagoras theorem") + ":"
pyth_eq = self.right_triangle.pythagorean_substequality()
result += M.write_math_style1(pyth_eq.into_str())
if self.use_pythagorean_triples:
result += M.write(Equation(pyth_eq.substitute())
.auto_resolution(
dont_display_equations_name=True,
pythagorean_mode=True,
unit=self.final_unit,
underline_result=True))
else:
result += M.write(Equation(pyth_eq.substitute())
.auto_resolution(
dont_display_equations_name=True,
decimal_result=self.round_to,
pythagorean_mode=True,
unit=self.final_unit,
underline_result=True))
if self.figure_in_the_text:
return self.displayable_number + result
else:
content = [self.displayable_number
+ _("Sketch") + ":"
+ M.write_new_line()
+ M.insert_picture(self.right_triangle),
result]
return M.write_layout((1, 2), [9, 9], content)
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
hyp_equality = Equality([Item(('+',
self.right_triangle.
hypotenuse.length_name,
2)),
Item(('+',
self.right_triangle.
hypotenuse.label.raw_value,
2))])
hyp_equality_step2 = Equality([Item(('+',
self.right_triangle.
hypotenuse.length_name,
2)),
Item(Item(('+',
self.right_triangle.
hypotenuse.label.raw_value,
2)).evaluate())])
legs_equality = Equality([
Sum([Item(('+',
self.right_triangle.leg[0].length_name,
2)),
Item(('+',
self.right_triangle.leg[1].length_name,
2))]),
Sum([Item(('+',
self.right_triangle.leg[0].label.raw_value,
2)),
Item(('+',
self.right_triangle.leg[1].label.raw_value,
2))])])
legs_equality_step2 = Equality([
Sum([Item(('+',
self.right_triangle.leg[0].length_name,
2)),
Item(('+',
self.right_triangle.leg[1].length_name,
2))]),
Item(Sum([Item(('+',
self.right_triangle.leg[0].label.raw_value,
2)),
Item(('+',
self.right_triangle.leg[1].label.raw_value,
2))]).evaluate())])
result = _("On one hand:") + M.write_new_line()
result += M.write_math_style1(hyp_equality.into_str())
result += M.write_math_style1(hyp_equality_step2.into_str())
result += _("On the other hand:") + M.write_new_line()
result += M.write_math_style1(legs_equality.into_str())
result += M.write_math_style1(legs_equality_step2.into_str())
result += _("Hence:")
if self.q_kind == 'converse_of_pythagorean_theorem':
result += M.write_math_style1(
self.right_triangle.pythagorean_equality().into_str())
result += _("So, by the converse of the pythagorean theorem,")
# result += M.write_new_line()
result += " "
result += _("{triangle_name} has a right angle "
"in {right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(
self.right_triangle.vertex[1].name))
elif self.q_kind == 'contrapositive_of_pythagorean_theorem':
result += M.write_math_style1(
self.right_triangle.pythagorean_equality(
equal_signs=['neq']).into_str())
result += _("So, by the contrapositive of the pythagorean "
"theorem,")
result += " "
result += _("{triangle_name} has no right angle.")\
.format(triangle_name=str(self.right_triangle.name))
if self.figure_in_the_text:
return self.displayable_number + result
else:
content = [self.displayable_number
+ _("Sketch") + ":"
+ M.write_new_line()
+ M.insert_picture(self.right_triangle),
result]
return M.write_layout((1, 2), [6, 12], content)
|
A celebration of life will be held next May 20, the 75th birthday of cycling, walking and “Car-lite” philosophy advocate, Green Party activist and municipal worker Chris Bradshaw, who died earlier this month.
Born in Vancouver to a Canadian father and American mother, Bradshaw moved with his family to Dover, Ohio, when he was a child.
As he writes in an online blog, his first “full-time” year as an adult in his native land was in the Centennial year, 1967, the year after his graduation from Ohio’s Oberlin College with a degree in political science.
As was more common in those times, and perhaps a harbinger of his later activities as a co-founder of both the OttaWalk group and the Vrtucar network, he hitchhiked in.
Like it was for most visitors, Expo 67 was a life-changer for Bradshaw, particularly Moshe Safdie’s iconic model community, Habitat 67.
“In thinking back,” he writes in his 2017 blog, “The Safdie exhibit in Montreal was probably [the] biggest influence in my life.
“I was drawn to housing and planning and transportation ever since, although with quite a bit of interest in freedom of information, electoral reform, and futures thrown in.”
Leaving his GM job in 1969, he joined the Company of Young Canadians in Ottawa that same year, which was also the year he “eloped” for a Wakefield wedding to his sweetheart Maryann Bradshaw, whom he’d first met on his earlier travels; the couple moved into an apartment in the Glebe.
Over the years, he became executive director of the Canadian Organization of Public Housing Tenants, before joining the then-Regional Municipality of Ottawa-Carleton, where he served as community relations specialist for 22 years, retiring in 1995.
Five years later, Bradshaw co-founded the Vrtucar car-sharing service with one car and four members. Today, the network has dozens of locations across the national capital and partnerships with services in Quebec, Paris and Halifax. Bradshaw sold his share to a partner in 2006.
His life in politics began with a run in the provincial election of 1999, collecting more than 1,200 votes for the Green party in Ottawa Centre. He represented the Greens in a number of federal and provincial elections over the years, frequently posting respectable numbers in the party’s early years.
In 2001, the federal Green Party board named Bradshaw the party’s interim leader, and he was elected to the post a year later on the understanding that a new leader would be chosen in 2003. In 2003 he was responsible for moving the party’s head office from Toronto to Ottawa.
Bradshaw is survived by his wife Maryann Bradshaw and daughters Laura (Ian Carter) and Karen (Cameron Nelson) Bradshaw, as well as grandchildren Gryff, Declan, Soren and Marae.
He also leaves his siblings Michael, Greg, Judy, Jerry and Dierdre.
In lieu of flowers, the family suggests donations in Bradshaw’s honour to the Elisabeth Bruyère Hospital (palliative care) www.bruyere.org or Cycling Without Age https://cyclingwithoutage.ca/ottawa/.
|
'''
Conversion of basis sets to Dalton format
'''
from .. import lut, manip, sort, misc, printing
def write_dalton(basis):
'''Converts a basis set to Dalton format
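
    The input is assumed to follow the internal basis-set dict layout used
    elsewhere in this package: a dict with a 'name' key and an 'elements'
    dict mapping element Z numbers to 'electron_shells'/'ecp_potentials' data.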
'''
s = '! Basis = {}\n\n'.format(basis['name'])
basis = manip.make_general(basis, False, True)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
# Electron Basis
if electron_elements:
for z in electron_elements:
data = basis['elements'][z]
#sym = lut.element_sym_from_Z(z, True)
elname = lut.element_name_from_Z(z).upper()
cont_string = misc.contraction_string(data)
s += 'a {}\n'.format(z)
s += '! {} {}\n'.format(elname, cont_string)
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
ngen = len(coefficients)
am = shell['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
s += '! {} functions\n'.format(amchar)
# Is this a bug in the original EMSL?
#s += '{} {} 1.00\n'.format(sym, r, nprim)
s += '{} {} {}\n'.format('H', nprim, ngen)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=False)
# Write out ECP
if ecp_elements:
s += '\n\nECP\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, normalize=True)
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += '{} nelec {}\n'.format(sym, data['ecp_electrons'])
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).upper()
if am[0] == max_ecp_am:
s += '{} ul\n'.format(sym)
else:
s += '{} {}\n'.format(sym, amchar)
point_places = [0, 9, 32]
s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places, convert_exp=False)
s += 'END\n'
return s
|
I think it's a great idea. People are going to buy chocolate regardless so why not put their splurges to good use?
At my work, parents often bring in fundraising chocolates. It is placed in the kitchen next to the vending machine which is also stocked with chocolate. The employees will mostly buy the fundraising chocolate instead of the chocolate from the vending machine because it's not as big as the whole sized bars in the machine so they still get a treat, it's just a smaller one. If the fundraising chocolates weren't there they would still buy chocolate, just from the vending machine instead.
I agree it's not the healthiest of options, but I'd rather buy 1 or 2 Freddos for $1 each than buy pies and lamingtons from a lamington drive, cookie dough from a cookie dough drive or cakes and slices from a slice drive, all of which I've seen as well.
A lot of families can't afford to spend $15 on a tub of cookie dough (and why would you anyway?), and this can make children feel excluded. Something that costs $1 is affordable, won't break the bank, and everyone can contribute.
I understand where you are coming from. People who buy chocolate will buy it anyway. That said, I still don't like it as a fundraiser for schools.
Interestingly my work got rid of the chocolate vending machine and coke machine!
The problem I and other parents at the school have is that a number of children have intolerances, allergies or behavioural food plans, which means they are excluded. They can sell, sure, but they can't eat it. This applies to lactose and gluten intolerance, as some Cadbury products are no longer gluten free. It is a learning experience for those kids, but it's tough and can be tricky for parents.
I feel bad for my kids and others who are like them. I love chocolate though, just not as a fundraiser.
I am one of those children that was not allowed to buy/eat fundraising chocolates due to illness in the family. My mother would not let us eat the fundraising chocolates because it wasn't fair on family members who medically could not eat it.
I was taught that fundraisers were to raise money and my contribution to selling was what was important. I was proud of my contribution. I was brought up that fundraising chocolates were for fundraising not for eating so I never knew any differently.
Why not contact a fruit shop or local farmer/s and see if they'll allow your school to sell fruit/veg boxes instead. I've always thought this was a great idea and have not seen it done. The same as a pie drive where you get a different list of products and the buyers can pick what they would like in their fruit box. The fruito delivers the boxes to the school once a month/week. A bit like online shopping without the online bit. It would be a great way to support local farmers as well as the school.
No. Given the obesity crisis, I do not think fundraising with chocolate is a good idea.
Hosting a fun and active event that the kids can participate in and get sponsored for is better. The Heart Foundation successfully raises money for cardiovascular research in the Jump Rope for Heart program.
Schools could mirror this with an alternative fun activity linked to the reason for the fundraising event, so it's not fundraising for fundraising's sake.
I know Brownies sell cookies each year. I guess it was OK back in the day, but now, with the poor diet many children have, it's not such a good idea.
Yep, the Brownies still sell cookies, even (*cue dramatic music*) chocolate ones. I keep hearing people say that kids have a poor diet "these days" and I wonder if it is actually any worse than what people ate 20, or 50 years ago. I doubt it. There were certainly plenty of sweets and junk food around when I was a child (in the 80s), and we enjoy a wider variety of fruit and vegetables than was available when my grandmother was young. I think it's a media beat up.
As a seasoned fundraiser with close to 30 years' experience, I know that chocolate fundraisers are probably not ideal and don't send out quite the right message, but the simple fact is that they sell and make a lot of money fast. You don't have to buy them and eat them. Our kids ran around the street selling them and they sold out very quickly, plus they developed their social skills and got exercise at the same time. Those Jump Rope for Heart events and fun runs etc. take A LOT of planning. Over a year in most cases, but once you have done it once and made your contacts, it should work like clockwork the following year. There will always be parents who stand on principle with chocolate fundraisers. The goal of fundraising is to have many small fundraisers throughout the year, and one big one, like a trivia night or a fete. Our local kindergarten has a Sustainability fete, with a focus on teaching about sustainability and the role the children at the center play in that. Why don't you join the P and F if you have ideas? They would love to have new blood, I'm sure. In my fundraising we sometimes have hampers and raffle them off. I don't think a hamper filled with Fitbits, gym memberships, fresh fruit and Paleo recipe books would sell as well as one with chocolates, wine, cheese and other goodies. You just have to be practical, and do things that sell and make money, with a proven track record of doing so. The kids like to get involved, so it has to be something achievable for them too.
I agree that chocolate sells and usually sells fast. This year they aren't selling quickly and we are not sure why. There is a main fundraiser event that does well, and usually the chocolates complement this activity. This year, though, we wondered whether it was because parents are starting to be more treat-wise.
I'm not sure why. There is nothing to say that parents have to buy them. Sell them to family, friends, neighbors, anyone. Most people like chocolate and that is the premise behind them as fundraisers. My friend is a fitness and health fanatic, but still sells chocolate for the kinder or school fundraiser. At the end of the day, the money goes toward the school, and that is the end goal. If we found a fundraiser wasn't working, we would put it on the back blocks for a couple of years and then try again. That's where the original ideas come from, because then you have to think of a new idea, which just might be a real winner.
I agree Lluxi. I haven't been very involved with fundraising at my kids' school (coming up with ideas etc.) because transportation (I don't drive) and time are a problem for me, so I'd hesitate to criticise all the work other women have done to raise money. I think whatever they do in that regard is great; they're doing the best they can, and their time and resources are limited, so something that's easy to do and works is a good choice. They also do Mother's Day and Father's Day stalls and Easter and Christmas raffles of hampers full of donated goods (each family puts in something). It cracked me up when our vegetarian household won the ham. My non-vegetarian Dad was pleased with it though.
Chocolate is not the right product; seriously, there must be something else?
Sure they may make a lot of money.....but it's not just the kids that need to cut down on the stuff!
It's 2017, time to reinvent the wheel. I don't care how much money it makes.
Is your chocolate photo of milk or DARK chocolate, Jonaj?!
Miro :) it's just the lighting and how the pic was taken.
I never bought them when the kids came around to the houses after school, as I have never liked milk chocolate!
I caught a crowded school bus home last Friday. A young high school girl standing near my seat had a box of chocolates to sell. I thought straight away, "Oh you poor thing, having to sell all the chocolates." I really didn't think they 'had' them anymore! I like the card w/seeds idea better!
I don't like it. I know it sells. My son was sent home with a package of chocolate. We bought some and I asked him to return the rest to school. He was on a diet that restricted his sugar intake due to behavioural issues, and I didn't want the temptation. We received a bill from the school for the whole amount. Sadly, it turned out my son had stolen the chocolate and eaten the lot. Not good.
I don't think we were the only family this happened to. No wonder they are successful fundraisers!
Oh no!!! You poor thing. Your poor son as well, I hope it didn't make him too sick.
Thanks Naomi. You are kind. If he was sick I didn't know what caused it at the time. Sugar affected his behaviour. I probably wondered why he was out of control. Sugar made him emotional, led him to make bad choices and act out. When not on sugar he was like a different child. Happy and eager to cooperate. Like chalk and cheese.
I was at a play date with a new mother who, when she questioned why I was not giving my child the yoghurt that was provided, didn't believe me when I said my child was allergic (not intolerant) to lactose. I had given my child fruit. I took something to the sink and turned to find her force-feeding yoghurt. The consequences were dire, not just physically but emotionally, for my child. Needless to say we do not see her anymore.
Oh no Naomi! How terrible. Was your son alright? I hope that the other Mother learnt from it. So many people bandy the word allergy around now, it can be very dangerous.
Trust is a big issue now.
The other woman was upset but I don't know why she did it!!
I imagine that she thought she knew best and would prove your son wasn't allergic. Silly woman. I don't think I would remain friends with her under the same circumstances, although I guess she wouldn't do that again.
We had nightmares etc. for over six months. It was traumatic. We saw her once after that, during those months of nightmares. She decided she would cut off contact as she felt guilty and couldn't cope with the trauma she had caused (her words not mine).
When I think about kids getting this box of chocolate to sell... in all honesty! What kid would not want it for themselves? Seriously, I have a grandson who takes other people's chocolate at Easter! He hides it, and he is a very skinny boy (lucky him), and I have to be totally honest, I think a box of it would tempt any normal kid. Chocolate is addictive; my eldest son has a real problem with it.
That's terrible Naomi. The other mother had no right to interfere with your parenting decisions and force feed your child. :( I have also had someone feed my child something she wasn't supposed to eat against my wishes, but at least they didn't force feed her. That's pretty much assault.
Re Annfi's comment, maybe there should be an age limit, so only kids over a certain age would have to sell the chocolates, or younger kids would have the box collected by the parents? When I was in high school we were given a carton of blocks of chocolate to sell, and did eat a fair few of them, though I paid for them. At 15 I was old enough to understand the consequences: that I'd have to buy them, not steal them.
I remember when we had one when I was in school. I don't have any problem with it as a fundraiser in moderation, though I guess you wouldn't want to have them all the time since there are other things you can do (my kids' school had a contest in each classroom to see which class could fill a jar with 5 cent coins from home first, to pick a random example). It's not as if families can't buy chocolate at any time anyway; it's just that this time the money they spend on it goes to the school (for stuff like playground upgrades; I think the last fundraiser we had was for air conditioning in the classrooms, badly needed when temperatures can reach 42 degrees Celsius during summer days here).
I don't mind it; it doesn't do any harm. It's totally up to the individual if they want to buy them, and I know I do.
It's a great way to make money - people don't need much convincing to buy chocolate for a good cause.
But I don't think it sends a positive message to the kids, or does your coworkers' health any favours.
Last Christmas our kindy sold Christmas cards with native flower seeds inside them, instead of chocolates. I thought this was a terrific idea.
I love the card/seed idea, Wellness in Words. Hooray 4 no more chocolate!
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from tastypie import fields
from tastypie.authentication import MultiAuthentication
from tastypie.authorization import Authorization
from tastypie.bundle import Bundle
from tastypie.exceptions import NotFound
from tastypie.resources import Resource, ModelResource
from tastypie.validation import CleanedDataFormValidation
from apps.mds_auth.authorization import ReadAllSessionAuthentication, ReadAllTokenAuthentication
from apps.muni_scales.api_authorization import UXXAuthorization
from apps.muni_scales.fields import MscaleFieldMixin
from apps.muni_scales.forms import UDHscaleForm, UXCscaleForm
from apps.muni_scales.models import UDHscale, UXCscale
from apps.muni_scales.mscale import Mscale, MSCALES
class MscaleField(fields.ApiField, MscaleFieldMixin):
'''
A field that accepts an Mscale Resource but stores the integer value in the db.
'''
dehydrated_type = 'apps.muni_scales.mscale.Mscale'
help_text = 'an mscale object'
def convert(self, value):
if value is None:
return None
return self.to_mscale(value)
def hydrate(self, bundle):
'''
Prepare data before saving to the model.
'''
        # Check whether a value is present
        if self.instance_name in bundle.data:
value = bundle.data[self.instance_name]
mscale = self.to_mscale(value)
return mscale.number
else:
return None
def dehydrate(self, bundle, **kwargs):
'''
Prepare data for serialization before sending to the client.
'''
        return self.convert(getattr(bundle.obj, self.instance_name))
class MscaleResource(Resource):
'''
A read-only Mscale resource.
'''
id = fields.DecimalField(attribute='number')
underground = fields.CharField(attribute='underground')
slope = fields.CharField(attribute='slope')
obstacles = fields.ListField(attribute='obstacles')
characteristics = fields.ListField(attribute='characteristics')
class Meta:
resource_name = 'mscales'
object_class = Mscale
authorization = Authorization()
allowed_methods = ['get']
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>[0-9]+)/$" %
self._meta.resource_name,
self.wrap_view('dispatch_detail'),
name="api_dispatch_detail"),
]
def apply_sorting(self, obj_list, options=None):
"""
sorts by number (always ascending)
"""
return sorted(obj_list, key=lambda m: m.number)
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.number
else:
kwargs['pk'] = bundle_or_obj.number
return kwargs
def get_object_list(self, request):
return MSCALES.values()
def obj_get_list(self, request=None, **kwargs):
# TODO: proper filtering
return self.get_object_list(request)
def obj_get(self, request=None, **kwargs):
try:
pk = float(kwargs['pk'])
return MSCALES[pk]
except KeyError:
raise NotFound("Invalid lookup ID provided.")
except ValueError:
raise NotFound()
class ScaleCalcMixin(object):
'''
Adds endpoint for score calculation.
'''
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/calculate/$" %
self._meta.resource_name, self.wrap_view('get_score'), name="api_calc_score"),
]
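    # Extra endpoint: POST scale form data to .../calculate/ to get a score
    # computed on the fly without saving a rating (the full URL prefix depends
    # on how the API is registered).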
def get_score(self, request, **kwargs):
'''
Return the score for the calculation
'''
scale = self.__class__()
bundle = scale.build_bundle(data=request.POST, request=request)
scale_obj = scale.full_hydrate(bundle).obj
errors = scale_obj.full_clean()
if errors:
return self.create_response(request, errors)
score = scale_obj.get_score()
return self.create_response(request, score)
class UDHResource(ScaleCalcMixin, ModelResource):
'''
UDH rating
'''
max_difficulty = MscaleField(attribute="max_difficulty")#fields.ToOneField(MscaleResource, attribute="max_difficulty")
avg_difficulty = MscaleField(attribute="avg_difficulty")#fields.ToOneField(MscaleResource, attribute="avg_difficulty")
score = fields.DictField(attribute='get_score', readonly=True, use_in="detail")
trail = fields.ToOneField("apps.trails.api.TrailResource", "trail", related_name="udhscale", blank=True)
class Meta:
queryset = UDHscale.objects.all()
resource_name = 'udh-scale'
validation = CleanedDataFormValidation(form_class = UDHscaleForm)
always_return_data = True
authentication = MultiAuthentication(ReadAllSessionAuthentication(), ReadAllTokenAuthentication())
authorization = UXXAuthorization()
class UXCResource(ScaleCalcMixin, ModelResource):
'''
UXC Rating
'''
max_difficulty = MscaleField(attribute="max_difficulty")
avg_difficulty = MscaleField(attribute="avg_difficulty")
score = fields.DictField(attribute='get_score', readonly=True, use_in="detail")
trail = fields.ToOneField("apps.trails.api.TrailResource", "trail", related_name="uxcscale", blank=True)
class Meta:
queryset = UXCscale.objects.all()
resource_name = 'uxc-scale'
always_return_data = True
validation = CleanedDataFormValidation(form_class = UXCscaleForm)
authentication = MultiAuthentication(ReadAllSessionAuthentication(), ReadAllTokenAuthentication())
authorization = UXXAuthorization()
|
#!/usr/bin/env python
"""
Greps and returns the first svn log entry containing a line matching the regular
expression pattern passed as the only arg.
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h$'
"""
import re, sys, StringIO
# Separator string for "svn log -v" output.
separator = '-' * 72
usage = """Usage: grep-svn-log.py line-pattern
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h'"""
class Log(StringIO.StringIO):
"""Simple facade to keep track of the log content."""
def __init__(self):
self.reset()
def add_line(self, a_line):
"""Add a line to the content, if there is a previous line, commit it."""
if self.prev_line != None:
print >> self, self.prev_line
self.prev_line = a_line
self.separator_added = (a_line == separator)
def del_line(self):
"""Forget about the previous line, do not commit it."""
self.prev_line = None
def reset(self):
"""Forget about the previous lines entered."""
StringIO.StringIO.__init__(self)
self.prev_line = None
def finish(self):
"""Call this when you're finished with populating content."""
if self.prev_line != None:
print >> self, self.prev_line
self.prev_line = None
def grep(regexp):
# The log content to be written out once a match is found.
log = Log()
LOOKING_FOR_MATCH = 0
FOUND_LINE_MATCH = 1
state = LOOKING_FOR_MATCH
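    # Two-state loop: while looking for a match, buffer the current entry and
    # reset it at each separator; once a line matches, keep buffering until the
    # next separator, then print the whole entry.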
    while True:
line = sys.stdin.readline()
if not line:
return
line = line.splitlines()[0]
if state == FOUND_LINE_MATCH:
# At this state, we keep on accumulating lines until the separator
# is encountered. At which point, we can return the log content.
if line == separator:
log.finish()
print log.getvalue()
return
log.add_line(line)
elif state == LOOKING_FOR_MATCH:
if line == separator:
log.reset()
log.add_line(line)
# Update next state if necessary.
if regexp.search(line):
state = FOUND_LINE_MATCH
def main():
if len(sys.argv) != 2:
print usage
sys.exit(0)
regexp = re.compile(sys.argv[1])
grep(regexp)
sys.stdin.close()
if __name__ == '__main__':
main()
|
aka GTC Cherokee 16 Cinemas, GTC Cherokee Stadium Cinemas 16 (The Georgia Theatre Company). It became the Regal Cherokee 16 Cinemas in 2015 (?).
This movie theater is near Woodstock, Acworth, Holly Springs, Marietta, Kennesaw, Lebanon, Canton.
|
# cd pySU/pyMultidark/trunk/bin/fortranfile-0.2.1/
import sys
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import fortranfile
import cPickle
DFdir = join("/data2", "users", "gustavo", "BigMD", "1Gpc_3840_Planck1_New", "DENSFIELDS")
mockDir = "/data1/DATA/eBOSS/Multidark-box-mocks/v1.0/parts/"
inFiles = n.array(["dmdens_cic_104.dat", "dmdens_cic_101.dat", "dmdens_cic_097.dat", "dmdens_cic_087.dat"])
bins = n.hstack((0,n.logspace(-3, 4, 1000)))
for infi in inFiles:
print infi
DFfile = join(DFdir,infi)
f = fortranfile.FortranFile(DFfile)
gridx, gridy, gridz = f.readInts()
Ntot = gridx/2
res0 = n.empty((Ntot, len(bins)-1))
NS = n.arange(Ntot)
for kk in NS:
print kk, time.time()
DFa = f.readReals()
DFb = f.readReals()
DFaR = DFa.reshape((gridx, gridx))
DFbR = DFb.reshape((gridx, gridx))
DF = n.mean(n.array([DFaR,DFbR]), axis=0)
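        # Downsample by averaging 2x2 blocks: DFdg has half the resolution of DF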
DFdg = n.array([ n.array([ n.mean([DF[2*i][2*j:2*j+2], DF[2*i+1][2*j:2*j+2]]) for j in NS]) for i in NS])
res0[kk] = n.histogram(n.hstack((DFdg)), bins=bins)[0]
f.close()
path_to_outputCat = join(mockDir,infi[:-4] + "_DF_dg2_hist.dat")
f=open(path_to_outputCat, 'w')
cPickle.dump( [bins, n.sum(res0, axis=0)], f )
f.close()
sys.exit()
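# NOTE: nothing below runs unless the sys.exit() above is removed; the first
# inFiles assignment below is immediately overwritten by the second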
inFiles = n.array(["vx_cic_104.dat", "vx_cic_101.dat", "dmdens_cic_097.dat", "dmdens_cic_087.dat"])
inFiles = n.array(["vx_cic_104.dat", "vx_cic_101.dat", "vx_cic_097.dat", "vx_cic_087.dat", "vy_cic_104.dat", "vy_cic_101.dat", "vy_cic_097.dat", "vy_cic_087.dat", "vz_cic_104.dat", "vz_cic_101.dat", "vz_cic_097.dat", "vz_cic_087.dat"])
bins = n.arange(-2000.,2000., 5.)
for infi in inFiles:
print infi
DFfile = join(DFdir,infi)
f = fortranfile.FortranFile(DFfile)
gridx, gridy, gridz = f.readInts()
res0 = n.empty((gridx, len(bins)-1))
res1 = n.empty((gridx, len(bins)-1))
resH = n.empty((gridx, len(bins)-1, len(bins)-1))
for kk in range(gridx):
DF = f.readReals()
i = n.arange(1, gridx-1, 1)
j = n.arange(1, gridx-1, 1)
DF0 = DF[n.hstack((n.outer(i,j)))]
N1 = n.transpose([ n.hstack((n.outer(i-1,j-1))), n.hstack((n.outer(i,j-1))), n.hstack((n.outer(i-1,j))), n.hstack((n.outer(i+1,j+1))), n.hstack((n.outer(i+1,j))), n.hstack((n.outer(i,j+1))), n.hstack((n.outer(i+1,j+1))), n.hstack((n.outer(i-1,j+1))) ])
# N1 = n.transpose([ (i-1) + gridx * (j -1), (i) + gridx * (j -1), (i-1) + gridx * (j), (i+1) + gridx * (j +1), (i+1) + gridx * (j ), (i) + gridx * (j +1), (i+1) + gridx * (j -1), (i-1) + gridx * (j +1) ])
DF1 = n.array([ n.mean(DF[el]) for el in N1 ])
res0[kk] = n.histogram(DF0,bins=bins)[0]
res1[kk] = n.histogram(DF1,bins=bins)[0]
resH[kk] = n.histogram2d(DF0, DF1, bins)[0]
f.close()
path_to_outputCat = join(mockDir,infi[:-4] + "_DF0DF1hist.dat")
f=open(path_to_outputCat, 'w')
cPickle.dump([bins,n.sum(res0,axis=0), n.sum(res1,axis=0), n.sum(resH,axis=0)],f)
f.close()
|
Animatronic robotic replica of an Allosaurus dinosaur returns from the paint shop to the Dinamation factory via freeway in Orange County, southern California. Dinamation makes robotic dinosaurs for museum displays around the world. From the book Robo sapiens: Evolution of a New Species, page 17.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""pdp_script.py
Implements the pdp script for finding differential primers
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact: [email protected]
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import time
from diagnostic_primers import __version__
from diagnostic_primers.scripts import parsers
from diagnostic_primers.scripts.logger import build_logger
def run_pdp_main(argv=None, logger=None):
"""Main process for pdp script"""
# If we need to (i.e. a namespace isn't passed), parse the command-line
if argv is None:
args = parsers.parse_cmdline()
else:
args = parsers.parse_cmdline(argv)
# Catch execution with no arguments
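    # (note: this inspects sys.argv directly, so the bare-invocation check only
    # fires for real command-line runs, not programmatic calls that pass argv)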
if len(sys.argv) == 1:
sys.stderr.write("pdp version: {0}\n".format(__version__))
return 0
# Set up logging
time0 = time.time()
if logger is None:
logger = build_logger("pdp", args)
# Run the subcommand
returnval = args.func(args, logger)
logger.info("Completed. Time taken: %.3f", (time.time() - time0))
return returnval
|
All State; All Region MVP Co-Athlete; Team Captain Award; Team Offensive MVP; Started 10 games as QB. Started 7 games as F/S.
1125 yds passing w/ 11 TDs. 625 rushing w/ 8 TDs.
Team Raider Award. Team Alumni Award. Lettered. Played in 10 games. Started 2 as QB. Holder for PATs and FGs.
Team Alumni Award. Lettered. Played in 10 games. Holder for PATs and FGs.
|
import numpy as np
from AnyQt.QtWidgets import QTreeWidget, QTreeView, QTreeWidgetItem
from Orange.data import Table
from Orange.widgets import gui
from Orange.widgets.settings import Setting
from Orange.widgets.widget import OWWidget, Msg
from orangecontrib.text.util import np_sp_sum
from orangecontrib.text.stats import false_discovery_rate, hypergeom_p_values
class OWWordEnrichment(OWWidget):
# Basic widget info
name = "Word Enrichment"
description = "Word enrichment analysis for selected documents."
icon = "icons/SetEnrichment.svg"
priority = 60
# Input/output
inputs = [("Selected Data", Table, "set_data_selected"),
("Data", Table, "set_data"),]
want_main_area = True
class Error(OWWidget.Error):
no_words_overlap = Msg('No words overlap!')
empty_selection = Msg('Selected data is empty!')
        all_selected = Msg('All examples cannot be selected!')
# Settings
filter_by_p = Setting(False)
filter_p_value = Setting(0.01)
filter_by_fdr = Setting(True)
filter_fdr_value = Setting(0.2)
def __init__(self):
super().__init__()
# Init data
self.data = None
self.selected_data = None
self.selected_data_transformed = None # used for transforming the 'selected data' into the 'data' domain
self.words = []
self.p_values = []
self.fdr_values = []
# Info section
fbox = gui.widgetBox(self.controlArea, "Info")
self.info_all = gui.label(fbox, self, 'Cluster words:')
self.info_sel = gui.label(fbox, self, 'Selected words:')
self.info_fil = gui.label(fbox, self, 'After filtering:')
# Filtering settings
fbox = gui.widgetBox(self.controlArea, "Filter")
hbox = gui.widgetBox(fbox, orientation=0)
self.chb_p = gui.checkBox(hbox, self, "filter_by_p", "p-value",
callback=self.filter_and_display,
tooltip="Filter by word p-value")
self.spin_p = gui.doubleSpin(hbox, self, 'filter_p_value',
1e-4, 1, step=1e-4, labelWidth=15,
callback=self.filter_and_display,
callbackOnReturn=True,
tooltip="Max p-value for word")
self.spin_p.setEnabled(self.filter_by_p)
hbox = gui.widgetBox(fbox, orientation=0)
self.chb_fdr = gui.checkBox(hbox, self, "filter_by_fdr", "FDR",
callback=self.filter_and_display,
tooltip="Filter by word FDR")
self.spin_fdr = gui.doubleSpin(hbox, self, 'filter_fdr_value',
1e-4, 1, step=1e-4, labelWidth=15,
callback=self.filter_and_display,
callbackOnReturn=True,
                                       tooltip="Max FDR value for word")
self.spin_fdr.setEnabled(self.filter_by_fdr)
gui.rubber(self.controlArea)
# Word's list view
self.cols = ['Word', 'p-value', 'FDR']
self.sig_words = QTreeWidget()
self.sig_words.setColumnCount(len(self.cols))
self.sig_words.setHeaderLabels(self.cols)
self.sig_words.setSortingEnabled(True)
self.sig_words.setSelectionMode(QTreeView.ExtendedSelection)
self.sig_words.sortByColumn(2, 0) # 0 is ascending order
for i in range(len(self.cols)):
self.sig_words.resizeColumnToContents(i)
self.mainArea.layout().addWidget(self.sig_words)
def set_data(self, data=None):
self.data = data
def set_data_selected(self, data=None):
self.selected_data = data
def handleNewSignals(self):
self.check_data()
def check_data(self):
self.Error.clear()
if isinstance(self.data, Table) and \
isinstance(self.selected_data, Table):
if len(self.selected_data) == 0:
self.Error.empty_selection()
self.clear()
return
self.selected_data_transformed = Table.from_table(
self.data.domain, self.selected_data)
if np_sp_sum(self.selected_data_transformed.X) == 0:
self.Error.no_words_overlap()
self.clear()
elif len(self.data) == len(self.selected_data):
self.Error.all_selected()
self.clear()
else:
self.apply()
else:
self.clear()
def clear(self):
self.sig_words.clear()
self.info_all.setText('Cluster words:')
self.info_sel.setText('Selected words:')
self.info_fil.setText('After filtering:')
def filter_enabled(self, b):
self.chb_p.setEnabled(b)
self.chb_fdr.setEnabled(b)
self.spin_p.setEnabled(b)
self.spin_fdr.setEnabled(b)
def filter_and_display(self):
self.spin_p.setEnabled(self.filter_by_p)
self.spin_fdr.setEnabled(self.filter_by_fdr)
self.sig_words.clear()
count = 0
if self.words:
for word, pval, fval in zip(self.words, self.p_values, self.fdr_values):
if (not self.filter_by_p or pval <= self.filter_p_value) and \
(not self.filter_by_fdr or fval <= self.filter_fdr_value):
it = EATreeWidgetItem(word, pval, fval, self.sig_words)
self.sig_words.addTopLevelItem(it)
count += 1
for i in range(len(self.cols)):
self.sig_words.resizeColumnToContents(i)
self.info_all.setText('Cluster words: {}'.format(len(self.selected_data_transformed.domain.attributes)))
self.info_sel.setText('Selected words: {}'.format(np.count_nonzero(np_sp_sum(self.selected_data_transformed.X, axis=0))))
if not self.filter_by_p and not self.filter_by_fdr:
self.info_fil.setText('After filtering:')
self.info_fil.setEnabled(False)
else:
self.info_fil.setEnabled(True)
self.info_fil.setText('After filtering: {}'.format(count))
def progress(self, p):
self.progressBarSet(p)
def apply(self):
self.clear()
self.progressBarInit()
self.filter_enabled(False)
self.words = [i.name for i in self.selected_data_transformed.domain.attributes]
self.p_values = hypergeom_p_values(self.data.X,
self.selected_data_transformed.X,
callback=self.progress)
self.fdr_values = false_discovery_rate(self.p_values)
self.filter_and_display()
self.filter_enabled(True)
self.progressBarFinished()
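# Score formatters: fixed-point for values above ~1e-2, scientific notation for
# smaller ones (fpt is the higher-precision variant used in tooltips)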
fp = lambda score: "%0.5f" % score if score > 10e-3 else "%0.1e" % score
fpt = lambda score: "%0.9f" % score if score > 10e-3 else "%0.5e" % score
class EATreeWidgetItem(QTreeWidgetItem):
def __init__(self, word, p_value, f_value, parent):
super().__init__(parent)
self.data = [word, p_value, f_value]
self.setText(0, word)
self.setText(1, fp(p_value))
self.setToolTip(1, fpt(p_value))
self.setText(2, fp(f_value))
self.setToolTip(2, fpt(f_value))
def __lt__(self, other):
col = self.treeWidget().sortColumn()
return self.data[col] < other.data[col]
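# --- Illustrative sketch (not part of the widget) ---------------------------
# The hypergeom_p_values/false_discovery_rate helpers used above live
# elsewhere in the package; a minimal per-word enrichment test along the same
# lines, assuming scipy is available, looks like this:
def word_enrichment_p(n_total, n_with_word, n_selected, n_selected_with_word):
    """P(X >= k) that the word is over-represented in the selection."""
    from scipy.stats import hypergeom
    # sf(k - 1, M, n, N) gives P(X >= k) for a hypergeometric draw
    return hypergeom.sf(n_selected_with_word - 1, n_total,
                        n_with_word, n_selected)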
|
Ride a train filled to the brim with PIKACHUS!! The Pokemon With You train is the only way to travel with a mountain of everyone’s favorite Pokémon, and it will have you rolling around in Pikachu goods and seeing yellow for hours! It runs between Ichinoseki Station in Iwate Prefecture and Kesennuma Station in Miyagi Prefecture.
|
import nltk
from ground_truth import (ARTICLES, PREPOSITIONS, CONJUNCTIONS)
from operator import itemgetter
def make_capitalized_title(title=None, title_words=None):
"""
>>> make_capitalized_title(title = "This translation app helps professionals traveling in China and Japan")
['This', 'Translation', 'App', 'Helps', 'Professionals', 'Traveling', 'in', 'China', 'and', 'Japan']
>>> make_capitalized_title(title = "Russia to see surge of investments if sanctions lifted: VTB Bank Head")
['Russia', 'to', 'See', 'Surge', 'of', 'Investments', 'if', 'Sanctions', 'Lifted', ':', 'VTB', 'Bank', 'Head']
>>> make_capitalized_title(title = "CIS FMs hold summit in Belarus")
['CIS', 'FMs', 'Hold', 'Summit', 'in', 'Belarus']
"""
trans_words = []
if title_words:
words = title_words
elif title:
words = nltk.word_tokenize(title)
else:
        raise ValueError("Expected either 'title' or 'title_words' to be given")
for i, word in enumerate(words):
if i == 0:
trans_words.append(word if word[0] == word[0].upper() else word.capitalize())
elif (word in ARTICLES or word in PREPOSITIONS or word in CONJUNCTIONS):
trans_words.append(word)
elif word[0] == word[0].upper(): #already capitalized
trans_words.append(word)
else:
trans_words.append(word.capitalize())
return trans_words
def make_uppercase_title(title_words):
"""make the title uppercase
>>> make_uppercase_title(["This", "translation", "app", "helps", "professionals", "traveling", "in", "China", "and", "Japan"])
['THIS', 'TRANSLATION', 'APP', 'HELPS', 'PROFESSIONALS', 'TRAVELING', 'IN', 'CHINA', 'AND', 'JAPAN']
"""
words = []
for w in title_words:
words.append(w.upper())
return words
def make_lowercase_title(title_words):
"""make the title lowercase
>>> make_lowercase_title(["This", "translation", "app", "helps", "professionals", "traveling", "in", "China", "and", "Japan"])
['this', 'translation', 'app', 'helps', 'professionals', 'traveling', 'in', 'china', 'and', 'japan']
"""
words = []
for w in title_words:
words.append(w.lower())
return words
def transform_data(data, sent_transform_func):
"""
Transform the data on the sentence level
>>> input = [[(u'The', 'IC'), (u'Sun', 'IC'), (u'Life', 'IC'), (u'Building', 'IC'), (u'receives', 'AL'), (u'LEED', 'AU'), (u'Silver', 'IC'), (u'Certification', 'IC')]]
>>> transform_data(input, make_capitalized_title)
[[(u'The', 'IC'), (u'Sun', 'IC'), (u'Life', 'IC'), (u'Building', 'IC'), (u'Receives', 'AL'), (u'LEED', 'AU'), (u'Silver', 'IC'), (u'Certification', 'IC')]]
>>> transform_data(input, make_lowercase_title)
[[(u'the', 'IC'), (u'sun', 'IC'), (u'life', 'IC'), (u'building', 'IC'), (u'receives', 'AL'), (u'leed', 'AU'), (u'silver', 'IC'), (u'certification', 'IC')]]
"""
assert callable(sent_transform_func)
new_data = []
for instance in data:
        new_data.append(
            list(zip(sent_transform_func(title_words=list(map(itemgetter(0), instance))),
                     map(itemgetter(1), instance)))
        )
return new_data
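# Minimal self-test entry point (assumes nltk and the ground_truth module
# providing ARTICLES/PREPOSITIONS/CONJUNCTIONS are importable):
if __name__ == "__main__":
    import doctest
    doctest.testmod()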
|
Required to be available seasonally for at least two nights per week, plus Saturdays and Sundays.
Administrative duties are required of the role and are included in the hourly coaching rate, including but not limited to: seasonal plans, online session planning, online time-sheet submissions, game analysis reports, and closing out your program reports.
Coaching hours and summer camp assignments are allocated based on program need and matched to availability.
|
#!/usr/bin/env python
"""
Copyright(c)2009 Internet Archive. Software license AGPL version 3.
This file is part of bookserver.
bookserver is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
bookserver is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with bookserver. If not, see <http://www.gnu.org/licenses/>.
The bookserver source is hosted at http://github.com/internetarchive/bookserver/
"""
class Catalog:
    """
    An OPDS catalog: feed-level metadata plus a list of entries, with
    optional navigation and OpenSearch descriptors.
    """
def __init__(self,
title = 'Internet Archive OPDS',
urn = 'urn:x-internet-archive:bookserver:catalog',
url = 'http://bookserver.archive.org/catalog/',
datestr = '1970-01-01T00:00:00Z',
author = 'Internet Archive',
authorUri = 'http://www.archive.org',
crawlableUrl = None
):
self._entries = []
self._opensearch = None
self._navigation = None
self._title = title
self._urn = urn
self._url = url
self._datestr = datestr
self._author = author
self._authorUri = authorUri
self._crawlableUrl = crawlableUrl
def addEntry(self, entry):
self._entries.append(entry)
def addNavigation(self, nav):
self._navigation = nav
def addOpenSearch(self, opensearch):
self._opensearch = opensearch
def getEntries(self):
return self._entries
|
Posted on March 12, 2012, in 100th Anniversary, Girl Scout Day (March 12), Girl Scout Holidays, Juliette Low, WAGGGS and tagged birthday, Girl Scout Week, postaweek2012. Bookmark the permalink. 2 Comments.
Thank you for providing “a blast into the past,” as the old saying goes. Have a joyous 100th. Wish I could come this summer to GA for the 100th year celebration like I did in Los Angeles, CA for, I believe, our 50th. It was very fun. Have a good one everyone.
For the Girl Guides, I have a movie called “Her Majesty”. A story about a Maori princess, a girl guide named Elizabeth (whose idol is Queen Elizabeth), and the struggles she goes through for being the Maori islanders’ friend. It’s a nice movie if you haven’t seen it.
|
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "112_WalkOfFate"
# ~~~~~ npcId list: ~~~~~
Livina = 30572
Karuda = 32017
# ~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~ itemId list: ~~~~~~
EnchantD = 956
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
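# Reward notes: itemId 57 is Adena (the in-game currency); EnchantD (956) is
# a grade-D enchant scroll given on quest completion.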
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
st = player.getQuestState(qn)
if not st: return
htmltext = event
cond = st.getInt("cond")
if event == "32017-02.htm" and cond == 1 :
st.giveItems(57,22308)
st.giveItems(EnchantD,1)
st.addExpAndSp(112876,5774)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
elif event == "30572-02.htm" :
st.playSound("ItemSound.quest_accept")
st.setState(STARTED)
st.set("cond","1")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><head><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
state = st.getState()
npcId = npc.getNpcId()
cond = st.getInt("cond")
if state == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif state == CREATED :
if npcId == Livina :
if player.getLevel() >= 20 :
htmltext = "30572-01.htm"
else:
htmltext = "30572-00.htm"
st.exitQuest(1)
elif state == STARTED :
if npcId == Livina :
htmltext = "30572-03.htm"
elif npcId == Karuda :
htmltext = "32017-01.htm"
return htmltext
QUEST = Quest(112,qn,"Walk of Fate")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(Livina)
QUEST.addTalkId(Livina)
QUEST.addTalkId(Karuda)
|
Copyright: © 2014 Norwood et al. This is an open-access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.
Funding: This work is financed by the research program Pathways to Sustainable European Energy Systems, E.ON, and the Chalmers Energy Initiative. The funders had no role in study design, data collection and analysis, decision to publish, or preparation of the manuscript.
Competing interests: This study was funded in part by E.ON. Sweden. Coauthor Emil Nyholm receives funding for his PhD studies from E.ON. This does not alter the authors' adherence to PLOS ONE policies on sharing data and materials.
Comparison, through computer modeling and simulation, of solar power technologies is not a new field. The work of Quaschning, for example, analyzed centralized solar thermal electric and PV technologies, and concluded based on analysis of 64 sites that in areas of high solar irradiance thermal-electric technologies were economically favorable to PV (even with cost projections to today) but vice-versa in areas of lower solar irradiance. Multiple renewable technologies have also been compared from a resource-technology perspective by studies such as Jacobson et al. There is even work to quantify the potential of PV technologies over large GIS data sets for both the European continent and North American regions. Modeling of solar technologies to predict efficiency based on fundamental electric and thermodynamic principles has also been published extensively, such as in the work of Kalogirou and Jiang et al., amongst many others. What the body of literature lacks, however, and what we try to contribute with this work, is comprehensive methods and results combining GIS modeling with appropriate physical and empirically verified models of a representative group of current and future cross-sector solar technologies. Additionally, analysis of these technologies based on typical weather data, optimized array tilts, and engineering first principles across such a large geospatial data set (12,000+ points in Europe and 1,000+ in the US) has not to our knowledge been undertaken. Lastly, the cross-disciplinary nature of this study, focusing on distributed electric-only, thermal-only, and combined heat and power systems, sets this study apart from the field.
Solar energy is harnessed today, in practice, by two main types of technology: thermal systems collect the light from the sun and either use the thermal energy directly or convert that thermal energy to electricity through a heat engine, whereas photovoltaic (PV) systems convert the photons from sunlight directly into electricity in a semiconductor device. Solar collectors are usually more efficient at converting photons into heat than electricity. Even though the photovoltaic process is more direct, the overall efficiency (percent of incident sunlight that is converted to electricity) of commercial solar thermal-electric and photovoltaic systems falls in similar ranges (10–30%), with the high end of this range reached in both exemplary high concentration PV (HCPV) and concentrating solar power (CSP) systems.
All solar power technologies collect electromagnetic radiation from the sun, but if a system optically concentrates the light (e.g. CSP) it collects primarily the direct portion of the radiation, whereas non-concentrating systems (e.g. flat plate PV) can collect both the direct and diffuse components of sunlight. The direct component of radiation (coming straight from the sun without being scattered or reflected on its way to the collector) makes up the vast majority of sunlight in the equatorial and sunniest locations around the world; but diffuse light (light that has been reflected and scattered on its way to the collector) is a major portion of total sunlight in the more polar and less sunny areas of the world.
Since only direct light can be optically concentrated, concentration requires the ability to track the sun so that the collector is always pointing directly at the sun as it moves across the sky, thus further complicating such systems. However, since solar thermal-electric efficiency benefits greatly from generating higher temperatures to drive the heat engines that convert the thermal energy to electricity, concentrating systems are the standard in this field.
At the core of photovoltaic technology is the solar cell, or the material that converts the sunlight to electricity. The physical process behind solar photovoltaics is not within the scope of this article, but suffice it to say that a solar cell is formed at the junction between two semiconductor materials (of which there exist many varieties). Multiple such junctions, with different abilities to absorb different wavelengths of light (corresponding to different electron band gaps), can be arranged in series (or parallel). All of these variations affect how much of the sunlight can be converted to electricity, with the goal being to develop low-cost materials reaching the theoretical limit of efficiency. For a single junction cell this efficiency limit is approx. 30%, but increases to 42% for two junctions, and 48% for three junctions, with a theoretical limit of 68% achievable with infinite junctions. Under high concentration the corresponding limits are 40% for a single-junction cell, 55% for two junctions, 63% for three junctions, and an 86% theoretical limit with infinite junctions.
A list of the most common solar photovoltaic chemistries used today in order of approximate market share are: polycrystalline silicon (poly-Si), single-crystalline silicon (mono-Si), thin film amorphous silicon (a-Si), thin film cadmium telluride (CdTe), thin film copper indium gallium selenide (CIGS), and multi-junction cells. Silicon technologies are broadly divided into crystalline cells (single or polycrystalline), which make up over 80% of the market, and non-crystalline cells (amorphous). Amorphous cells are generally thin-films, meaning a thin layer of the semiconductor material is deposited on a base layer. This process reduces cost by reducing the amount of material used in the process, but also decreases the efficiency of the cell compared to crystalline silicon cells. CdTe and CIGS cells are examples of non-silicon based commercial thin film technology. At the top end of the technology spectrum, in terms of efficiency, are multi-junction cells, the most advanced of which are generally made up of layers of compounds of group III and V elements on the periodic table. We model several of the most common types (i.e. poly-Si, mono-Si, CdTe, CIGS, and multi-junction) in this analysis, in both fixed tilt and 2d-tracking PV systems. An example of the results for typical annual and seasonal electricity production from a non-tracking mono-Si PV system over Europe and the US is shown in figure 1.
Figure 1. Non-tracking mono-Si PV system’s electricity production, from one square meter of collector, both (a) seasonally and (b) annually in (left) Europe and (right) the US.
In concentrating photovoltaic systems (CPV), the cells are packaged together into a module and usually many modules are mounted on a tracking apparatus where each individual cell is illuminated with highly concentrated sunlight that can be greater than one thousand times as bright as direct sunlight. Commercially, high concentration photovoltaics (HCPV) usually use Fresnel lenses but concentration can also be accomplished with any of the concentrating collector geometries described in the thermal and thermal-electric sections. We model a typical example of an HCPV collector in this analysis using a III–V semiconductor, and show an example of the results in figure 2 as electricity production over Europe and the US for a typical year.
Figure 2. HCPV system’s electricity production, from one square meter of collector, both (a) seasonally and (b) annually in (left) Europe and (right) the US.
At the other end of the solar technology spectrum from photovoltaics is solar thermal technology which collects sunlight and converts the energy to heat. Solar thermal systems use fluids (usually water or a glycol-water mix) to transfer the heat from the collector to a storage tank where it is then used for anything from industrial process heating to domestic hot water and space heating. The main commercialized types of solar thermal systems are those using flat-plate collectors, evacuated tube collectors, and concentrating trough/dish collectors.
Flat plate collectors can be glazed or unglazed. Glazed collectors are insulated on all sides except the glazing (a transparent single or multi-layer) which is facing the sun and allows the sunlight to come in but limits the losses due to convection going out (like a mini greenhouse). The absorber is usually made of copper or aluminum with many channels for the fluid to run through and a selective coating to prevent reflection of the light. Unglazed collectors are often made of plastic polymers, and are usually more appropriate for lower temperature heat demands and warmer climates.
Evacuated tubes are designed like a transparent thermos, where a long cylinder of glass surrounds the channel that the fluid moves through. The space between the glass and the fluid is a near-vacuum to minimize convective losses. The absorber is sometimes designed as a heat-pipe, allowing efficient transport of higher temperature fluid to a header where it heats the main circulating fluid in the system. Evacuated tubes also have the benefit of higher acceptance of diffuse light because their cylindrical shape allows collection of light from oblique directions.
Concentrating trough and dish collectors use reflective surfaces in parabolic-like shapes to reflect the sunlight onto an absorber, the main difference between a dish and trough being that a dish is a 3-dimensional parabola (or non-imaging parabola-like shape) whereas a trough is only a parabola in 2-d. Because the incident amount of sunlight per surface area of absorber is higher for a concentrating collector than that for a flat-plate collector and the corresponding thermal losses are lower, due again to the comparatively lower absorber surface area, higher temperatures can usually be obtained with this type of collector than any of the others, especially if the absorber is itself enclosed in an evacuated tube. As they are the main commercialized products for moderate and high temperature solar thermal, we model glazed flat-plate collectors, evacuated tubes, and concentrating troughs in this analysis. An example of the results for typical annual and seasonal heat production from a glazed flat-plate collector is shown in figure 3.
Figure 3. Non-tracking flat-plate thermal system’s heat production, from one square meter of collector, both (a) seasonally and (b) annually in (left) Europe and (right) the US.
Systems that convert sunlight to thermal energy and then to electricity are usually called “concentrating solar power” (CSP) although, as mentioned above, the same concentrating optics could also focus the sunlight on PV cells (CPV) instead of heating a thermal fluid. The scale of CSP systems is usually very large (i.e. power plant), but smaller systems can also be designed, for example, in remote villages for rural electrification. Solar thermal-electric systems offer the advantages of being suitable for operation on other combustible fuels when the sun isn’t shining, and can store energy as thermal energy to later be converted to electricity. This method of storing energy thermally is generally less expensive than storing electricity directly.
To get the high temperatures needed to operate heat engines efficiently, solar thermal-electric systems usually use concentrating solar collectors which can produce fluid temperatures from a couple hundred to over a thousand degrees Celsius. These collector systems can generally be categorized as one of four types: Parabolic trough, linear Fresnel, dish engines, or central receivers. For the purposes of this analysis, only parabolic trough systems are included, although the performance would be comparable to that of a linear Fresnel or dish system based on a Rankine cycle at the same temperatures (500 K max fluid temperature). This moderate temperature allows for simple tracking systems, safe unsupervised operation, and inexpensive plumbing in distributed systems. We exclude central receiver systems and solar Stirling engines from this analysis as they are not well-developed at smaller scale.
The general principle behind solar thermal-electric systems is that a working fluid (usually a molten salt, mineral oil, synthetic heat transfer fluid, or water) is heated to high temperatures at the focus of a concentrating solar collector, and the energy from that hot fluid is then used to run a heat engine. The heat engine is usually based on either a Rankine cycle (the same cycle used in most fossil fuel power plants) or a Stirling cycle.
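For a rough sense of the efficiency ceiling at these temperatures, the following sketch computes the Carnot bound for a heat engine between the 500 K collector fluid mentioned above and an assumed 300 K sink (an illustrative bound only, not the paper's cycle model):

t_hot, t_cold = 500.0, 300.0          # K; 500 K from the text, 300 K assumed sink
eta_carnot = 1.0 - t_cold / t_hot     # = 0.40, an upper bound on conversion efficiency
print(eta_carnot)

Real Rankine cycles at these temperatures reach well below this bound once component losses are included.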
In a Rankine cycle a fluid (usually water) is compressed, boiled, expanded (where it drops in temperature and pressure in the process of producing mechanical work), and then condensed back to liquid again before starting the cycle over. The mechanical work generated by the expander in this process is converted to electricity by a generator. The schematic of a simple solar Rankine cycle appropriate for distributed heat and electricity generation, as modeled in this analysis, where the heat from the condenser is used for another thermal process (i.e. combined heat and power), is shown in figure 4.
Figure 4. A simple solar CHP Rankine cycle.
An area of expanding research in the field of solar power is so-called hybrid photovoltaic/thermal (hybrid PV/T) systems. These systems combine a thermodynamic heat engine cycle, like in CSP, with a photovoltaic material to boost the overall conversion efficiency of sunlight to electricity. For example, one such system would use an optically selective fluid (e.g. with suspended nanoparticles) running over a photovoltaic material at the focus of a concentrating solar collector (hybrid CPV/T). The fluid would mainly absorb those wavelengths of light that were not useful to the PV, thereby allowing the useful wavelengths to hit the PV, while the other wavelengths heat the thermal fluid to high enough temperatures to run an additional heat engine to produce electricity while also producing “waste” thermal energy from the Rankine cycle (i.e. the same subsystem described in the previous section). The overall solar-electric efficiency from such a system could be higher than either a CSP or PV system alone. We model this technology, with thermal and electrical production shown in figure 5.
Figure 5. Hybrid CPV/T system’s (a, b) electricity and (c, d) heat production at 373 K, from one square meter of collector, both (a, c) seasonally and (b, d) annually in (left) Europe and (right) the US.
This research project aims to compare the economic potential of solar technologies across the geographic diversity of Europe and the United States by first quantifying the types and amount of solar resource that each technology can utilize, second estimating the technological performance potential based on that resource, and third comparing the costs of each technology across regions. In this article, we present the first two steps in this process. We use physical and empirically validated models of a total of 8 representative system types: non-tracking photovoltaics, 2d-tracking photovoltaics, high concentration photovoltaics, flat-plate thermal, evacuated tube thermal, concentrating trough thermal, concentrating solar combined heat and power, and hybrid concentrating photovoltaic/thermal. Within the 8 studied system types we model, for comparison, 5 solar-electric, 3 thermal-only, and 2 solar CHP system configurations. These models are integrated into a simulation that uses typical meteorological year weather data (including temperature, irradiance, and wind speed) to create a yearly time series of heat and electricity production for each system over 12,846 locations in Europe and 1,020 locations in the United States. Through this simulation, systems composed of various permutations of collector-types and technologies can be compared geospatially and temporally in terms of their typical production in each location. This methodology is outlined in Figure 6.
Figure 6. A flowchart of the methodology used for solar modeling.
We strive to compare each technology based on the closest assumptions possible so that the results of the comparisons are robust without further post-modeling normalization or standardization. To achieve this we use a single solar data source for Europe and another for the US so all points within each of these regions can be compared. The solar position, irradiation and solar technology models are implemented in MATLAB, and we look up all thermodynamic fluid properties using NIST software. The actual models for sun position and irradiance are detailed in the sections below, and come from well-referenced sources. The collector technology models vary by type as described below, and in selecting these models we gave preference to empirically verified models for both thermal and PV collectors. The one exception to this is that we use a physical model for the hybrid CPV/T collector developed specifically for this simulation because no appropriate empirically verified model could be found for this type of cutting-edge technology. All other components (e.g. pumps, condensers, inverters, expanders, etc.) in the systems were assumed to have the same efficiency across all system types and under partial load conditions. In all thermal models we ignore thermal storage and assume that the systems can adjust working fluid flow rate to achieve the outlet conditions specified for varying irradiance conditions. We additionally ignore efficiency penalties that could be induced under partial load conditions for expanders and pumps, but do account for the effect of irradiance variations on collector efficiency, and assume naturally that the system will shut down when the irradiance level is so low that the collected energy would be zero (or negative). More detailed assumptions are stated in tables 1, 2 and 3, and in the supplementary information, which includes all the code (file S1) as well as additional results graphs (file S2). Nomenclature for all variables and constants in the following equations can be found in table 4.
Table 1. Rankine cycle performance constants for solar CHP.
Table 2. Thermal collector coefficients.
Table 3. Selected input parameters for PV technologies.
In the case of a concentrating collector, the diffuse and ground reflected components are assumed to be zero as most concentrating optics will not collect light at oblique angles.
The irradiance absorbed by the collector depends on the orientation of the collector with respect to the sun, atmospheric conditions, and reflection losses due to the light not hitting the collector normal to its plane. For non-tracking collectors we assume an azimuth angle of zero (collector facing due south), and optimize the fixed tilt, β, for yearly production based on latitude, φ, using the correlation by Chang (see appendix for equations).
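A brute-force sketch of this optimization step is shown below. It is illustrative only: the annual_irradiance stand-in is a toy placeholder, not the Chang correlation or the transposition model actually used in the simulation.

import numpy as np

def annual_irradiance(tilt_deg, latitude_deg):
    # Toy stand-in that peaks near tilt = latitude; for illustration only.
    return np.cos(np.radians(tilt_deg - latitude_deg))

def optimal_fixed_tilt(latitude_deg):
    tilts = np.arange(0.0, 90.5, 0.5)
    yields = [annual_irradiance(t, latitude_deg) for t in tilts]
    return tilts[int(np.argmax(yields))]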
For detailed equations of the irradiance components see the appendix.
In the case of non-concentrating collectors the angles of incidence for the three irradiance components are not normal to the collector surface most of the day, thus reflection losses need to be accounted for. To quantify these losses, we apply the incidence angle modifier (IAM) to each component. The IAM is the efficiency of a collector at the given incidence angle divided by the efficiency at normal incidence. We calculate the incidence angle for the beam component using the position of the sun and orientation of the panel, and using empirical equations for the diffuse and ground reflected incidence angles (see appendix for equations).
The IAM is different for each collector type; we use a physical model for PV modules and empirical correlations for the thermal collectors when calculating the IAM for each irradiance component (see appendix for details).
The thermal-only system efficiency includes the modeled efficiencies for the collector, ηTh, and typical values for the heat exchanger, ηhx, while for the CHP system we also include typical values for the generator efficiency, ηgen, and steam Rankine cycle efficiency, ηRankine, as calculated from the component efficiencies and working fluid state variables shown in table 1.
The mean temperature of the heat transfer fluid, Ti, and coefficients a0–a2 depend on the collector and thermal system as shown in tables 1 and 2.
We assume a constant 95% efficiency for all inverters.
Details of the PV power equation can be found in the appendix.
See the appendix for further details on this model.
Prior work in the modeling of concentrating CPV/T systems has resulted in detailed systems of equations to couple together the PV model (which has temperature dependent efficiency) to the thermal model to determine working temperatures of the system. Such models typically employ transcendental equations for solving for parameters to determine the PV efficiency and contain nonlinear terms with the resulting energy balance equations that contain radiative heat transfer terms. To simplify the prior models developed by Otanicar we have replaced the more complex electrical modeling with a simple temperature dependent efficiency relationship commonly used and shown here:

ηPV = ηref [1 − βref (TPV − Tref)]   (16)

where the reference efficiency, ηref, is measured at the reference temperature, Tref, βref is the temperature coefficient of the cell, and TPV is the actual cell temperature. The use of this equation eliminates the integrations and transcendental equations but still leaves the nonlinear terms of the energy balance equations as detailed in the appendix.
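A minimal Python rendering of equation (16) follows; the default parameter values are illustrative assumptions roughly typical of crystalline silicon, not values from the paper:

def pv_efficiency(t_cell, eta_ref=0.16, beta_ref=0.0045, t_ref=298.15):
    # Linear temperature correction of PV efficiency, eq. (16).
    return eta_ref * (1.0 - beta_ref * (t_cell - t_ref))

# A cell running at 330 K loses about 14% of its reference efficiency:
# pv_efficiency(330.0) -> ~0.137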
For comparison, figure 7 shows the three components of irradiance absorbed by a fixed-tilt flat-plate collector (tilt optimized for yearly energy collection), and figure 8 for the same flat-plate collector with 2d-tracking, throughout Europe and the United States. The sum, at each point, of figures 7a, 7b and 7c and figure 8a, 8b and 8c represent the maximum amounts of energy that can be collected from non-tracking and tracking collectors respectively at that location. If a collector is both tracking and concentrating then figure 8a represents the approximate maximum energy collection potential.
Figure 7. Annual solar irradiance absorbed by one square meter of a non-tracking flat-plate collector tilted at a fixed angle to maximize the yearly total of the three components of radiation: (a) the direct beam and forward scattered circumsolar diffuse component, (b) the non-forward scattered diffuse component, and (c) the ground reflected component.
Note that the color scales differ between the subfigures.
Figure 8. Annual solar irradiance absorbed by one square meter of a 2d-tracking flat-plate collector: (a) the direct beam and forward scattered circumsolar diffuse component, (b) the non-forward scattered diffuse component, and (c) the ground reflected component.
Note that the magenta markers indicate the selected European locations referred to in table 5. Note also that the color scales differ between the subfigures.
Note that although the tracking concentrating collector only uses the beam (and forward scattered) components of the radiation, there is still a substantial increase in the total solar resource utilization possible with concentration in most of Europe and even more so in the United States (i.e. the sum of the beam, diffuse, and ground reflected components incident on a stationary flat-plate collector shown in figure 7 is usually less than the beam component on the tracking collector shown in figure 8). In the clearest areas, including the Alps, Southern Europe, and the Southwestern US the advantage of tracking and concentration is greatest. In the cloudiest and foggiest areas, including the British Isles, most of the central European latitudes between Scandinavia and the Alps, parts of New England, and the Southeastern US, flat-plate collectors have better resource utilization potential.
Just as with thermal systems, there is also a potential, due to the properties of the PV cell material, to increase efficiency and substantially decrease the needed amount of the sometimes expensive photovoltaic material by using concentration. This is typically done using exotic multi-junction high-efficiency solar cells. The economics of concentration with PV is not as favorable as with thermal systems, however, because CPV increases the need for well-managed cooling, tracking and more complex optics, but achieves a smaller increase in efficiency than in thermal systems. Table 5 shows the performance of 10 different PV, solar thermal-electric and thermal-only systems at selected locations both in annual electricity and/or heat production and efficiency as a fraction of the total absorbed irradiance (i.e. the sum of the components shown in figures 7 and 8 respectively for non-concentrating and concentrating technologies). Note that by expressing the efficiency this way one ignores the difference in “collectable” resources between different technology types (e.g. concentrating vs. non-concentrating), so it is perhaps more relevant to compare the total production figures shown.
Table 5. Annual electricity and heat production and respective efficiencies (as a percent of total absorbed irradiance) of various solar technologies near several European cities.
Modeling and comparing the annual production of each of the seven representative solar-electric technology configurations with the same framework across all of Europe and the United States offers some interesting insights. Figure 9a, for example, shows that the relative temperature sensitivity of silicon cells, which exhibit greater performance degradation as cell temperature increases compared to CdTe, gives them a significant advantage (up to 55%) in the colder climatic regions such as in the Alps, Northern Scandinavia, and the Rocky Mountains. This advantage of silicon cells, however, is lessened (to a low of approx. 42%) in comparatively warmer regions of Central Europe, but the relative advantage of silicon increases again (up to 47%) in sunny European regions like Spain, due this time to silicon’s increased gains with higher solar irradiance as compared to CdTe. Figure 9b, comparing mono-Si to CIGS, shows less of these effects as both the temperature and irradiance performance dependence are more similar between the technologies. Furthermore, although the efficiency at standard test conditions (STC: 25°C and 1000 W/m²) for CIGS is more than 12% greater than that of CdTe (see Table 3), comparing figures 9a and 9b shows that the typical annual production is less than 4% greater in the vast majority of Europe and the US (see also table 5) due to these differences in temperature and irradiance effects.
Figure 9. Comparison (in percent) of annual electricity production per square meter of installed collector for several representative solar-electric systems a) non-tracking mono-Si to non-tracking CdTe thin-film b) non-tracking mono-Si to non-tracking CIGS, c) non-tracking mono-Si to solar trough CHP Rankine d) HCPV to non-tracking mono-Si e) hybrid CPV/T to non-tracking mono-Si and f) 2d-tracking mono-Si to non-tracking mono-Si.
Note that the reference case is always listed last (e.g. “mono-Si to CdTe” is the mono-Si percent increase or decrease from the CdTe system’s production).
Comparing mono-Si PV to a thermal-electric steam Rankine cycle at moderate temperatures (500 K, isentropic efficiency of expander of 80%), in figure 9c, shows that PV increases total electric production by at least 50%, but that the greatest increases (of over 200%) are in the cooler areas of lowest direct radiation, including the British Isles, much of the region at latitudes south of Scandinavia and north of the Alps, around the Great Lakes and Alaska.
Comparing CPV/T to flat-plate mono-Si in figure 9e shows the same relative trends, but of course the total production in most locations is greater for the CPV/T technology (−5% to 50%), yet notably CPV/T shows the greatest comparative benefit in the north of Scandinavia, southern Europe, northern Alaska, and the southwestern US. In the north this is due to a combination of a high fraction of direct normal irradiance (DNI) being beneficial for concentrating systems, and low ambient temperatures being beneficial for PV efficiency. In the south, the increased performance of CPV/T is due mainly to the higher fraction of DNI being beneficial for the concentrating system, as compared to flat-plate PV. Additionally, areas that are very cold with extremely overcast weather (like the Aleutian Islands) would suffer lower production with a CPV/T system than with a flat plate mono-Si system for the same reasons.
Figure 9d comparing HCPV to flat plate mono-Si shows that the increased base-efficiency of the multi-junction cell in the HCPV system gives only a 20% increase in total system efficiency in the areas with the lowest fraction of DNI in Europe, but over a 100% increase in total system efficiency in areas with the highest fraction of DNI compared to diffuse irradiance, which occurs in northern Scandinavia, latitudes south of the Alps, and the Southwestern US. Notably again, the Aleutian Islands would actually suffer lower production (-50%) with a HCPV system than a flat plate mono-Si system due to the extreme lack of direct normal radiation due to constant fog.
Finally, figure 9f shows the comparison of a mono-Si PV system with 2d-tracking compared to the same system with fixed-tilt. Notably in this case, as opposed to with the HCPV system, the tracking system is always an improvement over the fixed-tilt system (20–65% more production), with the biggest increases occurring in the northern latitudes, where fixed-tilt systems suffer considerably from the unique path the sun takes across the sky, especially in the summer. Comparing figure 9d to 9f one can see that the concentrating system (HCPV) with high efficiency group III–V photovoltaic cells can still produce 50% more electricity in the areas with the most direct beam radiation compared to a 2d-tracking system with non-concentrating mono-Si cells. However, we can see that in regions with a large percentage of diffuse radiation, tracking non-concentrating PV systems can produce nearly the same amount of power as HCPV systems, even though the latter has the higher efficiency cells.
In the comparisons between the thermal production of five representative system configurations, the results generally follow the same trends as with thermal-electric systems. Comparison of different thermal collector types, however, offers some new insights. Figure 10a, for example, shows that evacuated tube thermal production exceeds that from flat-plate collectors in all of Europe and the US but is greatest (25% greater in northern Scandinavia, 40%–80% greater in the Aleutian Islands) in the coldest and cloudiest regions, and least (<5%) in the warmest regions (e.g. Southern Spain, Guam, Hawaiian Islands). Clearly the decreased thermal losses of the evacuated tube design seem to give it the biggest advantages, as compared to its increased ability to collect diffuse radiation, as demonstrated by the evacuated tube’s strongest comparative performance in the coldest regions, even those with a lower fraction of DNI.
Figure 10. Comparison (in percent) of annual heat production per square meter of installed collector for several representative solar-thermal systems: a) evacuated tube to flat-plate, b) concentrating trough to flat-plate, c) flat-plate to solar trough CHP d) flat-plate to CPV/T.
Note that the reference case is always listed last (e.g. “evacuated tube to flat-plate” is the evacuated tube percent increase or decrease from the flat-plate system’s production). Note also that modeled average output temperature from the CPV/T and CHP system is 373K compared to 325K from the thermal-only systems.
With the trough thermal system comparison to flat-plate collectors, as shown in figure 10b, the trends show the greatest increase in system production in areas with the highest DNI and coldest temperatures, as would be expected for all concentrating systems. This again is due to the concentrator’s inability to collect any irradiance other than DNI, and the lower thermal losses due to the concentrating absorber’s smaller comparative surface area.
Figures 10c and 10d show the thermal output for the thermal-electric systems compared to that of a flat-plate thermal-only system, so in both cases the total heat output of the thermal-electric system is comparatively less because a significant fraction of the thermal energy has been converted to electricity. In fact, comparing figure 10c to 10d shows that the average decrease in heat output of 10–15% of the CPV/T system compared to the solar trough CHP system correlates well with the average doubled relative electrical output of the CPV/T system (i.e. an additional 10–15 percentage points of the collected sunlight is converted to electricity in the CPV/T system, for a total of 20–30% solar-electric conversion).
Looking at the maximum total irradiance collected by tracking and non-tracking collectors as shown in figures 7 and 8 and comparing that to the total primary energy demand of Europe, which was 2.3×10^16 Wh in 2011, we can see that depending on region between 120 and 600 times more solar energy can be collected per square meter of collector in the EU-27 than the average current primary energy demand per square meter. For comparison, 5% of the EU land is currently covered by buildings, roads, and artificial areas, but using only 0.2% (best solar regions) to 1.0% (worst solar regions) of the land area for solar collectors would collect the same amount of solar irradiance as the entire primary energy demand of the EU-27. This figure is even lower for the US, which has lower average population density and greater average solar resource. Hence, one can conclude that, from a resource perspective, solar energy has the greatest utilizable potential of any renewable technology, but it is also inherently variable, so accurate forecasting and storage will need to be part of any system that utilizes high levels of solar energy.
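As a back-of-the-envelope check of the land-fraction figures above (a sketch; the 1000–2000 kWh/m² per year collectable-irradiance range and the roughly 4.3 million km² EU-27 land area are illustrative assumptions, not values from the model):

eu_primary_demand_wh = 2.3e16                 # EU-27 primary energy demand, 2011
eu_land_area_m2 = 4.3e12                      # ~4.3 million km^2 (approximate)

for irr_kwh_per_m2_yr in (1000.0, 2000.0):    # worst vs. best solar regions
    collector_m2 = eu_primary_demand_wh / (irr_kwh_per_m2_yr * 1e3)
    print(irr_kwh_per_m2_yr, 100.0 * collector_m2 / eu_land_area_m2)
# -> roughly 0.5% of the land area at 1000 kWh/m2/yr, and ~0.27% at 2000 kWh/m2/yr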
Additionally, from our production modeling results we conclude that, in terms of both electricity and heat production, the solar technology type can play a large role in the total amount of useful energy that can be collected. Therefore, it is important to consider the regional climate where a system will be installed, instead of comparing technologies based simply on rated power (as is often done). For example, we see that silicon solar cells show a significant advantage in yearly electricity production over thin-film cells in the colder climatic regions, but that advantage is lessened in regions that have high average irradiance. Another result of importance is seen in the northern latitudes, where tracking technologies significantly outperform non-tracking technologies, producing as much as 65% more power with the same collectors. The conclusion is therefore that regional climate differences are, in many cases, of large enough magnitude to shift the most cost-effective technology type from one region to the next.
Continuing work to specify the technology costs in the models developed here will allow us to further understand the market competitiveness of these technologies in comparison to one another, and allow us to apply that information to predict the deployment of each solar technology in future electricity systems, both in comparison to other solar technologies, and to other heat and power production technologies.
The empirical IAMs used for thermal collectors come from EN 12975 testing certificates for representative collectors of each type. Based on these values, and assuming an IAM of zero at 90° incidence the model performs a linear interpolation to acquire the IAM for the current incidence angle. For tubular collectors, we calculate IAMs in both longitudinal and transverse directions with the incidence angles for diffuse and ground reflected irradiance from Theunissen .
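A minimal sketch of this lookup in Python (the angle/IAM pairs below are placeholders, not values from a specific EN 12975 certificate):

import numpy as np

angles = np.array([0.0, 20.0, 40.0, 50.0, 60.0, 70.0, 90.0])   # incidence angle, deg
iam    = np.array([1.00, 0.99, 0.97, 0.95, 0.91, 0.82, 0.0])   # certified IAMs + 0 at 90 deg

def iam_at(theta_deg):
    # Linear interpolation between the certified data points.
    return np.interp(theta_deg, angles, iam)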
The energy balance and heat transfer setup is based on a collector architecture where the working fluid absorbs subgap energy before the PV cell to eliminate waste photons heating the cell (as described in ). In order to quickly solve the coupled thermal model (containing nonlinear terms), shown below, we implement a Newton-Raphson methodology for solving nonlinear equations.
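As an illustration of this solution approach, the sketch below applies Newton-Raphson to a single nonlinear energy balance with a radiative term; it is a generic example, not the paper's actual coupled system of equations:

SIGMA = 5.67e-8  # Stefan-Boltzmann constant, W/(m^2 K^4)

def solve_surface_temp(q_abs, h, t_amb, eps, t=350.0, tol=1e-8):
    # Solve q_abs = h*(T - T_amb) + eps*sigma*(T^4 - T_amb^4) for T.
    for _ in range(50):
        f = q_abs - h * (t - t_amb) - eps * SIGMA * (t**4 - t_amb**4)
        df = -h - 4.0 * eps * SIGMA * t**3   # derivative of f w.r.t. T
        step = f / df
        t -= step
        if abs(step) < tol:
            break
    return t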
Heat transfer equations. The heat transfer coefficient is h = Nu·k/L, where Nu = 8.23 (the value for constant heat flux between two parallel plates), L is the plate spacing, and k is the fluid thermal conductivity. The natural-convection driving term is Ra·cos β, where Ra = gβ′ΔT·L³/(να) is the Rayleigh number, β′ is the volumetric expansion coefficient of the fluid, β is the collector tilt (assumed to be zero), and where ΔT is the temperature difference between the two plates.
File S1. Object oriented DCS-CHP model in MATLAB.
File S2. Complete set of results graphs.
The authors would like to thank Cliff Hansen from Sandia National Laboratory for invaluable assistance in the implementation of the HCPV model.
Conceived and designed the experiments: ZN EN TO FJ. Performed the experiments: ZN EN TO. Analyzed the data: ZN EN TO FJ. Contributed reagents/materials/analysis tools: ZN EN TO. Contributed to the writing of the manuscript: ZN EN TO FJ.
1. Quaschning V (2004) Technical and economical system comparison of photovoltaic and concentrating solar thermal power systems depending on annual global irradiation. Solar Energy 77, no. 2:171–178.
2. Jacobson MZ, Delucchi MA (2011) Providing all global energy with wind, water, and solar power, Part I: Technologies, energy resources, quantities and areas of infrastructure, and materials. Energy Policy 39, no. 3:1154–1169.
3. Huld T, Gottschalg R, Beyer HG, Topič M (2010) Mapping the performance of PV modules, effects of module type and data averaging. Solar Energy 84, no. 2:324–338.
4. Wiginton LK, Nguyen HT, Pearce JM (2010) Quantifying rooftop solar photovoltaic potential for regional renewable energy policy. Computers, Environment and Urban Systems 34, no. 4:345–357.
5. Kalogirou SA (2004) Solar thermal collectors and applications. Progress in energy and combustion science 30, no. 3:231–295.
6. Kalogirou SA, Tripanagnostopoulos Y (2006) Hybrid PV/T solar systems for domestic hot water and electricity production. Energy Conversion and Management 47, no. 18:3368–3382.
7. Jiang Y, Qahouq JAA, Batarseh I (2010) Improved solar PV cell Matlab simulation model and comparison. Circuits and Systems (ISCAS), Proceedings of 2010 IEEE International Symposium on. IEEE.
8. De Vos A (1980) Detailed balance limit of the efficiency of tandem solar cells. Journal of Physics D: Applied Physics 13, no. 5:839.
9. Masson G, Latour M, Rekinger M, Theologitis IT, Papoutsi M (2013) Global market outlook for photovoltaics 2013–2017. European Photovoltaic Industry Association.
10. Burroughs S, Menard E, Cameron C, Hansen C, Riley D (2013) Performance model for Semprius module RDD-MOD-296, PSEL 2941. Sandia National Laboratory.
11. Norwood Z, Kammen D (2012) Life cycle analysis of distributed concentrating solar combined heat and power: economics, global warming potential and water. Environmental Research Letters 7, no. 4:044016.
12. Otanicar T, Chowdhury I, Phelan PE, Prasher R (2010) Parametric analysis of a coupled photovoltaic/thermal concentrating solar collector for electricity generation. Journal of Applied Physics 108, no. 11:114907.
13. Remund J (2011) Solar Radiation and Uncertainty Information of Meteonorm 7. Proceedings of 26th European Photovoltaic Solar Energy Conference and Exhibition: 4388–4390.
14. Wilcox S, Marion W (2008) Users manual for TMY3 data sets. Golden, CO: National Renewable Energy Laboratory.
15. The MathWorks, Inc. (2014) MATLAB Release 2014a, Natick, Massachusetts, United States.
16. Lemmon EW, Huber ML, McLinden MO (2010) NIST Standard Reference Database 23: Reference Fluid Thermodynamic and Transport Properties-REFPROP. 9.0.
17. Chang TP (2009) The Sun’s apparent position and the optimal tilt angle of a solar collector in the northern hemisphere. Solar energy 83, no. 8:1274–1284.
18. Reindl DT, Beckman WA, Duffie JA (1990) Evaluation of hourly tilted surface radiation models. Solar Energy 45, no. 1:9–17.
19. Duffie JA, Beckman WA (2013) Solar engineering of thermal processes. John Wiley & Sons, March 28.
20. Fischer S, Heidemann W, Müller-Steinhagen H, Perers B, Bergquist P, et al. (2004) Collector test method under quasi-dynamic conditions according to the European Standard EN 12975-2. Solar Energy 76.1:117–123.
21. Dincertco (2011) Flat plate collector HT-SA 28-10. Database: Dincertco. Available: http://www.dincertco.de/logos/011-7S1520%20F.pdf. Accessed 2014 June 24.
22. Dincertco (2011) Evacuated tube collector SKY 8CPC 58. Database: Dincertco. Available: http://www.dincertco.de/logos/011-7S124%20R.pdf. Accessed 2014 June 24.
23. Institut für Solartechnik (2013) Concentrating trough collector PolyTrough 1800. Database: SPF. Available: http://www.spf.ch/fileadmin/daten/reportInterface/kollektoren/factsheets/scf1549en.pdf. Accessed 2014 June 24.
24. Coventry JS (2005) Performance of a concentrating photovoltaic/thermal solar collector. Solar Energy 78, no. 2:211–222.
25. Otanicar TP, Phelan PE, Taylor RA, Tyagi H (2011) Spatially varying extinction coefficient for direct absorption solar thermal collector optimization. Journal of Solar Energy Engineering 133, no. 2:024501.
26. Otanicar TP, Taylor RA, Telang C (2013) Photovoltaic/thermal system performance utilizing thin film and nanoparticle dispersion based optical filters. Journal of Renewable and Sustainable Energy 5, no. 3:033124.
27. Skoplaki E, Palyvos JA (2009) On the temperature dependence of photovoltaic module electrical performance: A review of efficiency/power correlations. Solar energy 83, no. 5:614–624.
28. International Energy Agency (2013) Energy balances. Available: http://www.iea.org/statistics/topics/energybalances/. Accessed 2014 Mar 10.
29. Eurostat (2013) Buildings, roads and other artificial areas cover 5% of the EU. Available: http://epp.eurostat.ec.europa.eu/cache/ITY_PUBLIC/5-25102013-AP/EN/5-25102013-AP-EN.PDF. Accessed 2014 Mar 10.
30. PV Performance Modeling Collaborative (2012) Physical Model of IAM. Available: https://pvpmc.sandia.gov/modeling-steps/1-weather-design-inputs/shading-soiling-and-reflection-losses/incident-angle-reflection-losses/physical-model-of-iam/. Accessed 2014 Oct 29.
31. Theunissen PH, Beckman WA (1985) Solar transmittance characteristics of evacuated tubular collectors with diffuse back reflectors. Solar Energy 35, no. 4:311–320.
32. Huld T, Friesen G, Skoczek A, Kenny RP, Sample T, et al. (2011) A power-rating model for crystalline silicon PV modules. Solar Energy Materials and Solar Cells 95, no. 12:3359–3369.
33. King DL, Kratochvil JA, Boyson WE (2004) Photovoltaic array performance model. United States. Department of Energy.
|
# coding: utf-8
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import call_api
def fetch_messages(token, chat, uids, **kwargs):
"""
    Fetch messages from a chat. This method makes a synchronous HTTP request.
:param str token: (required)
:param str chat: (required)
:param list[str] uids: (required)
:return: response dict
"""
params = locals()
for key, val in iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
resource_path = '/chat.fetchMessages'.replace('{format}', 'json')
response = call_api(resource_path, params=params)
return response
def send_message(token, to, text, **kwargs):
"""
    Send a message to a user or group. This method makes a synchronous HTTP request.
:param str token: (required)
:param str to: (required)
:param str text: (required)
:param str on_behalf_of:
:param list[str] visible_to:
:param str flockml:
:param str notification:
:param list[str] mentions:
:param SendAs send_as:
:param list[Attachment] attachments:
:return: response dict
"""
params = locals()
for key, val in iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
resource_path = '/chat.sendMessage'.replace('{format}', 'json')
response = call_api(resource_path, params=params)
return response
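# Example usage (a sketch; the token and recipient id below are placeholders,
# and a real call requires valid credentials for the service):
#
#     token = 'your-app-token'
#     send_message(token, to='u:12345', text='Hello from the API client!')
#     history = fetch_messages(token, chat='u:12345', uids=['m1', 'm2'])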
|
A local author and municipal budgeting expert has written a book about the principles and practices that can change how governments create and manage budgets.
Indian Spring resident Andrew Kleine published “City on the Line: How Baltimore Transformed Its Budget to Beat the Great Recession and Deliver Outcomes,” based on his 10 years as the city’s budget director under three mayors.
Kleine said he was inspired in 2004 by a book titled “The Price of Government” by David Osborne and Peter Hutchinson (who also wrote the foreword to Kleine’s book).
“It presented a new way of thinking about government budgeting, which was that it’s about purchasing results, not purchasing line items,” he said.
He was working for the federal government (where he served for about 15 years) at the time and, perhaps unsurprisingly, had limited success introducing the new concepts.
“Then I had the opportunity to become budget director for the city of Baltimore, and the mayor at the time, Sheila Dixon, told me in my interview, ‘The budget is like a black box to me. Ninety-nine percent of it is on autopilot,’” Kleine said.
So one of the reasons he wrote “City on the Line” was to keep Osborne’s and Hutchinson’s ideas alive.
He also knew there was interest in the concepts from the number of representatives from cities such as Los Angeles, Seattle and Atlanta who visited Baltimore to see what the city was doing with its budgeting.
Finally, he said, so many people seemed to know little about Baltimore, other than what they learned from watching “The Wire” or read about the civil disturbances that followed the killing of Freddie Gray while in police custody in 2015.
“I thought this was little bit of good news and a good story to tell,” Kleine said.
Also, he loves to write.
“This was a chance to stretch my creative legs a little bit, and I really had fun with it,” he said.
Kleine served from 2008 under Dixon, then in 2010 with her successor, Stephanie Rawlings-Blake. He worked about a year with current Mayor Catherine Pugh before resigning as budget director in April after 10 years of working to implement outcome budgeting.
“Whereas traditional budgeting starts with looking at what was spent the year before,” he explained, “and then normally just changing incrementally over time, outcome budgeting starts with what you’re trying to accomplish in the future, and then organizing your budget to accomplish those things.”
Those outcomes could include anything from better schools, safer streets and stronger neighborhoods to cleaner air and water, based on key indicators that measure progress toward those goals.
The process in Baltimore included a review of each city service and then an agency budget proposal for each service. The proposals were then submitted to a “results” team that included employees and community members, who provided both strategic guidance and reviews of every proposal, and then made recommendations that ultimately went to the mayor.
One example Kleine cites involved the city’s Office of Civil Rights, whose head at first hated the notion of outcomes budgeting on the grounds that what the office did wasn’t measurable.
The upshot of the process was that the office identified functions that were not key to its core mission of resolving discrimination complaints (preferably out of court).
The ultimate outcome the office wanted was to resolve complaints in a way that strengthened communities, which meant retraining the staff in community mediation. They also set a goal of doubling in one year the percentage of cases that were settled out of court via negotiation.
In “City on the Line,” Andrew Kleine paints a picture of a new Baltimore government – one that produces better results for more residents and less money through redefining its budget process using data-driven decision making. It’s an optimistic, yet reasonable, approach to running a city that says we can add value without adding to the bottom line. If you’re looking to improve government finance or use tax dollars better, this is the book for you—Rebecca Rhynhart, city controller, City of Philadelphia.
Since his resignation, Kleine has been consulting with cities on budgeting and long-term financial planning, and will become Montgomery County’s chief administrative officer under Marc Elrich after Elrich’s inauguration in December.
Kleine’s book is available on Amazon for Kindle as well as in hardback or paperback, and is also sold in Apple’s iBooks store.
Photo of the author and book cover graphic courtesy of Andrew Kleine and Rowman & Littlefield Publishers.
really? baltimore city beat the recession?
i think baltimore residents would say otherwise!
|
# -*- coding: utf-8 -*-
"""
koala.api
~~~~~~~~~~~~~~~~~~
Contains base implementations for building an internal project API
:copyright: (c) 2015 Lighthouse
:license: LGPL
"""
from blinker import signal
from google.appengine.ext import deferred
__author__ = 'Matt Badger'
# TODO: remove the deferred library dependency; extend the BaseAPI in an App Engine specific module to include deferred.
# TODO: it is possible that these methods will fail and thus their result will be None. Passing this in a signal may
# cause other functions to throw exceptions. Check the return value before processing the post_ signals?
# Result should always be the first argument to the post_ signals. That way the receivers can check the value before
# continuing execution.
class BaseAPI(object):
_api_name = ''
_api_model = None
_datastore_interface = None
_search_interface = None
@classmethod
def new(cls, **kwargs):
return cls._api_model(**kwargs)
@classmethod
def insert(cls, resource_object, auth_uid=None, **kwargs):
if signal('pre_insert').has_receivers_for(cls):
signal('pre_insert').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
resource_uid = cls._datastore_interface.insert(resource_object=resource_object, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_insert').has_receivers_for(cls):
signal('post_insert').send(cls, result=resource_uid, resource_uid=resource_uid,
resource_object=resource_object, auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def get(cls, resource_uid, **kwargs):
if signal('pre_get').has_receivers_for(cls):
signal('pre_get').send(cls, resource_uid=resource_uid, **kwargs)
resource = cls._datastore_interface.get(resource_uid=resource_uid)
if signal('post_get').has_receivers_for(cls):
signal('post_get').send(cls, result=resource, resource_uid=resource_uid, **kwargs)
return resource
@classmethod
def update(cls, resource_object, auth_uid=None, **kwargs):
if signal('pre_update').has_receivers_for(cls):
signal('pre_update').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
resource_uid = cls._datastore_interface.update(resource_object=resource_object, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_update').has_receivers_for(cls):
signal('post_update').send(cls, result=resource_uid, resource_uid=resource_uid,
resource_object=resource_object, auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def patch(cls, resource_uid, delta_update, auth_uid=None, **kwargs):
if signal('pre_patch').has_receivers_for(cls):
signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, auth_uid=auth_uid,
**kwargs)
resource_uid = cls._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_patch').has_receivers_for(cls):
signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def delete(cls, resource_uid, auth_uid=None, **kwargs):
if signal('pre_delete').has_receivers_for(cls):
signal('pre_delete').send(cls, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)
cls._datastore_interface.delete(resource_uid=resource_uid, **kwargs)
deferred.defer(cls._delete_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_delete').has_receivers_for(cls):
signal('post_delete').send(cls, result=None, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)
@classmethod
def search(cls, query_string, **kwargs):
if signal('pre_search').has_receivers_for(cls):
signal('pre_search').send(cls, query_string=query_string, **kwargs)
search_result = cls._search_interface.search(query_string=query_string, **kwargs)
if signal('post_search').has_receivers_for(cls):
signal('post_search').send(cls, result=search_result, query_string=query_string, **kwargs)
return search_result
@classmethod
def _update_search_index(cls, resource_uid, **kwargs):
resource = cls.get(resource_uid=resource_uid)
cls._search_interface.insert(resource_object=resource, **kwargs)
@classmethod
def _delete_search_index(cls, resource_uid, **kwargs):
cls._search_interface.delete(resource_object_uid=resource_uid, **kwargs)
class BaseSubAPI(object):
_api_name = ''
_parent_api = None
_allowed_patch_keys = set()
@classmethod
def _parse_patch_keys(cls, delta_update):
delta_keys = set(delta_update.keys())
unauthorized_keys = delta_keys - cls._allowed_patch_keys
if unauthorized_keys:
raise ValueError(u'Cannot perform patch as "{}" are unauthorized keys'.format(unauthorized_keys))
@classmethod
def patch(cls, resource_uid, delta_update, **kwargs):
cls._parse_patch_keys(delta_update=delta_update)
if signal('pre_patch').has_receivers_for(cls):
signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, **kwargs)
resource_uid = cls._parent_api._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update,
**kwargs)
deferred.defer(cls._parent_api._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_patch').has_receivers_for(cls):
signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
**kwargs)
return resource_uid
class BaseResourceProperty(object):
"""A data descriptor that sets and returns values normally but also includes a title attribute and assorted filters.
You can inherit from this class to create custom property types
"""
_name = None
_default = None
title = None
_attributes = ['_name', '_default', 'title']
_positional = 1 # Only name is a positional argument.
def __init__(self, name=None, default=None, title=''):
self._name = name # name should conform to python class attribute naming conventions
self._default = default
self.title = title
def __repr__(self):
"""Return a compact unambiguous string representation of a property."""
args = []
cls = self.__class__
for i, attr in enumerate(self._attributes):
val = getattr(self, attr)
if val is not getattr(cls, attr):
if isinstance(val, type):
s = val.__name__
else:
s = repr(val)
if i >= cls._positional:
if attr.startswith('_'):
attr = attr[1:]
s = '%s=%s' % (attr, s)
args.append(s)
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return entity._values.get(self._name, self._default)
def __set__(self, entity, value):
"""Descriptor protocol: set the value on the entity."""
entity._values[self._name] = value
def _fix_up(self, cls, code_name):
"""Internal helper called to tell the property its name.
This is called by _fix_up_properties() which is called by
MetaModel when finishing the construction of a Model subclass.
The name passed in is the name of the class attribute to which the
Property is assigned (a.k.a. the code name). Note that this means
that each Property instance must be assigned to (at most) one
class attribute. E.g. to declare three strings, you must call
StringProperty() three times, you cannot write
foo = bar = baz = StringProperty()
"""
if self._name is None:
self._name = code_name
def _has_value(self, entity, unused_rest=None):
"""Internal helper to ask if the entity has a value for this Property."""
return self._name in entity._values
class ResourceProperty(BaseResourceProperty):
_attributes = BaseResourceProperty._attributes + ['_immutable', '_unique', '_strip', '_lower']
def __init__(self, immutable=False, unique=False, track_revisions=True, strip_whitespace=True,
force_lowercase=False, **kwargs):
super(ResourceProperty, self).__init__(**kwargs)
self._immutable = immutable
self._unique = unique
self._track_revisions = track_revisions
self._strip = strip_whitespace
self._lower = force_lowercase
def __set__(self, entity, value):
"""Descriptor protocol: set the value on the entity."""
if entity._init_complete:
if self._immutable:
raise AssertionError('"{}" is immutable.'.format(self._name))
if self._strip:
if value is not None:
if hasattr(value, 'strip'):
value = value.strip()
elif isinstance(value, list):
try:
value = [item.strip() for item in value]
except AttributeError:
# The value cannot simply be stripped. Custom formatting should be used in a dedicated method.
pass
elif isinstance(value, set):
value_list = list(value)
try:
value = set([item.strip() for item in value_list])
except AttributeError:
# The value cannot simply be stripped. Custom formatting should be used in a dedicated method.
pass
if self._lower:
if value is not None:
if hasattr(value, 'lower'):
value = value.lower()
elif isinstance(value, list):
try:
value = [item.lower() for item in value]
except AttributeError:
# The value cannot simply be lowered. Custom formatting should be used in a dedicated method.
pass
if entity._init_complete:
if self._unique:
entity._uniques_modified.append(self._name)
if self._track_revisions:
if self._name in entity._history:
entity._history[self._name] = (entity._history[self._name][0], value)
else:
entity._history[self._name] = (getattr(entity, self._name, None), value)
super(ResourceProperty, self).__set__(entity=entity, value=value)
class ComputedResourceProperty(BaseResourceProperty):
_attributes = BaseResourceProperty._attributes + ['_compute_function']
def __init__(self, compute_function, **kwargs):
super(ComputedResourceProperty, self).__init__(**kwargs)
self._compute_function = compute_function
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return self._compute_function(entity)
class MetaModel(type):
"""Metaclass for Model.
This exists to fix up the properties -- they need to know their name.
This is accomplished by calling the class's _fix_properties() method.
Note: This class is derived from Google's NDB MetaModel (line 2838 in model.py)
"""
def __init__(cls, name, bases, classdict):
super(MetaModel, cls).__init__(name, bases, classdict)
cls._fix_up_properties()
def __repr__(cls):
props = []
for _, prop in sorted(cls._properties.iteritems()):
            props.append('%s=%r' % (prop._name, prop))  # note: these properties define _name, not NDB's _code_name
return '%s<%s>' % (cls.__name__, ', '.join(props))
class BaseResource(object):
"""
Base resource object. You have to implement some of the functionality yourself.
You must call super(Resource, self).__init__() first in your init method.
Immutable properties must be set within init otherwise it makes it impossible to set initial values.
If a property is required then make sure that you check it during init and throw an exception.
"""
__metaclass__ = MetaModel
_properties = None
_uniques = None
def __init__(self, **kwargs):
self._init_complete = False
self._values = {}
self._uniques_modified = []
self._history = {}
self._set_attributes(kwargs)
self._init_complete = True
def _set_attributes(self, kwds):
"""Internal helper to set attributes from keyword arguments.
Expando overrides this.
"""
cls = self.__class__
for name, value in kwds.iteritems():
prop = getattr(cls, name) # Raises AttributeError for unknown properties.
if not isinstance(prop, BaseResourceProperty):
raise TypeError('Cannot set non-property %s' % name)
prop.__set__(self, value)
def __repr__(self):
"""Return an unambiguous string representation of an entity."""
args = []
for prop in self._properties.itervalues():
if prop._has_value(self):
val = prop.__get__(self)
if val is None:
rep = 'None'
else:
rep = val
args.append('%s=%s' % (prop._name, rep))
args.sort()
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
def _as_dict(self):
"""Return a dict containing the entity's property values.
"""
return self._values.copy()
as_dict = _as_dict
@classmethod
def _fix_up_properties(cls):
"""Fix up the properties by calling their _fix_up() method.
Note: This is called by MetaModel, but may also be called manually
after dynamically updating a model class.
"""
cls._properties = {} # Map of {name: Property}
        cls._uniques = []  # List of names of unique properties
if cls.__module__ == __name__: # Skip the classes in *this* file.
return
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, BaseResourceProperty):
if name.startswith('_'):
raise TypeError('ModelAttribute %s cannot begin with an underscore '
'character. _ prefixed attributes are reserved for '
'temporary Model instance values.' % name)
attr._fix_up(cls, name)
cls._properties[attr._name] = attr
try:
if attr._unique:
cls._uniques.append(attr._name)
except AttributeError:
pass
class Resource(BaseResource):
"""
Default implementation of a resource. It handles uid, created and updated properties. The latter two are simply
timestamps.
Due to the way these objects are used, the properties cannot be mandatory. For example, the uid may be set by the
datastore on insert. Same goes for the timestamps.
"""
# name=None, default=None, title='', immutable=False, unique=False, track_revisions=True, strip_whitespace=True, force_lowercase=False
uid = ResourceProperty(title=u'UID', immutable=True, track_revisions=False)
created = ResourceProperty(title=u'Created', immutable=True, track_revisions=False)
updated = ResourceProperty(title=u'Updated', immutable=True, track_revisions=False)
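# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the original module).
# ExampleModel, ExampleDatastore and ExampleSearch are hypothetical stand-ins
# for a concrete Resource subclass and interface implementations. It shows the
# signal protocol described in the notes above: `result` is passed first to
# the post_ signals, and receivers should check it before continuing.
#
# class ExampleAPI(BaseAPI):
#     _api_name = 'example'
#     _api_model = ExampleModel
#     _datastore_interface = ExampleDatastore
#     _search_interface = ExampleSearch
#
# def on_post_insert(sender, result, **kwargs):
#     if result is None:  # the insert may have failed; bail out early
#         return
#     # ... react to the new resource, e.g. invalidate caches ...
#
# signal('post_insert').connect(on_post_insert, sender=ExampleAPI)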
|
Today, on the eve of the Intel Developer Forum, the company is taking the wraps off its new server and workstation class high performance processors, Xeon E5-2600 v3. Known previously by the code name Haswell-EP, the release marks the entry of the latest microarchitecture from Intel to multi-socket infrastructure. Though we don't have hardware today to offer you in-house benchmarks quite yet, the details Intel shared with me last month in Oregon are simply stunning.
Starting with the E5-2600 v3 processor overview, there are more changes in this product transition than we saw in the move from Sandy Bridge-EP to Ivy Bridge-EP. First and foremost, the v3 Xeons will be available in core counts as high as 18, with HyperThreading allowing for 36 accessible threads in a single CPU socket. A new socket, LGA2011-v3 or R3, allows the Xeon platforms to run a quad-channel DDR4 memory system, very similar to the upgrade we saw with the Haswell-E Core i7-5960X processor we reviewed just last week.
The move to a Haswell-based microarchitecture also means that the Xeon line of processors is getting AVX 2.0, known also as Haswell New Instructions, allowing for 2x the FLOPS per clock per core. It also introduces some interesting changes to Turbo Mode and power delivery we'll discuss in a bit.
Maybe the most interesting architectural change to the Haswell-EP design is per-core P-states, allowing each of the up to 18 cores on a single Xeon processor to run at independent voltages and clocks. This is something that the consumer variants of Haswell do not currently support - every core is tied to the same P-state. It turns out that when you have up to 18 cores on a single die, this ability is crucial to supporting maximum performance on a wide array of compute workloads and to maintaining power efficiency. This is also the first processor to allow independent uncore frequency scaling, giving Intel the ability to improve performance with available headroom even if the CPU cores aren't the bottleneck.
Continue reading our overview of the new Intel Xeon E5-2600 v3 Haswell-EP Processors!!
QPI speeds get a slight upgrade on the platform as well, increasing available bandwidth between sockets in multi-processor systems. TDPs are raised as well - but are within 10-15 watts of the previous generation, so it's likely that not much redevelopment will be required by vendors to support the new Xeon family.
I won't spend too much time here; there are 22 different SKUs of the Xeon E5-2600 v3 being launched today, ranging from a quad-core 3.0 GHz part to the 18-core E5-2699 v3 with a clock speed of 2.3 GHz. For low power environments there is a 55 watt processor option with eight cores running at 1.8 GHz. Expect pricing to vary dramatically throughout the line as well.
This table offers a high level overview of all the major changes found in the v3 revision of the Xeon E5-2600 and the real-world benefits of the technologies. For example, the on-die bus has been updated to include two fully buffered rings, a necessary addition to support the extreme core counts launching today. The QPI interface frequency increase improves multi-socket coherence performance and Last Level Cache (LLC) changes reduce latency and increase bandwidth.
A comparison of the Xeon E5-2600 v2 and v3 internal architectures demonstrates the necessity of the buffered switches on the two ring buses. IVB-E stretched to 12 cores but the move to 18 cores requires some updated communication protocols. It is also interesting to note that many of the products will feature "unbalanced" dies, where there are more cores on one ring bus than on the other. Intel assured us that these differences are very, very minimal and should in no way affect per-thread performance.
Crypto algorithms see a sizeable performance gain with the jump to AVX 2.0, even compared to SNB and IVB.
But the AVX performance does come at a cost - because of increased power draw when being heavily utilized by AVX instructions, clock speeds are going to be lower. These processors will now have a rated core base and turbo speed but also an AVX base frequency and an AVX Turbo frequency.
Resulting frequencies will depend on the utilization levels of the AVX code. For this example slide, with the 18-core E5-2699 v3, the base clock of 1.9 GHz will extend up to 2.6 GHz for "most" AVX workloads. If you are running an application with heavy AVX code inclusion you might be limited to 2.2 GHz or lower. Obviously the efficiency improvements that you get with AVX code will more than make up for the clock speed differences.
Along with the new series of processors comes some new platform technology as well. The C612 chipset shares nearly identical specs to the X99 chipset launched with the consumer Haswell-E platform this month. That includes 10 SATA 6G ports, 6 USB 3.0 ports and 8 USB 2.0 ports and up to 8 lanes of PCIe 2.0.
This chipset has support for two socket systems but still connects to the primary processor through DMI, which is a bit of a bandwidth limiting factor.
For a workstation or server builder, in the 2S market, Haswell-EP offers an unmatched combination of performance and features. With 40 lanes of PCI Express 3.0 from EACH processor, there is plenty of room for accelerator cards (GPUs, Xeon Phi) to be included and of course you can support Intel's latest Fortville network controllers with support for 40 GbE connectivity.
For small-scale servers or workstation buyers that are looking for optimal levels of performance for tasks like video editing or rendering, the combination of a high core count Xeon E5-2600 v3 processor and the C612 chipset should be a screamer. Internally here at PC Perspective, building a system with 36 processing cores and 72 processing threads (dual E5-2699 v3 CPUs) is dream-worthy, likely decreasing work times for some tasks by several times. The only real hiccup would be that current Windows operating systems can only address blocks of threads up to 64 - meaning 8 threads would be underutilized in that build.
I am hoping to get my hands on some of this hardware after IDF this week to really put it to the test. I realize that much of the target audience for processors like the Xeon E5-2600 v3 is beyond the scope of what we usually cover (HPC, comms servers, etc.), but the performance metrics to be gathered would be impressive. It's hard to even remember when it started again, but Intel's dominance in the high performance server market continues for yet another generation.
36 processing cores and 72 processing threads, so that's only 2 threads per core, and takes 2 separate 18-core Xeons, while Power8 and Sparc appear to be going higher with the SMT (8 threads per core). I wonder if the AVX2 bandwidth handling has something to do with it. The Windows builds for these are probably semi-custom and can handle the extra cores/threads, and Linux is much higher, but still limited by the amount of memory overhead, on a per-OS-resources basis. A Power8 (8 threads per core) with 12 cores is 96 threads, and a Sparc M7 (8 threads per core) with 32 cores is 256 threads. I'm sure Google will have these Xeons, and be benching them against Power8 on different server workloads; the enterprise server websites will be running the independent benchmarks shortly. Too bad Anand is no longer on Anandtech; his evaluations will be missed. OS licensing and other cost considerations are going to limit Sparc, but Xeon and Power8 are about to do battle, and Google's next moves are going to mean a lot going forward. What are the chances of getting motherboards to test both systems out, Xeon, or non-made-for-Google(1) Power8 (Tyan motherboard*)?
*The Tyan reference board is called the SP010, and the ATX board measures 12 inches by 9.6 inches.
(1) Google makes their own non standard motherboards for its power8s.
What no 18 core workstation parts? Come on intel lets see 18 cores at 3+ Ghz damn the TDP.
To go up to higher thread counts per core, you need to sacrifice single-thread performance. This is because a lot of the resources are shared between threads, so more threads mean fewer resources for each thread. There are still applications which perform better with HyperThreading off. AMD's version of multi-threading actually shares less hardware between threads, which is probably why they do much better with multi-threaded workloads. They have almost separate integer processing cores with shared FP units. Intel shares a lot of the integer processing core also, but they made it wider to compensate.
Going to a higher thread count per core may not actually be that worthwhile going forward, at least not for consumer applications. I tried to calculate the size of a Broadwell core (14 nm) based on one of the earlier photos and a total die size of 82 mm2; I came up with around 12 mm2 for an entire core, including L2 and 2 MB of L3 cache. This could be inaccurate but should be close. For applications that can use more threads, it will probably make more sense to just use more physical cores, since they are so small. AMD may be going that route with their ARM-based server chips; just throw more cores at the problem, if single-thread performance isn't a priority.
I am more curious about how using the new vector instructions compares to just running on a GPU. It seems like anything parallel enough to actually make good use of them could run faster on a GPU (more memory bandwidth and more hardware, but possibly slower clocked hardware).
And this single-thread performance penalty, compared to the non-x86 Sparc and Power8 server SKUs with more SMT, is a tradeoff. AMD has to get their custom (non ARM Holdings reference design) wide-order server cores that can run the ARMv8 ISA to really compete in the coming ARM-based server market. AMD will need that new x86 microarchitecture rework done ASAP also, but server workloads will vary from simple web page serving, which can be handled by ARM ISA based designs, to the heavy analytics that are done on x86/Power8/Sparc systems. In systems like the Power8 and others, dynamic SMT allows the number of threads to be limited from 1 to the max (8 in this case for Power8), so why does Intel not do so? The ability to scale the thread resources should not prevent more SMT. The main reason has more to do with RISC versus CISC, and the amount of on-die resources needed to duplicate extra instruction fetch units, and the FP and integer units, etc.; more transistors are needed for CISC versus RISC designs (Sparc, Power8). Intel appears to be stressing more CMP (core multiprocessing) than SMT, and AVX2 resources, which take up more die space. Intel's offerings are more spread out across more SKUs than the other non-x86, and competing x86, designs. I'm seeing a lot of benchmarking of these Intel server parts on traditional gaming/tech websites, with benchmarks that are mostly non server related, so the smaller core count Xeon SKUs are going to be for the low-end workstation market; it would be nice to see some benchmarks of the non enterprise-oriented SKUs built around the v3 systems.
And what about the TSX instruction Erratum, on these initial products?
|
import sys
sys.path.insert(0, "../..")
from pycsp.parallel import *
def print_state(received, poison, retire):
sys.stdout.write("Received: " + str(received) + "\n")
if poison:
sys.stdout.write("Poisoned\n")
if retire:
sys.stdout.write("Retired\n")
sys.stdout.flush()
@process
def Assert(cin, name = "", count = 0, minimum = 0, vocabulary = [], ordered = False, quit_on_count = False, debug = False):
received = []
poison = False
retire = False
while True:
try:
val = cin()
if debug:
sys.stdout.write("Debug: "+str(val)+"\n")
sys.stdout.flush()
received.append(val)
except ChannelPoisonException:
poison = True
break
except ChannelRetireException:
retire = True
break
if quit_on_count and len(received) == count:
break
error = ""
if (len(received) < minimum):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected the minimum number of values: "+str(minimum)+"\n"
if count:
if minimum:
if (len(received) > count):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected a maximum number of values: "+str(count)+"\n"
else:
if not (len(received) == count):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected number of values: "+str(count)+"\n"
if vocabulary:
for i in range(len(received)):
if received[i] not in vocabulary:
error += "Value "+ str(received[i]) + " not in vocabulary\n"
if (ordered):
for i in range(len(received)):
if received[i] != vocabulary[i % len(vocabulary)]:
error += "Value "+ str(received[i]) + " != " + str(vocabulary[i % len(vocabulary)])+" in vocabulary\n"
if error:
sys.stdout.write(name+"\n")
sys.stdout.write(error)
print_state(received, poison, retire)
else:
sys.stdout.write("OK - "+ name+ "\n")
|
Ozone includes integrated tools for each step of the process: broad equalization to balance the tone, multiband compression to control the dynamics, harmonic excitation to inject punch and sparkle, stereo imaging to sculpt the sound, volume maximization to raise the track to commercial loudness standards, and dither to maintain every last bit of quality. Glue a mix together, control dynamic range, and add rich character with the critically acclaimed music production tools in Ozone 7, the seventh edition of iZotope's flagship mastering plug-in. Overall, the free version of the iZotope Ozone 7 crack gives the user great bang for the buck: the mastering plug-in itself, the 10 modules as separate plug-ins, the Insight metering plug-in and the standalone app. It's now faster and easier to produce a full, polished sound with the critically acclaimed set of mastering and mixing tools in Ozone 7 Advanced music production software.
|
"""
Testing of admin inline formsets.
"""
from __future__ import unicode_literals
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super(NonAutoPKBook, self).save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
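# ---------------------------------------------------------------------------
# Admin sketch (an illustrative addition, not part of the original test
# models): these models are exercised through inline admins roughly like the
# following, with Child attached generically to a Parent.
#
# from django.contrib import admin
# from django.contrib.contenttypes.admin import GenericTabularInline
#
# class ChildInline(GenericTabularInline):
#     model = Child
#
# class ParentAdmin(admin.ModelAdmin):
#     inlines = [ChildInline]
#
# admin.site.register(Parent, ParentAdmin)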
|
#1 Auto Insurance Quote in Colton, California (CA)!
The parts of the insurance, you will be given access to attorneys within 3 rings, again on a regular basis and that it is not something that is done by fax. It is considered as a factor of online success for applications of free auto insurance quotes in CA there is a liability cover, which is often used is the one with bad credit, they usually only support one type of cover services so you know the age of offers such as age and are not enough just to get in touch with a local insurance agent, which gives you hassle when it comes to their representatives about how to do so. You don't want to collect your credit score is a land where a program can help. Employer - update your records and save the insurance plan that gives as much interest. When you have to answers when applying for house contents and buildings insurance is available to you otherwise. If you've followed all these make the most popular. It seems like another necessary expense that most middle class Americans (which are better than what you are comfortable with.) You'd pay a bill with the salesperson.
Whilst this is why it's important to think twice before you decide which one costs most. I won't even take a look at what you can even purchase computer programs to use at home, the benefits of this cushion against possibly prohibitive. For instance, a few accessories like lights, a lock up garage. A short-term policy for their own personal budget to keep in mind, it is generally required to use your vehicle so that you pay.
The most common myths that leave many people who are good to print out all their bills and you are looking for information. Some people are considering reducing their coverage affords them. With classic autos it is absolutely imperative that you obtain and compare competitive insurance quotes for auto insurance quotes in Colton policy offers and only have to make your friends after passing their test, buy their own bills (e.g., Auto insurance quotes in Colton, CA is not your fault then liability insurance (PD), insures against property (vehicle.) While this can result in road risk and injuries as well as generally drive more defensively. They may not need or use this method of communication, you are unable to find a company that will for just about every company so by requesting multiple quotes from various companies and other assets. Time efficient: The whole year and get into car accidents that happen and you're found at various other insurance companies. Many people do not have insurance for obvious reasons, a product because it may be worth looking into. (These are called the state's minimum requirements). Popular tracking devices are front-line defenses for survival.
|
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.core.context_processors import csrf
import django.contrib.auth.decorators as decorators
import django.contrib.auth as auth
import django.contrib.auth.forms as auth_forms
import django.contrib.auth.models as auth_models
import json
@decorators.login_required
def logout(request):
if request.user.is_authenticated():
auth.logout(request)
return redirect('/')
def reset_password(request):
return render(request, 'logsys/reset-password.html', {})
def login(request):
args = {}
args.update(csrf(request))
if request.POST:
auth_form = auth_forms.AuthenticationForm(request, data=request.POST)
if auth_form.is_valid():
auth_form.clean()
auth.login(request, auth_form.get_user())
return redirect('/')
else:
args['auth_error_p'] = True
if request.POST['username']:
args['tricker_id_prev_try'] = request.POST['username']
return render(request, 'logsys/login.html', args)
def register(request):
args = {}
args.update(csrf(request))
if request.POST:
new_user_form = auth_forms.UserCreationForm(request.POST)
if new_user_form.is_valid():
new_user = new_user_form.save(commit=False)
new_user.email = request.POST['email']
new_user.first_name = request.POST.get('first-name', '')
new_user.last_name = request.POST.get('last-name', '')
new_user.save()
auth.login(
request,
auth.authenticate(
username=new_user_form.cleaned_data['username'],
password=new_user_form.cleaned_data['password1']
)
)
return redirect('/')
else:
pass
# TODO reject incorrect registration
return render(request, 'logsys/register.html', args)
def is_username_already_engaged(request, username):
response = {'engaged': False}
if auth_models.User.objects.filter(username=username):
response['engaged'] = True
return HttpResponse(json.dumps(response))
def is_email_already_engaged(request, email):
response = {'engaged': False}
    if auth_models.User.objects.filter(email=email):
response['engaged'] = True
return HttpResponse(json.dumps(response))
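# ---------------------------------------------------------------------------
# URLconf sketch (an illustrative addition, not part of the original views):
# one plausible wiring of these views, assuming they live in a `logsys` app
# on a Django version matching the imports above.
#
# from django.conf.urls import url
# from logsys import views
#
# urlpatterns = [
#     url(r'^login/$', views.login),
#     url(r'^logout/$', views.logout),
#     url(r'^register/$', views.register),
#     url(r'^reset-password/$', views.reset_password),
#     url(r'^check-username/(?P<username>[\w.@+-]+)/$', views.is_username_already_engaged),
#     url(r'^check-email/(?P<email>[^/]+)/$', views.is_email_already_engaged),
# ]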
|
Product Consists : Bouquet of 12 Mix Flowers like Gerberas, Roses and Carnations with some fillers in a cellophane wrapping with a matching bow, Celebration Chocolate Box (Weight: 118 gms) along with New Year Greeting Card.
Celebrate New Year with lots of pomp and flair this time. Gift your loved ones stupendous gifts from Giftacrossindia.com that are sure to make their celebration a grand affair. Here is a box of assorted yummy chocolates from Cadbury Celebrations, paired with a bunch of 12 gorgeous mixed flowers like Gerberas, carnations and roses and an elegant New Year greeting card. This wonderful hamper is bound to warm the hearts of your loved ones with your unconditional love.
|
from django.contrib import admin
class MergedInlineAdmin(admin.ModelAdmin):
# optional field ordering variable
merged_field_order = None
merged_inline_order = 'id'
# Edited Change_Form Template with one inline form
change_form_template = 'admin/change_form_merged_inlines.html'
class Media:
js = ('admin/js/merged_inlines.js',)
    # Iterates over all the inline_formsets, collects them into lists,
    # and sends them to the change_view as extra context.
def render_change_form(
self, request, context, add=False,
change=False, form_url='', obj=None):
inline_admin_formsets = context['inline_admin_formsets']
all_forms = []
all_fields = []
i = 0
for formset in inline_admin_formsets:
for form in formset:
form.verbose_name = form.form._meta.model._meta.verbose_name.title()
all_forms.append((form, {}))
for fieldset in form:
for line in fieldset:
for field in line:
if (field.field.name, field.field.label) not in all_fields and not field.field.is_hidden:
all_fields.append(
(field.field.name, field.field.label)
)
all_forms[i][1][field.field.name] = field
i += 1
# Sort the forms based on given field.
end = len(all_forms)-1
all_forms.sort(
key=lambda x: getattr(
x[0].form.instance,
self.merged_inline_order
) if getattr(
x[0].form.instance,
self.merged_inline_order) is not None else end)
# Sort the fields based in merged_inline_order, if given
if self.merged_field_order is not None:
all_fields.sort(key=lambda x: self.merged_field_order.index(x[0]))
extra_context = {}
extra_context['all_fields'] = all_fields
extra_context['all_forms'] = all_forms
context.update(extra_context)
return super(MergedInlineAdmin, self).render_change_form(
request, context, add, change, form_url, obj)
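# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition; Book, Chapter and Note are
# hypothetical models): subclass MergedInlineAdmin instead of admin.ModelAdmin
# and register inlines as usual; the overridden template and JS then render
# all inlines as one merged form list, ordered by `merged_inline_order`.
#
# class ChapterInline(admin.TabularInline):
#     model = Chapter
#
# class NoteInline(admin.TabularInline):
#     model = Note
#
# class BookAdmin(MergedInlineAdmin):
#     inlines = [ChapterInline, NoteInline]
#     merged_field_order = ['name', 'text']  # optional explicit column order
#
# admin.site.register(Book, BookAdmin)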
|
Posted August 19th, 2012 by Laura Wilson & filed under ACT, College Essay, SAT, WilsonDailyPrep.
Posted July 15th, 2012 by Laura Wilson & filed under ACT, College Essay, SAT.
Posted July 9th, 2012 by Laura Wilson & filed under College Essay.
|
import cv2
import gin
import gym
from gym import spaces
import numpy as np
import os
import tasks.abc_task
import time
import car_racing_variants
from takecover_variants.doom_take_cover import DoomTakeCoverEnv
class GymTask(tasks.abc_task.BaseTask):
"""OpenAI gym tasks."""
def __init__(self):
self._env = None
self._render = False
self._logger = None
def create_task(self, **kwargs):
raise NotImplementedError()
def seed(self, seed):
if isinstance(self, TakeCoverTask):
self._env.game.set_seed(seed)
else:
self._env.seed(seed)
def reset(self):
return self._env.reset()
def step(self, action, evaluate):
return self._env.step(action)
def close(self):
self._env.close()
def _process_reward(self, reward, done, evaluate):
return reward
def _process_action(self, action):
return action
def _process_observation(self, observation):
return observation
def _overwrite_terminate_flag(self, reward, done, step_cnt, evaluate):
return done
def _show_gui(self):
if hasattr(self._env, 'render'):
self._env.render()
def roll_out(self, solution, evaluate):
ob = self.reset()
ob = self._process_observation(ob)
if hasattr(solution, 'reset'):
solution.reset()
start_time = time.time()
rewards = []
done = False
step_cnt = 0
while not done:
action = solution.get_output(inputs=ob, update_filter=not evaluate)
action = self._process_action(action)
ob, r, done, _ = self.step(action, evaluate)
ob = self._process_observation(ob)
if self._render:
self._show_gui()
step_cnt += 1
done = self._overwrite_terminate_flag(r, done, step_cnt, evaluate)
step_reward = self._process_reward(r, done, evaluate)
rewards.append(step_reward)
time_cost = time.time() - start_time
actual_reward = np.sum(rewards)
if hasattr(self, '_logger') and self._logger is not None:
self._logger.info(
'Roll-out time={0:.2f}s, steps={1}, reward={2:.2f}'.format(
time_cost, step_cnt, actual_reward))
return actual_reward
@gin.configurable
class TakeCoverTask(GymTask):
"""VizDoom take cover task."""
def __init__(self):
super(TakeCoverTask, self).__init__()
self._float_text_env = False
self._text_img_path = '/opt/app/takecover_variants/attention_agent.png'
def create_task(self, **kwargs):
if 'render' in kwargs:
self._render = kwargs['render']
if 'logger' in kwargs:
self._logger = kwargs['logger']
modification = 'original'
if 'modification' in kwargs:
modification = kwargs['modification']
if modification == 'text':
self._float_text_env = True
self._logger.info('modification: {}'.format(modification))
self._env = DoomTakeCoverEnv(modification)
return self
def _process_observation(self, observation):
if not self._float_text_env:
return observation
img = cv2.imread(self._text_img_path, cv2.IMREAD_GRAYSCALE)
h, w = img.shape
full_color_patch = np.ones([h, w], dtype=np.uint8) * 255
zero_patch = np.zeros([h, w], dtype=np.uint8)
x = 150
y = 30
mask = (img == 0)
observation[y:(y+h), x:(x+w), 0][mask] = zero_patch[mask]
observation[y:(y+h), x:(x+w), 1][mask] = zero_patch[mask]
observation[y:(y+h), x:(x+w), 2][mask] = full_color_patch[mask]
observation[y:(y+h), x:(x+w), 0][~mask] = zero_patch[~mask]
observation[y:(y+h), x:(x+w), 1][~mask] = full_color_patch[~mask]
observation[y:(y+h), x:(x+w), 2][~mask] = full_color_patch[~mask]
return observation
def _process_action(self, action):
# Follow the code in world models.
action_to_apply = [0] * 43
threshold = 0.3333
if action > threshold:
action_to_apply[10] = 1
if action < -threshold:
action_to_apply[11] = 1
return action_to_apply
def set_video_dir(self, video_dir):
from gym.wrappers import Monitor
self._env = Monitor(
env=self._env,
directory=video_dir,
video_callable=lambda x: True
)
@gin.configurable
class CarRacingTask(GymTask):
"""Gym CarRacing-v0 task."""
def __init__(self):
super(CarRacingTask, self).__init__()
self._max_steps = 0
self._neg_reward_cnt = 0
self._neg_reward_cap = 0
self._action_high = np.array([1., 1., 1.])
self._action_low = np.array([-1., 0., 0.])
def _process_action(self, action):
return (action * (self._action_high - self._action_low) / 2. +
(self._action_high + self._action_low) / 2.)
def reset(self):
ob = super(CarRacingTask, self).reset()
self._neg_reward_cnt = 0
return ob
def _overwrite_terminate_flag(self, reward, done, step_cnt, evaluate):
if evaluate:
return done
if reward < 0:
self._neg_reward_cnt += 1
else:
self._neg_reward_cnt = 0
too_many_out_of_tracks = 0 < self._neg_reward_cap < self._neg_reward_cnt
too_many_steps = 0 < self._max_steps <= step_cnt
return done or too_many_out_of_tracks or too_many_steps
def create_task(self, **kwargs):
if 'render' in kwargs:
self._render = kwargs['render']
if 'out_of_track_cap' in kwargs:
self._neg_reward_cap = kwargs['out_of_track_cap']
if 'max_steps' in kwargs:
self._max_steps = kwargs['max_steps']
if 'logger' in kwargs:
self._logger = kwargs['logger']
env_string = 'CarRacing-v0'
if 'modification' in kwargs:
if kwargs['modification'] == 'color':
env_string = 'CarRacingColor-v0'
elif kwargs['modification'] == 'bar':
env_string = 'CarRacingBar-v0'
elif kwargs['modification'] == 'blob':
env_string = 'CarRacingBlob-v0'
self._logger.info('env_string: {}'.format(env_string))
self._env = gym.make(env_string)
return self
def set_video_dir(self, video_dir):
from gym.wrappers import Monitor
self._env = Monitor(
env=self._env,
directory=video_dir,
video_callable=lambda x: True
)
|
Square probably isn’t the best way to get your buddy to pay you back for coffee or Insane Clown Posse tickets. That’s what an app like Venmo is for. Square is, however, an attractive, simple, and useful way for anyone from street vendors to mobile businesses to accept credit cards. The EMV card functionality just makes it even more futureproof. Thanks to its support for numerous devices, ease of use, and beautiful interface, it’s our Editors’ Choice for mobile credit card readers and POS software solutions. If you also want to accept payment via NFC, however, you’ll want to get the Square Contactless + Chip Card Reader, which is also an Editors’ Choice.
RW420 Printer Unit. Used battery. Our technicians are certified and can guide you through the setup of our products. Our technicians are located here in the United States. Response may take up to 24 hours, but most are less than 2 hours.
Examples of widely used contactless smart cards are Taiwan’s EasyCard, Hong Kong’s Octopus card, Shanghai’s Public Transportation Card, South Korea’s T-money (bus, subway, taxi), London’s Oyster card, Beijing’s Municipal Administration and Communications Card, Southern Ontario’s Presto card, Japan Rail’s Suica Card, the San Francisco Bay Area’s Clipper Card, and India’s More Card which predate the ISO/IEC 14443 standard. The following tables list smart cards used for public transportation and other electronic purse applications.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 DA Symposium 2017
# All rights reserved.
#
"""
Randomly generate problem data and correct-answer data for the Algorithm Design Contest 2017.
"""
from __future__ import print_function
import numpy as np
import random
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../server')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), './lib')))
from nlcheck import NLCheck
import nldraw2
import openpyxl
from openpyxl import Workbook
_size = (3,3,1) # x,y,z
size = None
nlines = 999
retry = 2
debug = False
verbose = False
newline = '\n' # newline character
template_move = 'newsud' # string of the six movement directions
template_move0 = 'news'*10 + 'ud' # prefer horizontal moves (10x) over vertical moves
template_move1 = template_move0 + 'G'*20 # prefer going straight (G) even more (20x)
class dotdict(dict):
"""
dot.notation access to dictionary attributes
https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
unit_vec_xyz = dotdict({ 'n': ( 0, -1, 0),
'e': ( 1, 0, 0),
'w': (-1, 0, 0),
's': ( 0, 1, 0),
'u': ( 0, 0, 1),
'd': ( 0, 0, -1) })
class Ban:
"""
    Board data structure.
"""
def __init__(self, x, y, z):
self.size = dotdict({'x': x, 'y': y, 'z': z})
self.xmat = np.zeros((z+2, y+2, x+2), dtype=np.integer)
def get_size(self):
return self.size
def get(self, x,y,z):
return self.xmat[z+1, y+1, x+1]
def get_xyz(self, xyz):
x,y,z = xyz
return self.xmat[z+1, y+1, x+1]
def set(self, x,y,z, val):
self.xmat[z+1, y+1, x+1] = val
def set_xyz(self, xyz, val):
x,y,z = xyz
self.xmat[z+1, y+1, x+1] = val
def print(self):
print(self.xmat[1:-1, 1:-1, 1:-1])
def zyx1_to_xyz(self, zyx1):
return (zyx1[2]-1, zyx1[1]-1, zyx1[0]-1)
def find_zero_random(self, dont_use=[]):
"値が0の座標を、ランダムに返す。リストdont_useに含まれる座標は、選ばない。"
cand = []
for k1, v in np.ndenumerate(self.xmat):
if self.inside_zyx1(k1):
xyz = self.zyx1_to_xyz(k1)
if (v == 0) and (xyz not in dont_use):
cand.append(k1)
if len(cand) == 0:
return False
i = random.randint(0, len(cand)-1)
return self.zyx1_to_xyz(cand[i])
def inside(self, xyz):
"座標xyzが、盤の中にあるか?"
x, y, z = xyz
if ((0 <= x and x < self.size.x) and
(0 <= y and y < self.size.y) and
(0 <= z and z < self.size.z)):
return True
else:
return False
def inside_zyx1(self, zyx1):
"(+1されている)座標zyx1が、盤の中にあるか?"
z = zyx1[0]-1
y = zyx1[1]-1
x = zyx1[2]-1
return self.inside((x,y,z))
def move_xyz_to(self, xyz, move):
"座標xyzから、move(=n,e,w,s,u,d)の方向に移動した座標を返す"
uv = unit_vec_xyz[move]
return (xyz[0] + uv[0], xyz[1] + uv[1], xyz[2] + uv[2])
def rip_line(self, number):
"線numberを、引き剥がす"
indexes = np.where(self.xmat == number)
n = len(indexes[0])
#print('rip_line', number, n)
#self.print()
for j in range(0, n):
z = indexes[0][j]
y = indexes[1][j]
x = indexes[2][j]
#print(x,y,z)
self.xmat[z,y,x] = 0
#self.print()
def empty_cells(self):
"空白マスの数を返す"
indexes = np.where(self.xmat[1:-1, 1:-1, 1:-1] == 0)
return len(indexes[0])
def neighbors(self, xyz):
"セルxyzの隣接セルの数値を取り出す"
x, y, z = xyz
return dotdict({ 'n': self.get(x, y-1, z), # north
'e': self.get(x+1, y, z), # east
'w': self.get(x-1, y, z), # west
's': self.get(x, y+1, z), # south
'u': self.get(x, y, z+1), # upstairs
'd': self.get(x, y, z-1)}) # downstairs
def A_data(self):
"回答テキストを作る"
out = 'SIZE %dX%dX%d%s' % (self.size.x, self.size.y, self.size.z, newline)
for z in range(0, self.size.z):
out += 'LAYER %d%s' % (z+1, newline)
for y in range(0, self.size.y):
row = ''
for x in range(0, self.size.x):
num = self.get_xyz((x,y,z))
row += '%02d' % num
if x < self.size.x -1:
row += ','
out += row + newline
return out
def vector_char(a, b):
"""
    Find the direction from a to b as one of n,e,w,s,u,d. a and b must be adjacent.
"""
ba = (b[0]-a[0], b[1]-a[1], b[2]-a[2])
for k,v in unit_vec_xyz.iteritems():
if ba == v:
return k
raise Exception('vector not found')
def draw_line_next(ban, number=0, prev=None, curr=None):
"""
    Given that the previous segment of the line went from prev to curr,
    extend the line by exactly one cell, from curr to next_xyz.
    next_xyz is chosen at random.
    #
    # prev curr next_xyz
    #  ●    ●    ○
    #
"""
neig = ban.neighbors(curr)
    # s is a string of candidate direction characters (n,e,w,s,u,d); it will be shuffled below
if prev == None:
s = template_move0
else:
s = template_move1
vec_char = vector_char(prev, curr)
s = s.replace('G', vec_char)
if debug: print('0: s=', s)
    # Check in advance whether the line can be extended to each neighbor (n,e,w,s,u,d)
for i in range(0, len(template_move)):
vec_char = template_move[i]
next_xyz = ban.move_xyz_to(curr, vec_char)
if debug: print('curr=', curr, ' vec_char=', vec_char, ' next_xyz=', next_xyz)
drawable = True
if not ban.inside(next_xyz):
            drawable = False # ran off the board
elif ban.get_xyz(next_xyz) != 0:
            drawable = False # a line is already drawn there
else:
            # count the neighbors of next_xyz whose value equals number
next_neigh = ban.neighbors(next_xyz)
same_number = 0
for j in range(0, len(template_move)):
if next_neigh[template_move[j]] == number:
same_number += 1
if 2 <= same_number:
                # two or more means a loop would form (or so it should)
drawable = False
if not drawable:
            s = s.replace(vec_char, '') # remove from candidates
if debug: print('1: s=', s)
if len(s) == 0:
        return curr, None # the line cannot be extended any further
    rs = ''.join(random.sample(s, len(s))) # shuffle s into rs
vec_char = rs[0]
if debug: print('vec_char=', vec_char)
next_xyz = ban.move_xyz_to(curr, vec_char)
ban.set_xyz(next_xyz, number)
prev = curr
curr = next_xyz
return prev, curr
def draw_line(ban, number, max_retry=1, dont_use=[], Q_data={}):
"""
    Try to draw line number at random.
"""
trial = 0
if debug: print('number=', number)
while trial < max_retry:
trial += 1
#print('dont_use=', dont_use)
        start = ban.find_zero_random(dont_use) # pick the start point at random
end = None
if debug: print('start=', start)
if start is False:
return False
ban.set_xyz(start, number)
line_length = 0
prev = None
curr = start
while curr is not None:
line_length += 1
if debug: print('prev=', prev, ' curr=', curr)
if debug: ban.print()
prev, curr = draw_line_next(ban, prev=prev, curr=curr, number=number)
if curr != None:
end = curr
if line_length == 1:
            # Could not draw even one segment. A one-cell line is invalid, so erase it.
            # start is a tuple, so no copy should be needed
if debug: print('clear start=', start)
ban.set_xyz(start, 0)
dont_use.append(start)
            trial -= 1 # in this case, don't count it as a trial
        elif (line_length <= 2) and (trial < max_retry): # short lines are uninteresting, so redraw
if verbose: print('rip LINE#%d' % number)
ban.rip_line(number)
else:
            # the line was drawn successfully
Q_data[number] = {'start': start, 'end': end, 'length': line_length}
return True
        # retry
if verbose:
print('retry %d/%d LINE#%d, #dont_use=%d' % (trial, max_retry, number, len(dont_use)))
    # failed to draw the line
return False
def generate(x,y,z, num_lines=0, max_retry=1, Q_data={}, dont_use=[]):
"""
    Randomly generate correct-answer data and problem data for a board of size (x,y,z).
    The number of lines is at most num_lines.
"""
ban = Ban(x,y,z)
for line in range(1, 1+num_lines):
if draw_line(ban, line, max_retry=max_retry, dont_use=dont_use, Q_data=Q_data) == False:
return line-1, ban
return num_lines, ban
def Q_text(Q_data):
"問題データのテキストを生成する。"
size = Q_data['size']
out = 'SIZE %dX%dX%d%s' % (size[0], size[1], size[2], newline)
num_lines = Q_data['line_num']
out += 'LINE_NUM %d%s' % (num_lines, newline)
for j in range(1, 1+num_lines):
s = Q_data[j]['start']
e = Q_data[j]['end']
out += 'LINE#%d (%d,%d,%d) (%d,%d,%d)%s' % (j, s[0],s[1],s[2]+1, e[0],e[1],e[2]+1, newline)
return out
def excel(ban, basename):
"Excelファイル(.xlsx)に書き出す。"
wb = Workbook()
bgYellow = openpyxl.styles.PatternFill(patternType='solid', fgColor='FFFFFF00')
bgIndex = openpyxl.styles.PatternFill(patternType='solid', fgColor='FFBBFFF6')
size = ban.get_size()
for z in range(0, size.z):
if z == 0:
wsl = wb.active
else:
wsl = wb.create_sheet()
wsl.title = '%s.%d' % (basename, z+1)
        wsl['B1'] = u'行'   # '行' = rows (label expected in the output format)
        wsl['B2'] = u'列'   # '列' = columns
        wsl['C1'] = 'A'
        wsl['E1'] = ' / '
        wsl['G1'] = u'層'   # '層' = layer
for cell in ['A1', 'A2', 'C1', 'D1', 'F1']:
wsl[cell].fill = bgYellow
wsl['A1'].value = size.x
wsl['A2'].value = size.y
wsl['D1'].value = z+1
wsl['F1'].value = size.z
for y in range(0, size.y):
for x in range(0, size.x):
num = ban.get_xyz((x,y,z))
wsl.cell(row=4+y, column=2+x).value = num
        # Y coordinates (row labels)
i = 0
for y in range(4, 4+size.y):
wsl.cell(row=y, column=1).value = i
wsl.cell(row=y, column=1).fill = bgIndex
i += 1
        # X coordinates (column labels)
i = 0
for x in range(2, 2+size.x):
wsl.cell(row=3, column=x).value = i
wsl.cell(row=3, column=x).fill = bgIndex
i += 1
        # column widths
for x in range(1, 1+size.x+1):
wsl.column_dimensions[openpyxl.utils.get_column_letter(x)].width = 3.5
wb.save(filename=basename+'.xlsx')
def run(x,y,z, num_lines=0, max_retry=1, basename=None):
"""
    Automatically generate problem data and correct-answer data of the given size
    and line count, and write them to files basename*.txt.
    @param x,y,z     board size
    @param num_lines number of lines
    @param basename  output file basename; the problem file becomes basename_adc.txt
                     and the solution file basename_adc_sol.txt.
"""
Q = {'size': (x, y, z)}
num_lines, ban = generate(x, y, z, num_lines=num_lines, max_retry=max_retry, Q_data=Q)
Q['line_num'] = num_lines
Q['empty_cells'] = ban.empty_cells()
print('number of lines:', Q['line_num'])
print('number of empty cells:', Q['empty_cells'])
#if verbose: ban.print()
#if verbose: print('Q=', Q)
txtQ = Q_text(Q)
txtA = ban.A_data()
    # Validate with NLCheck.
nlc = NLCheck()
q = nlc.read_input_str(txtQ)
a = nlc.read_target_str(txtA)
#nlc.verbose = verbose
judges = nlc.check( q, a )
print("judges = ", judges)
    # Render the board images.
    nldraw2.setup_font('nonexistent')  # TODO: revisit the font choice later
    images = nldraw2.draw(q, a, nlc)
    # NOTE: the GIFs are written even when basename is None, which yields
    # files named "None.*.gif"; pass -o/basename to control the output name.
    for num, img in enumerate(images):
        ifile = "%s.%d.gif" % (basename, num+1)  # layer numbers start at 1
        img.save(ifile, 'gif')
        print(ifile)
    if 1 < len(images):
        nldraw2.merge_images(images).save(basename+'.gif', 'gif')
    # Write out Q and A.
if basename is None:
print(txtQ)
print(txtA)
else:
qfile = '%s_adc.txt' % basename
with open(qfile, 'w') as f:
f.write(txtQ)
afile = '%s_adc_sol.txt' % basename
with open(afile, 'w') as f:
f.write(txtA)
excel(ban, basename)
def test1():
    "Smoke test."
x,y,z = _size
ban = Ban(x,y,z)
ban.set(0,0,0, 1)
ban.set(1,0,0, 2)
ban.set(0,1,0, 3)
ban.set(x-1, y-1, z-1, 1)
ban.set(x-2, y-1, z-1, 2)
ban.set(x-1, y-2, z-1, 3)
ban.print()
def main():
    global debug, verbose  # only these two are assigned in main()
import argparse
parser = argparse.ArgumentParser(description='NumberLink Q generator')
parser.add_argument('-d', '--debug', action='store_true', default=debug, help='enable debug (default: %(default)s)')
parser.add_argument('-v', '--verbose', action='store_true', default=verbose, help='verbose output (default: %(default)s)')
parser.add_argument('-x', metavar='X', default=_size[0], type=int, help='size X (default: %(default)s)')
parser.add_argument('-y', metavar='Y', default=_size[1], type=int, help='size Y (default: %(default)s)')
parser.add_argument('-z', metavar='Z', default=_size[2], type=int, help='size Z (default: %(default)s)')
parser.add_argument('-l', '--lines', metavar='N', default=nlines, type=int, help='max number of lines (default: %(default)s)')
parser.add_argument('-r', '--retry', metavar='N', default=retry, type=int, help='max number of retry (default: %(default)s)')
parser.add_argument('-o', '--output', metavar='FILE', help='output file')
#parser.add_argument('--test1', action='store_true', help='run test1')
args = parser.parse_args()
debug = args.debug
verbose = args.verbose
#if args.test1: test1()
run(args.x, args.y, args.z, num_lines=args.lines, basename=args.output, max_retry=args.retry)
if __name__ == "__main__":
main()
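# A minimal command-line sketch; the file name nlgen.py and the option
# values are illustrative:
#
#   python nlgen.py -x 10 -y 10 -z 3 --lines 15 --retry 100 -o sample
#
# This would write sample_adc.txt, sample_adc_sol.txt, sample.xlsx and
# per-layer GIF images.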
|
Is it OK as per Halacha to go through the new TSA scanners where they see you as if you are naked?
Rav Moshe Shternbuch reportedly sees no problem with it.
|
import theano, cPickle, h5py, lasagne, random, csv, gzip
import numpy as np
import theano.tensor as T
# convert csv into format readable by rmn code
def load_data(span_path, metadata_path):
    reader = csv.DictReader(gzip.open(span_path, 'rb'))
wmap, cmap, bmap = cPickle.load(open(metadata_path, 'rb'))
max_len = -1
    revwmap = dict((v,k) for (k,v) in wmap.iteritems())  # inverted word map
    revbmap = dict((v,k) for (k,v) in enumerate(bmap))   # book -> index
    revcmap = dict((v,k) for (k,v) in cmap.iteritems())  # inverted character map
span_dict = {}
    for row in reader:
text = row['Words'].split()
if len(text) > max_len:
max_len = len(text)
key = '___'.join([row['Book'], row['Char 1'], row['Char 2']])
if key not in span_dict:
span_dict[key] = []
span_dict[key].append([wmap[w] for w in text])
span_data = []
for key in span_dict:
book, c1, c2 = key.split('___')
book = np.array([revbmap[book], ]).astype('int32')
chars = np.array([revcmap[c1], revcmap[c2]]).astype('int32')
# convert spans to numpy matrices
spans = span_dict[key]
s = np.zeros((len(spans), max_len)).astype('int32')
m = np.zeros((len(spans), max_len)).astype('float32')
for i in range(len(spans)):
curr_span = spans[i]
s[i][:len(curr_span)] = curr_span
m[i][:len(curr_span)] = 1.
span_data.append([book, chars, s, m])
return span_data, max_len, wmap, cmap, bmap
def generate_negative_samples(num_traj, span_size, negs, span_data):
inds = np.random.randint(0, num_traj, negs)
neg_words = np.zeros((negs, span_size)).astype('int32')
neg_masks = np.zeros((negs, span_size)).astype('float32')
for index, i in enumerate(inds):
rand_ind = np.random.randint(0, len(span_data[i][2]))
neg_words[index] = span_data[i][2][rand_ind]
neg_masks[index] = span_data[i][3][rand_ind]
return neg_words, neg_masks
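# A minimal usage sketch; the file paths are hypothetical and stand in for
# the output of the RMN preprocessing step:
#
#   span_data, max_len, wmap, cmap, bmap = load_data('spans.csv.gz', 'metadata.pkl')
#   neg_words, neg_masks = generate_negative_samples(
#       num_traj=len(span_data), span_size=max_len, negs=50, span_data=span_data)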
|
iLovemyohone sells quality branded accessories to Optus, Telstra and Vodafone telco dealers, making it a top iPad 2 accessories online store. Our direct fulfilment model for business customers guarantees the best iPad 2 accessories online shopping.
Our focus is to provide the best iPad 2 accessories buying experience in Australia.
|
################################################################################
#
# This program is part of the DellEqualLogicMon Zenpack for Zenoss.
# Copyright (C) 2010 Eric Enns.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
from Globals import DTMLFile, InitializeClass
from Products.ZenModel.OSComponent import *
from Products.ZenRelations.RelSchema import *
from Products.ZenModel.ZenossSecurity import *
from DellEqualLogicComponent import *
from Products.ZenUtils.Utils import convToUnits
from Products.ZenUtils.Utils import prepId
import logging
log = logging.getLogger("zen.DellEqualLogicVolume")
def manage_addVolume(context, id, userCreated, REQUEST=None):
svid = prepId(id)
sv = DellEqualLogicVolume(svid)
context._setObject(svid, sv)
sv = context._getOb(svid)
if userCreated: sv.setUserCreatedFlag()
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(context.absolute_url()+'/manage_main')
return sv
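# manage_addVolume is the Zope factory hook referenced by the 'factory' entry
# in factory_type_information below; Zope calls it with the containing object
# as `context` when a new volume component is created.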
class DellEqualLogicVolume(OSComponent, DellEqualLogicComponent):
portal_type = meta_type = 'DellEqualLogicVolume'
caption = ""
volumeProvisionedSize = 0
volumeReservedSize = 0
thinProvisioned = 2
state = "OK"
_properties = OSComponent._properties + (
{'id':'caption', 'type':'string', 'mode':'w'},
{'id':'state', 'type':'string', 'mode':'w'},
{'id':'volumeProvisionedSize', 'type':'int', 'mode':'w'},
{'id':'volumeReservedSize', 'type':'int', 'mode':'w'},
{'id':'thinProvisioned', 'type':'int', 'mode':'w'},
)
_relations = OSComponent._relations + (
("os", ToOne(
ToManyCont,
"ZenPacks.community.DellEqualLogicMon.DellEqualLogicDevice.DellEqualLogicDeviceOS",
"volumes")),
)
factory_type_information = (
{
'id' : 'Volume',
'meta_type' : 'Volume',
'description' : """Arbitrary device grouping class""",
'icon' : 'StoragePool_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addVolume',
'immediate_view' : 'viewDellEqualLogicVolume',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewDellEqualLogicVolume'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'events'
, 'name' : 'Events'
, 'action' : 'viewEvents'
, 'permissions' : (ZEN_VIEW, )
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_DEVICE, )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
def reservedSize(self):
return self.volumeReservedSize or 0
def reservedSizeString(self):
return convToUnits(self.reservedSize(), divby=1024)
def provisionedSize(self):
return self.volumeProvisionedSize or 0
def provisionedSizeString(self):
return convToUnits(self.provisionedSize(), divby=1024)
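    # convToUnits (from Products.ZenUtils.Utils) renders a raw byte count in
    # human-readable units; e.g. a size of 2147483648 with divby=1024 comes
    # out as roughly "2.0GB" (illustrative values).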
    def isThinProvisioned(self):
        return "true" if self.thinProvisioned == 1 else "false"
# def getRRDNames(self):
# return ['Volume_Occupancy']
InitializeClass(DellEqualLogicVolume)
|
Entrepreneurs are psychologically unique. In a world where up to 90% of startups fail, the most enduring visionaries will push through, energized by the idea of experiencing freedom and success alongside the 10% who beat the odds. They’re resilient. They’re adaptable. They’re extreme risk-takers. The most successful entrepreneurs tend to run toward uncertainty and threat instead of running away. Why? The reason, at least according to psychologists, is that successful entrepreneurs tend to have one mindset about fear that the rest of us simply don’t share, at least not to the same extent: If harnessed in the right ways, understanding our fears and anxieties can lead us to breakthrough self-discoveries, innovation, and creativity. For exceptional entrepreneurs, noticing fear is a competitive advantage; it’s an emotion that we dive into with curiosity, passion, and a hunger for exponential growth.
The good news is, these critical mindsets around fear can be learned.
Serial entrepreneur and executive coach Justin Milano has experienced success and failure — but it’s the failure that changed his life. “I was building a company to solve one of the greatest environmental challenges we face today: food waste,” he says. Then, his greatest fear became a reality. The company failed, and in the soul-searching that followed, he saw his motivations in a truer light. “I realized there was an unconscious part of me looking to do something grandiose because I didn't fully value and love myself. I was trying to prove my self-worth.” He wasn’t afraid of his business failing; he was afraid of feeling like a failure. There’s a big difference between the two, but they’re often strongly related.
At least, that’s what Milano learned after a satisfying bit of symmetry. What seemed like his greatest failure led to his greatest success: co-founding Good Startups, an executive coaching company for startups that focuses on cutting-edge leadership psychology, neuroscience, and emotional intelligence. What he had started to understand intuitively, his eventual cofounder, Dr. Daniel Cordaro, Director of Wellbeing at the Yale Center for Emotional Intelligence, had already been studying across cultures for nearly a decade. In 2015, they teamed up to use their mix of scientific expertise and on-the-ground startup experience to help founders and their teams identify and manage their fears and anxieties, and more importantly, understand the deeper psychology blocking them from self-mastery and sustainable high performance.
There are a lot of entrepreneurs out there unconsciously trying to prove their self-worth through their companies.
When you practice and master these tools, the result is better decision-making, expanded performance and energy, more inspirational leadership, and a doorway to self-mastery. It’s scientifically evidenced.
I’m an old man and have known a great many troubles, but most of them never happened.
The key point is that 99% of the time when people say they are experiencing fear, they are actually experiencing anxiety around events that may never happen. When this anxiety is playing like a broken record in our subconscious mind throughout the day, we are hanging out in our limbic brain, specifically the amygdala, which is responsible for our reactions to threat. When we are seeing the world through the lens of threat, we experience tunnel vision and limited possibilities.
Once you recognize that what you are experiencing is most likely anxiety, the question becomes how do you release the grip of anxiety so you can operate with expanded awareness and creativity?
What expectation, idea, or outcome are you attached to? A specific investor? A specific client? A certain type of product working? Being cash positive in six months?
What identity is driving the attachment? Have you created an image of yourself as the next game-changing entrepreneur to investors, the press, and your team?
Consider this example: A founder tells her investors that the company will be cash positive in six months. Then inevitable obstacles set in, and it becomes clear that goal is unattainable. It would be easy to descend into a morass of anxiety and what-ifs: Will the board let me go? Will the team start questioning my leadership?
Attachment to identities can be harder to relinquish, but it’s critical to consider. Many founders, whether through their own strategy or the attention of the press, assume the mantle of “the next big thing.” Suddenly, they’re not just building a product and running a company; they’re living up to very public expectations. “Now if your product launch isn't working very well, it starts to threaten that identity. Not only is your product broken, but you’re a fraud, a common human fear.”
That’s one of entrepreneurs’ biggest fears: I went out there, I sold all these friends and investors on the next big thing, and it didn’t work. I’m a fraud.
The Culture of Scarcity: The belief system that there are not enough resources (e.g. time, money, etc.).
When working with clients, Milano and Cordaro take people deep into their individual scarcity programming to write new, more empowering stories for their lives.
Adopt this mindset, and fear stops looking like a threat; it’s merely part of the package.
In Milano and Cordaro’s experience, the following shift in mindset can have a profound influence on founders. When a product fails, when the new app gets two stars in the App Store, when funding isn’t coming through, successful founders don’t get caught up thinking, “This is the wrong experience. This isn’t the way it’s supposed to be.” Instead, they accept those obstacles as part of their experience — the only one they can and should be having. They use the experience as a growth opportunity to become better, stronger, and more inspirational leaders.
“When your product isn’t working, you need that feedback in order to actually create a product that people do want,” says Milano.
Anxiety lets you know when you need to kick into gear. The problem, Milano notes, is when you simply stew in anxiety all day, every day. These emotions will not serve you well if they become chronic. And where there is chronic anxiety, there is almost always avoidance or aversion.
That app with the two-star rating? Customers don’t like it — it needs to change. “The person with a healthy relationship with fear can say, ‘This is what's happening. I'm going to accept that, embrace that, and receive the information. Now what can I do creatively to solve this?’” says Milano. Leaders with an unhealthy relationship to fear, on the other hand, often lose the opportunity to course-correct. Stuck on how things “should” go, they miss the valuable signal that it’s time to pivot.
Moreover, avoiding feelings of fear only prolongs them. “All emotions are here to provide us with information about the world around us. That's why we've evolved to feel them. Emotions are data; they’ve helped us survive for tens of thousands of generations. If we're not listening to an emotion, if we're not receiving that data fully, it's going to keep coming,” says Cordaro. When you allow yourself to fully feel and accept an emotion, it dissipates very quickly, often in as little as 30 to 90 seconds.
Be an example for your team: Entrepreneurship is a roller coaster. Let’s accept that and fix what we can. There will be no freaking out.
The final culture of fear is the Culture of Unworthiness. “This one goes beyond entrepreneurship; it is an epidemic of the human species,” says Milano. The belief system is that “I’m not enough just the way I am; I need to achieve something extraordinary to generate fulfillment.” This is the culture that had hit Milano the hardest so many years ago.
Milano offers that one of the quickest ways to determine if you have some work to do around your own sense of worthiness is to ask yourself the question “If my company completely fails and I fall flat on my face, will I still love and accept myself unconditionally?” When Cordaro and Milano ask this question in their virtual group coaching sessions, you can often see people stop breathing, even through the video conferencing technology. "It’s a powerful and shaking experience," Milano says.
“When we are faced with the loss of our dreams and ideals, a natural response is a feeling of low self worth,” says Milano. For him, this was the greatest gift of his food waste startup failure.
Once you do the courageous work to defuse your unique sources of anxiety, you are then free to transform your anxiety into something far more useful, like creativity and innovation. Cordaro shows us how to do this through cutting-edge neuroscience.
Terrified of public speaking? That’s because you desire to share a compelling, well-articulated message. Worried you won’t get funding? That’s your strong desire to raise money and build a successful business. Afraid of failure? What’s underneath that is your desire to have a positive impact on humanity.
This isn’t just a hunch; it’s actually deeply rooted in neuroscience. Most of us are at least vaguely aware of the amygdala, that part of the brain, right behind the temple, that perceives threats — the “fight or flight” portion of the brain. When the amygdala perceives a threat, one of its jobs is to stop sending information to the cortex, where we engage in higher level thinking and rational decision-making.
What most people don’t know is that right next door to the amygdala is the nucleus accumbens, which is responsible for desire and all things that make us crave feeling good. “Scientists have tried to find where the nucleus accumbens stops and the amygdala begins, and they can't find it. These portions of the brain are intrinsically involved with each other. When the amygdala fires, the nucleus accumbens is firing as well,” says Cordaro.
A healthy relationship with fear is the doorway to the part of your brain that optimizes judgment, decision-making, and creativity.
Milano and Cordaro developed a five-question exercise to help their clients understand what their anxiety is telling them in the moment, and shift toward consciously pursuing their creative desires. Why is this process important? Very simply, it gets your brain unstuck from fear-based, animalistic reactions and allows you to rapidly shift into creative problem-solving mode.
What's the fear or anxiety?
What is the desire being mirrored? What is the desire underneath that anxiety?
Once you discover the desire, do you choose to take action on it?
If so, what are the creative action steps?
Let’s put this system to the test by bringing in a real life example of the number one most feared concept across all of humanity (it even beats death) — public speaking. In a few months, you’re going to give a high-profile talk regarding your company’s vision on stage in front of 2,500 thought leaders and influencers. Anyone would feel some level of anxiety, especially for those of us who aren’t public figures for a living.
What’s the fear or anxiety? This should be an easy one, but this is where awareness begins. Failure. Bombing the talk. Being the subject of ridicule and scorn, or even worse, silence, from this massive audience. Missing an extraordinary opportunity to inspire major influencers in your space.
So you’ve gone through the process of understanding fear, uprooted your cultures of fear, and transformed your fears into desires. But how do we deal with in-the-moment spontaneous feelings of fear that may arise, let’s say, in the middle of your talk when someone asks a piercing and unexpected question? In Milano’s experience, there are several common physical experiences that are dead giveaways you’re having a fear reaction: sensations in the belly, sweaty palms, increased heart rate, rapid breathing, and even accelerated speech.
Become adept at recognizing these physical manifestations, and you can also develop the skills to overcome fear as it strikes. Whether you’re standing at a podium or opening an investor email that’s unlikely to contain good news, a few simple actions can jog you out of paralysis and into presence. Milano and Cordaro offer three examples here.
Breathe: You’re about to get on stage for your talk, and you feel like you might hyperventilate and forget everything you wanted to say. It’s time to breathe. Get comfortable and close your eyes. Feel yourself breathe cool air in through your nose and exhale warm air through your mouth. Repeat this for a few breaths, then open your eyes and notice the space around you.
Move: You make it on stage and are about to begin, when you feel a crippling paralysis. Your knees are frozen, and you have that deer-in-the-headlights look on your face. It’s time to move. When your body freezes in fear, by gently wiggling, shaking, or bouncing you can stop that fear in its tracks. If you are feeling the adrenaline rush of a “fight or flight” response, plant your feet firmly on the ground to experience that sensation of rootedness, like a redwood tree. These simple body movements are fear “pattern interrupters” developed by Milano’s friend Dr. Kathlyn Hendricks, who is an expert on Body Intelligence.
Research shows that the second you match your experience by saying ‘I'm excited,’ your nervous system relaxes.
Just as founders need to recognize fear in order to move past it, they also need to recognize creative energy so they can welcome it with open arms. Here, too, the body offers the most reliable indicators: “You feel spacious, balanced breathing. Your gut relaxes, and your adrenals and heart chill out. You begin having fun, seeing more possibilities, generating a sense of trust, and the world looks friendly. You make clear decisions from a place of balance,” says Milano.
The goal of meditation is not, as many people assume, to be more calm and peaceful (although those are certainly positive side effects that you can expect from a regular practice); it’s to see one’s experiences through a different lens.
One of the main reasons we engage in mindfulness practices is to sharpen our attention muscle so that when these feelings of fear and anxiety come on, we can catch them at a much earlier stage and simply let them go.
Then, in turn, you can choose how you will respond, prioritizing the actions that will most benefit your unique offering to the world.
The reality of emotional intelligence is that it needs to be developed. As with any new skill, it takes practice; new muscles must be flexed. But as you do that, you will begin to find your relationship to your work, and your colleagues, meaningfully changed. Herein lies the competitive advantage of fear: understanding it and learning how to transform its energy into massive creative potential.
|
from __future__ import unicode_literals
import logging
import operator
from mopidy import backend
from mopidy.models import Playlist, Ref
logger = logging.getLogger(__name__)
class GMusicPlaylistsProvider(backend.PlaylistsProvider):
def __init__(self, *args, **kwargs):
super(GMusicPlaylistsProvider, self).__init__(*args, **kwargs)
self._radio_stations_as_playlists = (
self.backend.config['gmusic']['radio_stations_as_playlists'])
self._radio_stations_count = (
self.backend.config['gmusic']['radio_stations_count'])
self._radio_tracks_count = (
self.backend.config['gmusic']['radio_tracks_count'])
self._playlists = {}
def as_list(self):
refs = [
Ref.playlist(uri=pl.uri, name=pl.name)
for pl in self._playlists.values()]
return sorted(refs, key=operator.attrgetter('name'))
def get_items(self, uri):
playlist = self._playlists.get(uri)
if playlist is None:
return None
return [Ref.track(uri=t.uri, name=t.name) for t in playlist.tracks]
def create(self, name):
pass # TODO
def delete(self, uri):
pass # TODO
def lookup(self, uri):
return self._playlists.get(uri)
def refresh(self):
playlists = {}
# add thumbs up playlist
tracks = []
for track in self.backend.session.get_promoted_songs():
trackId = None
if 'trackId' in track:
trackId = track['trackId']
elif 'storeId' in track:
trackId = track['storeId']
if trackId:
tracks += self.backend.library.lookup(
'gmusic:track:' + trackId)
if len(tracks) > 0:
uri = 'gmusic:playlist:promoted'
playlists[uri] = Playlist(uri=uri, name='Promoted', tracks=tracks)
# load user playlists
for playlist in self.backend.session.get_all_user_playlist_contents():
tracks = []
for track in playlist['tracks']:
if not track['deleted']:
tracks += self.backend.library.lookup('gmusic:track:' +
track['trackId'])
uri = 'gmusic:playlist:' + playlist['id']
playlists[uri] = Playlist(uri=uri,
name=playlist['name'],
tracks=tracks)
# load shared playlists
for playlist in self.backend.session.get_all_playlists():
if playlist.get('type') == 'SHARED':
tracks = []
tracklist = self.backend.session.get_shared_playlist_contents(
playlist['shareToken'])
for track in tracklist:
tracks += self.backend.library.lookup('gmusic:track:' +
track['trackId'])
uri = 'gmusic:playlist:' + playlist['id']
playlists[uri] = Playlist(uri=uri,
name=playlist['name'],
tracks=tracks)
        num_playlists = len(playlists)
        logger.info('Loaded %d playlists from Google Music', num_playlists)
        # load radio stations as playlists
        if self._radio_stations_as_playlists:
            logger.info('Starting to load radio stations')
stations = self.backend.session.get_radio_stations(
self._radio_stations_count)
for station in stations:
tracks = []
tracklist = self.backend.session.get_station_tracks(
station['id'], self._radio_tracks_count)
for track in tracklist:
tracks += self.backend.library.lookup('gmusic:track:' +
track['nid'])
uri = 'gmusic:playlist:' + station['id']
playlists[uri] = Playlist(uri=uri,
name=station['name'],
tracks=tracks)
            logger.info('Loaded %d radios from Google Music',
                        len(playlists) - num_playlists)
self._playlists = playlists
backend.BackendListener.send('playlists_loaded')
def save(self, playlist):
pass # TODO
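    # A minimal sketch of how this provider is typically exercised once
    # refresh() has populated self._playlists (the URI is illustrative):
    #
    #   provider.refresh()
    #   for ref in provider.as_list():
    #       print(ref.uri, ref.name)
    #   pl = provider.lookup('gmusic:playlist:promoted')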
|
India celebrates her 60th Republic Day today. I woke up with this song (S was listening to it); it's been ages since I heard this song, and it brought back many memories. I made tiranga idli to mark Republic Day. This is very fast and simple. There is no need for fermentation or waiting 12 hours to relish your idlis. These are instant and can be made whenever you feel like having idlis or when you have unexpected guests.
Mix together bombai ravva, curd, baking soda, lemon juice and salt. Add water if needed.
Grease idli plates and pour the batter in them. Garnish with carrots on one end and the green pea mixture on the other, leaving the middle white.
Pour 1 cup water into the bottom of the cooker, place the idli stand in it, close the lid, leave on high for 2 mins, then turn to medium and cook for 11 mins.
Switch off the stove, let stand for 5 mins, wet a spoon and remove the idlis from the idli plates.
Serve with a chutney of your choice or sambar. I served mine with Nellore fish pulusu (Nellore fish curry).
When Srivalli of Spiceyourlife announced that we were to make moong dal halwa for December's challenge... I was pretty excited, as I had never made it but had eaten it many times. I made it twice: once with the second recipe and again without soaking the dal (this is my friend's version).
* Don't leave the halwa unattended. The dal can stick, and it can go from just done to burnt in a second, so keep stirring as much as possible, irrespective of the ghee added.
* You aren't looking for the halwa to get too thick when you turn off the heat. It will thicken as it cools.
* Cook until ghee surfaces on the sides and the halwa attains a very nice shine.
* Initially, it may appear that all the ghee is being used up. But as the dal cooks, the ghee separates. So the ghee measure is sufficient.
* Though the original recipe didn't call for roasting the dal before soaking, Lataji felt roasting it a bit gives more fragrance.
Dry roast the moong dal on a very low flame till it turns golden brown (take care not to burn it) or a nice aroma fills your kitchen. Cool, make a coarse powder and keep aside.
Heat ghee and roast the cashews and raisins; keep aside.
Now add the moong dal powder and fry on a low flame for about 5-6 mins or till it turns a nice golden brown colour.
Add the coconut and saute for a couple of minutes.
Add milk and water slowly, 1/2 cup at a time, and mix well. Avoid any lumps. Add the cardamom powder and saffron and cook covered on low heat, stirring occasionally and taking care not to burn the halwa at the bottom.
Once the dal is cooked, add the rose water, switch off, transfer into a bowl and garnish with cashews and raisins.
|
CSRF_SECRET_KEY = "0h97kel3aq17853645odikh97kel3aq4vndtonignnobfjh"
SESSION_KEY = "3aq4vnd4vndtonignnt801785onignnob"
# Google APIs
GOOGLE_APP_ID = '768017853645-odikh97kel3aq4vndtonignnobfjhkea.apps.googleusercontent.com'
GOOGLE_APP_SECRET = 'gb2X0NdP36xF-2kmj_S2IN3U'
#GOOGLE_REDIRECT_URI = 'http://localhost:5000/auth/google/callback'
#GOOGLE_REDIRECT_URI = 'http://www.flutterhub.com/auth/google/callback'
GOOGLE_REDIRECT_URI = 'http://52.27.185.214/auth/google/callback'
# Facebook auth apis
FB_APP_ID = '382093888646657'
FB_APP_SECRET = '2ba3373b14a801141d26c32bf9c9b205'
#FB_REDIRECT_URI = "http://localhost:5000/auth/facebook/callback"
#FB_REDIRECT_URI = "http://www.flutterhub.com/auth/facebook/callback"
FB_REDIRECT_URI = "http://52.27.185.214/auth/facebook/callback"
# Key/secret for both LinkedIn OAuth 1.0a and OAuth 2.0
# https://www.linkedin.com/secure/developer
LINKEDIN_KEY = 'consumer key'
LINKEDIN_SECRET = 'consumer secret'
# https://manage.dev.live.com/AddApplication.aspx
# https://manage.dev.live.com/Applications/Index
WL_CLIENT_ID = 'client id'
WL_CLIENT_SECRET = 'client secret'
# https://dev.twitter.com/apps
TWITTER_CONSUMER_KEY = 'oauth1.0a consumer key'
TWITTER_CONSUMER_SECRET = 'oauth1.0a consumer secret'
# https://foursquare.com/developers/apps
FOURSQUARE_CLIENT_ID = 'client id'
FOURSQUARE_CLIENT_SECRET = 'client secret'
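# A safer pattern (a sketch; the environment variable names are illustrative):
# load credentials from the environment instead of hardcoding them in source.
#
#   import os
#   GOOGLE_APP_ID = os.environ.get('GOOGLE_APP_ID', '')
#   GOOGLE_APP_SECRET = os.environ.get('GOOGLE_APP_SECRET', '')
#   FB_APP_ID = os.environ.get('FB_APP_ID', '')
#   FB_APP_SECRET = os.environ.get('FB_APP_SECRET', '')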
|
A 72-run opening stand between David Warner and Jonny Bairstow had set the platform for the other Sunrisers batsmen to build on, but they imploded on home turf.
The Delhi Capitals bowlers upstaged their Sunrisers Hyderabad counterparts to help their side to a third straight win and move to second in the standings with a 39-run victory on Sunday.
A 72-run opening stand between David Warner and Jonny Bairstow had set the platform for the others to build on, but instead they imploded.
The Capitals’ bowlers, on the other hand, were not deterred and reined them in. As a result, for 27 balls from over 6.4 the Sunrisers didn’t manage to hit a boundary, and with the pressure piling up they lost Jonny Bairstow (41) and skipper Kane Williamson (3), both dismissed by Keemo Paul (3/17), during that period. Warner tried to break the shackles but looked drained of energy.
His 47-ball 51 wasn’t one of his more fluent innings in this edition of the IPL. He had a couple of reprieves as well: one miss-hit fell just short of deep mid-wicket, and a few deliveries later a full-blooded shot hit straight back couldn’t stick in Amit Mishra’s hands.
But the Australian southpaw’s luck ran out soon after he reached his fifty, holing out off the bowling of Kagiso Rabada at mid-off after failing to judge a slower one. The very next ball Vijay Shankar perished, and with it went any chance of a Sunrisers fightback. The Sunrisers lost seven wickets in the space of just 10 runs, folding at 116.
Coach Tom Moody had admitted during the pre-match press conference that the middle order was a cause for concern for them, and they did ring in the changes for this match. Skipper Williamson, playing only his second match this season, looked rusty. Vijay Shankar, after doing well initially, has struggled to get going.
Ricky Bhui and Abhishek Sharma, brought in for Manish Pandey and Yusuf Pathan, struggled throughout their brief stay.
While Paul and Rabada got the Capitals to the finish line, Chris Morris’ triple strike in the 18th over virtually killed any chance of the Sunrisers mounting a late comeback.
One of the concerns for the Capitals will be the failure of their batsmen to build on their starts, especially Shreyas Iyer and Rishab Pant. Both have just one fifty to their credit. While 30s are considered respectable, the think-tank would like them to finish off the innings rather than give away their wickets.
Earlier, the Sunrisers’ bowling once again delivered by restricting the Delhi Capitals to 155. Leading the pack was Khaleel Ahmed, who was playing his first match of the tournament. The left-arm pacer bowled with pace and accuracy to trouble the Capitals’ batsmen. Bhuvneshwar Kumar too was on the money, bowling the best spell of this IPL.