#!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
    """Shared fixture for all diff_match_patch test cases."""

    def setUp(self):
        "Test harness for dmp_module."
        self.dmp = dmp_module.diff_match_patch()

    def diff_rebuildtexts(self, diffs):
        """Reconstruct the two texts which made up the diff originally.

        diffs is a list of (op, data) tuples as produced by diff_main.
        Returns (text1, text2): text1 is the source (skips insertions),
        text2 is the destination (skips deletions); equalities appear in both.
        """
        text1 = ""
        text2 = ""
        # Iterate the tuples directly instead of indexing by position.
        for (op, data) in diffs:
            if op != dmp_module.diff_match_patch.DIFF_INSERT:
                text1 += data
            if op != dmp_module.diff_match_patch.DIFF_DELETE:
                text2 += data
        return (text1, text2)
class DiffTest(DiffMatchPatchTest):
    """DIFF TEST FUNCTIONS"""

    def testDiffCommonPrefix(self):
        """diff_commonPrefix: length of the common prefix of two strings."""
        # Detect any common prefix.
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
        # Non-null case.
        self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
        # Whole case.
        self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))

    def testDiffCommonSuffix(self):
        """diff_commonSuffix: length of the common suffix of two strings."""
        # Detect any common suffix.
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
        # Non-null case.
        self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
        # Whole case.
        self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))

    def testDiffCommonOverlap(self):
        """diff_commonOverlap: suffix of text1 that matches a prefix of text2."""
        # Null case.
        self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
        # Whole case.
        self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
        # No overlap.
        self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
        # Overlap.
        self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
        # Unicode.
        # Some overly clever languages (C#) may treat ligatures as equal to their
        # component letters.  E.g. U+FB01 == 'fi'
        self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))

    def testDiffHalfMatch(self):
        """diff_halfMatch: find a substring shared by at least half of the longer text."""
        # Detect a halfmatch.
        self.dmp.Diff_Timeout = 1
        # No match.
        self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
        self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
        # Single Match.
        self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
        self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
        self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
        self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
        # Multiple Matches.
        self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
        self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
        self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
        # Non-optimal halfmatch.
        # Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
        self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
        # Optimal no halfmatch.
        self.dmp.Diff_Timeout = 0
        self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))

    def testDiffLinesToChars(self):
        """diff_linesToChars: encode each unique line as a single character."""
        # Convert lines down to characters.
        self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
        self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
        self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
        # More than 256 to reveal any 8-bit limitations.
        n = 300
        lineList = []
        charList = []
        for x in range(1, n + 1):
            lineList.append(str(x) + "\n")
            charList.append(unichr(x))
        self.assertEquals(n, len(lineList))
        lines = "".join(lineList)
        chars = "".join(charList)
        self.assertEquals(n, len(chars))
        # Index 0 is reserved, so the line list starts with an empty entry.
        lineList.insert(0, "")
        self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))

    def testDiffCharsToLines(self):
        """diff_charsToLines: decode character-encoded diffs back to lines."""
        # Convert chars up to lines.
        diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
        self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
        self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
        # More than 256 to reveal any 8-bit limitations.
        n = 300
        lineList = []
        charList = []
        for x in range(1, n + 1):
            lineList.append(str(x) + "\n")
            charList.append(unichr(x))
        self.assertEquals(n, len(lineList))
        lines = "".join(lineList)
        chars = "".join(charList)
        self.assertEquals(n, len(chars))
        lineList.insert(0, "")
        diffs = [(self.dmp.DIFF_DELETE, chars)]
        self.dmp.diff_charsToLines(diffs, lineList)
        self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)

    def testDiffCleanupMerge(self):
        """diff_cleanupMerge: merge adjacent same-op entries; the list is edited in place."""
        # Cleanup a messy diff.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([], diffs)
        # No change case.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
        # Merge equalities.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
        # Merge deletions.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
        # Merge insertions.
        diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
        # Merge interweave.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
        # Prefix and suffix detection.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
        # Prefix and suffix detection with equalities.
        diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
        # Slide edit left.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
        # Slide edit right.
        diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
        # Slide edit left recursive.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
        # Slide edit right recursive.
        diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupMerge(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)

    def testDiffCleanupSemanticLossless(self):
        """diff_cleanupSemanticLossless: shift edit boundaries without changing the result text."""
        # Slide diffs to match logical boundaries.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([], diffs)
        # Blank lines.
        diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
        # Line boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
        # Word boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # Alphanumeric boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # Hitting the start.
        diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
        # Hitting the end.
        diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
        # Sentence boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
        self.dmp.diff_cleanupSemanticLossless(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)

    def testDiffCleanupSemantic(self):
        """diff_cleanupSemantic: factor out trivial equalities and overlaps for human readability."""
        # Cleanup semantically trivial equalities.
        # Null case.
        diffs = []
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([], diffs)
        # No elimination #1.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
        # No elimination #2.
        diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
        # Simple elimination.
        diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
        # Backpass elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
        # Multiple eliminations.
        diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
        # Word boundaries.
        diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
        # No overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
        # Overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
        # Reverse overlap elimination.
        diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
        # Two overlap eliminations.
        diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
        self.dmp.diff_cleanupSemantic(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)

    def testDiffCleanupEfficiency(self):
        """diff_cleanupEfficiency: merge short equalities whose cost exceeds Diff_EditCost."""
        # Cleanup operationally trivial equalities.
        self.dmp.Diff_EditCost = 4
        # Null case.
        diffs = []
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([], diffs)
        # No elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
        # Four-edit elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
        # Three-edit elimination.
        diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
        # Backpass elimination.
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
        # High cost elimination.
        self.dmp.Diff_EditCost = 5
        diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
        self.dmp.diff_cleanupEfficiency(diffs)
        self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
        # Restore the default for subsequent tests.
        self.dmp.Diff_EditCost = 4

    def testDiffPrettyHtml(self):
        """diff_prettyHtml: render a diff as HTML with ins/del markup."""
        # Pretty print.
        diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
        self.assertEquals("<span>a&para;<br></span><del style=\"background:#ffe6e6;\">&lt;B&gt;b&lt;/B&gt;</del><ins style=\"background:#e6ffe6;\">c&amp;d</ins>", self.dmp.diff_prettyHtml(diffs))

    def testDiffText(self):
        """diff_text1/diff_text2: recover source and destination texts from a diff."""
        # Compute the source and destination texts.
        diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
        self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
        self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))

    def testDiffDelta(self):
        """diff_toDelta/diff_fromDelta: round-trip a diff through its compact delta encoding."""
        # Convert a diff into delta string.
        diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
        text1 = self.dmp.diff_text1(diffs)
        self.assertEquals("jumps over the lazy", text1)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
        # Generates error (19 != 20).
        try:
            self.dmp.diff_fromDelta(text1 + "x", delta)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Generates error (19 != 18).
        try:
            self.dmp.diff_fromDelta(text1[1:], delta)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Generates error (%c3%xy invalid Unicode).
        try:
            self.dmp.diff_fromDelta("", "+%c3xy")
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
        # Test deltas with special characters.
        diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
        text1 = self.dmp.diff_text1(diffs)
        self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
        # Verify pool of unchanged characters.
        diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
        text2 = self.dmp.diff_text2(diffs)
        self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
        delta = self.dmp.diff_toDelta(diffs)
        self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
        # Convert delta string into a diff.
        self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))

    def testDiffXIndex(self):
        """diff_xIndex: map an index in text1 to the corresponding index in text2."""
        # Translate a location in text1 to text2.
        self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
        # Translation on deletion.
        self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))

    def testDiffLevenshtein(self):
        """diff_levenshtein: edit distance implied by a diff (max of ins/del per run)."""
        # Levenshtein with trailing equality.
        self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
        # Levenshtein with leading equality.
        self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
        # Levenshtein with middle equality.
        self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))

    def testDiffBisect(self):
        """diff_bisect: low-level Myers bisection, with and without a deadline."""
        # Normal.
        a = "cat"
        b = "map"
        # Since the resulting diff hasn't been normalized, it would be ok if
        # the insertion and deletion pairs are swapped.
        # If the order changes, tweak this test as required.
        self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
        # Timeout.
        self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))

    def testDiffMain(self):
        """diff_main: end-to-end diffs, timeout behavior, and line-mode speedup."""
        # Perform a trivial diff.
        # Null case.
        self.assertEquals([], self.dmp.diff_main("", "", False))
        # Equality.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
        # Simple insertion.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
        # Simple deletion.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
        # Two insertions.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
        # Two deletions.
        self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
        # Perform a real diff.
        # Switch off the timeout.
        self.dmp.Diff_Timeout = 0
        # Simple cases.
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
        # Overlaps.
        self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
        self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
        self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
        # Large equality.
        self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
        # Timeout.
        self.dmp.Diff_Timeout = 0.1  # 100ms
        a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
        b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
        # Increase the text lengths by 1024 times to ensure a timeout.
        for x in range(10):
            a = a + a
            b = b + b
        startTime = time.time()
        self.dmp.diff_main(a, b)
        endTime = time.time()
        # Test that we took at least the timeout period.
        self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
        # Test that we didn't take forever (be forgiving).
        # Theoretically this test could fail very occasionally if the
        # OS task swaps or locks up for a second at the wrong moment.
        self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
        self.dmp.Diff_Timeout = 0
        # Test the linemode speedup.
        # Must be long to pass the 100 char cutoff.
        # Simple line-mode.
        a = "1234567890\n" * 13
        b = "abcdefghij\n" * 13
        self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
        # Single line-mode.
        a = "1234567890" * 13
        b = "abcdefghij" * 13
        self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
        # Overlap line-mode.
        a = "1234567890\n" * 13
        b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
        texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
        texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
        self.assertEquals(texts_textmode, texts_linemode)
        # Test null inputs.
        try:
            self.dmp.diff_main(None, None)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class MatchTest(DiffMatchPatchTest):
    """MATCH TEST FUNCTIONS"""

    def testMatchAlphabet(self):
        """match_alphabet: per-character bitmasks used by the Bitap algorithm."""
        # Initialise the bitmasks for Bitap.
        self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
        self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))

    def testMatchBitap(self):
        """match_bitap: fuzzy locate a pattern, honoring Match_Threshold and Match_Distance."""
        self.dmp.Match_Distance = 100
        self.dmp.Match_Threshold = 0.5
        # Exact matches.
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
        # Fuzzy matches.
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
        self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
        # Overflow.
        self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
        self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
        # Threshold test.
        self.dmp.Match_Threshold = 0.4
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.3
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.0
        self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
        # Restore the default threshold.
        self.dmp.Match_Threshold = 0.5
        # Multiple select.
        self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
        self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
        # Distance test.
        self.dmp.Match_Distance = 10  # Strict location.
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
        self.dmp.Match_Distance = 1000  # Loose location.
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))

    def testMatchMain(self):
        """match_main: public entry point, including shortcut paths and null input."""
        # Full match.
        # Shortcut matches.
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
        self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
        self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
        # Complex match.
        self.dmp.Match_Threshold = 0.7
        self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
        self.dmp.Match_Threshold = 0.5
        # Test null inputs.
        try:
            self.dmp.match_main(None, None, 0)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
    """patch_obj: str() emits a GNU-diff-style hunk (1-based coordinates, %xx-escaped)."""
    # Patch Object.
    p = dmp_module.patch_obj()
    p.start1 = 20
    p.start2 = 21
    p.length1 = 18
    p.length2 = 17
    p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
    strp = str(p)
    # Note the +1 offset in the header: internal starts are 0-based, output is 1-based.
    self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
    """patch_fromText: parse textual patches; round-trips through str(); rejects garbage."""
    self.assertEquals([], self.dmp.patch_fromText(""))
    strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
    self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
    self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
    self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
    self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
    # Generates error.
    try:
        self.dmp.patch_fromText("Bad\nPatch\n")
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
def testPatchToText(self):
    """patch_toText: serialize a list of patches; inverse of patch_fromText."""
    strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
    p = self.dmp.patch_fromText(strp)
    self.assertEquals(strp, self.dmp.patch_toText(p))
    # Two-hunk patch round-trips as well.
    strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
    p = self.dmp.patch_fromText(strp)
    self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
    """patch_addContext: grow a patch with Patch_Margin context, expanding when ambiguous."""
    self.dmp.Patch_Margin = 4
    p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
    self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
    # Same, but not enough trailing context.
    p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps.")
    self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
    # Same, but not enough leading context.
    p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps.")
    self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
    # Same, but with ambiguity.
    # Context keeps growing until the patch locates uniquely within the text.
    p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
    self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
    self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
  def testPatchSplitMax(self):
    """patch_splitMax should break patches wider than Match_MaxBits apart."""
    # Assumes that Match_MaxBits is 32.
    patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))

    # A patch that already fits within Match_MaxBits is left unchanged.
    patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
    oldToText = self.dmp.patch_toText(patches)
    self.dmp.patch_splitMax(patches)
    self.assertEquals(oldToText, self.dmp.patch_toText(patches))

    # A single oversized deletion is split into chained smaller deletions.
    patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))

    # Edits close together are split into separate patches at hunk width.
    patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
    self.dmp.patch_splitMax(patches)
    self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n  , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n  , t : 1 abcdef\n", self.dmp.patch_toText(patches))
  def testPatchAddPadding(self):
    """patch_addPadding should pad hunks with the %01..%04 null characters so
    that edits touching the string boundaries gain usable context."""
    # Both edges full.
    patches = self.dmp.patch_make("", "test")
    self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
    self.dmp.patch_addPadding(patches)
    self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))

    # Both edges partial.
    patches = self.dmp.patch_make("XY", "XtestY")
    self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
    self.dmp.patch_addPadding(patches)
    self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))

    # Both edges none: offsets shift but no padding chars enter the hunk.
    patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
    self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
    self.dmp.patch_addPadding(patches)
    self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  unittest.main()
| apache-2.0 | -7,822,959,367,033,748,000 | -5,183,234,683,911,989,000 | 47.036824 | 408 | 0.645578 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/twisted/internet/test/test_stdio.py | 44 | 6297 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
"""
from twisted.python.runtime import platform
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.protocol import Protocol
if not platform.isWindows():
from twisted.internet._posixstdio import StandardIO
class StdioFilesTests(ReactorBuilder):
    """
    L{StandardIO} supports reading and writing to filesystem files.
    """

    def setUp(self):
        # Create an empty scratch file whose descriptor serves as the
        # "other" end (stdin or stdout) of StandardIO in each test below.
        path = self.mktemp()
        file(path, "w").close()
        self.extraFile = file(path, "r+")

    def test_addReader(self):
        """
        Adding a filesystem file reader to a reactor will make sure it is
        polled.
        """
        reactor = self.buildReactor()

        class DataProtocol(Protocol):
            data = ""

            def dataReceived(self, data):
                self.data += data
                # It'd be better to stop reactor on connectionLost, but that
                # fails on FreeBSD, probably due to
                # http://bugs.python.org/issue9591:
                if self.data == "hello!":
                    reactor.stop()

        path = self.mktemp()
        f = file(path, "w")
        f.write("hello!")
        f.close()
        f = file(path, "r")

        # Read bytes from a file, deliver them to a protocol instance:
        protocol = DataProtocol()
        StandardIO(protocol, stdin=f.fileno(),
                   stdout=self.extraFile.fileno(),
                   reactor=reactor)

        self.runReactor(reactor)

        self.assertEqual(protocol.data, "hello!")

    def test_addWriter(self):
        """
        Adding a filesystem file writer to a reactor will make sure it is
        polled.
        """
        reactor = self.buildReactor()

        class DisconnectProtocol(Protocol):
            # Stopping on connectionLost makes the reactor exit once the
            # write buffer has been flushed and the transport torn down.
            def connectionLost(self, reason):
                reactor.stop()

        path = self.mktemp()
        f = file(path, "w")

        # Write bytes to a transport, hopefully have them written to a file:
        protocol = DisconnectProtocol()
        StandardIO(protocol, stdout=f.fileno(),
                   stdin=self.extraFile.fileno(), reactor=reactor)
        protocol.transport.write("hello")
        protocol.transport.write(", world")
        protocol.transport.loseConnection()

        self.runReactor(reactor)

        f.close()
        f = file(path, "r")
        self.assertEqual(f.read(), "hello, world")
        f.close()

    def test_removeReader(self):
        """
        Removing a filesystem file reader from a reactor will make sure it is
        no longer polled.
        """
        reactor = self.buildReactor()
        self.addCleanup(self.unbuildReactor, reactor)

        path = self.mktemp()
        file(path, "w").close()
        # Cleanup might fail if file is GCed too soon:
        self.f = f = file(path, "r")

        # Have the reader added:
        stdio = StandardIO(Protocol(), stdin=f.fileno(),
                           stdout=self.extraFile.fileno(),
                           reactor=reactor)
        self.assertIn(stdio._reader, reactor.getReaders())
        stdio._reader.stopReading()
        self.assertNotIn(stdio._reader, reactor.getReaders())

    def test_removeWriter(self):
        """
        Removing a filesystem file writer from a reactor will make sure it is
        no longer polled.
        """
        reactor = self.buildReactor()
        self.addCleanup(self.unbuildReactor, reactor)

        # Cleanup might fail if file is GCed too soon:
        self.f = f = file(self.mktemp(), "w")

        # Have the reader added:
        protocol = Protocol()
        stdio = StandardIO(protocol, stdout=f.fileno(),
                           stdin=self.extraFile.fileno(),
                           reactor=reactor)
        # A pending write keeps the writer registered with the reactor.
        protocol.transport.write("hello")
        self.assertIn(stdio._writer, reactor.getWriters())
        stdio._writer.stopWriting()
        self.assertNotIn(stdio._writer, reactor.getWriters())

    def test_removeAll(self):
        """
        Calling C{removeAll} on a reactor includes descriptors that are
        filesystem files.
        """
        reactor = self.buildReactor()
        self.addCleanup(self.unbuildReactor, reactor)

        path = self.mktemp()
        file(path, "w").close()
        # Cleanup might fail if file is GCed too soon:
        self.f = f = file(path, "r")

        # Have the reader added:
        stdio = StandardIO(Protocol(), stdin=f.fileno(),
                           stdout=self.extraFile.fileno(), reactor=reactor)
        # And then removed:
        removed = reactor.removeAll()
        self.assertIn(stdio._reader, removed)
        self.assertNotIn(stdio._reader, reactor.getReaders())

    def test_getReaders(self):
        """
        C{reactor.getReaders} includes descriptors that are filesystem files.
        """
        reactor = self.buildReactor()
        self.addCleanup(self.unbuildReactor, reactor)

        path = self.mktemp()
        file(path, "w").close()
        # Cleanup might fail if file is GCed too soon:
        self.f = f = file(path, "r")

        # Have the reader added:
        stdio = StandardIO(Protocol(), stdin=f.fileno(),
                           stdout=self.extraFile.fileno(), reactor=reactor)
        self.assertIn(stdio._reader, reactor.getReaders())

    def test_getWriters(self):
        """
        C{reactor.getWriters} includes descriptors that are filesystem files.
        """
        reactor = self.buildReactor()
        self.addCleanup(self.unbuildReactor, reactor)

        # Cleanup might fail if file is GCed too soon:
        self.f = f = file(self.mktemp(), "w")

        # Have the reader added:
        stdio = StandardIO(Protocol(), stdout=f.fileno(),
                           stdin=self.extraFile.fileno(), reactor=reactor)
        # The writer is only registered once it actually starts writing.
        self.assertNotIn(stdio._writer, reactor.getWriters())
        stdio._writer.startWriting()
        self.assertIn(stdio._writer, reactor.getWriters())
# NOTE(review): this ``skip`` appears to be set at module level so trial
# skips these POSIX-only tests on Windows (where StandardIO cannot be
# pointed at arbitrary descriptors) -- confirm placement against upstream.
if platform.isWindows():
    skip = ("StandardIO does not accept stdout as an argument to Windows. "
            "Testing redirection to a file is therefore harder.")

# Generate one concrete TestCase subclass per installed reactor.
globals().update(StdioFilesTests.makeTestCaseClasses())
| gpl-2.0 | 2,207,838,233,919,123,000 | -8,893,481,130,306,463,000 | 31.292308 | 80 | 0.586311 | false |
codrut3/tensorflow | tensorflow/examples/learn/iris.py | 22 | 4007 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset.
This example uses APIs in Tensorflow 1.4 or above.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib
import tensorflow as tf
# Data sets: local cache filenames and their download locations.
IRIS_TRAINING = 'iris_training.csv'
IRIS_TRAINING_URL = 'http://download.tensorflow.org/data/iris_training.csv'
IRIS_TEST = 'iris_test.csv'
IRIS_TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv'

# CSV feature column names; the remaining column holds the class label.
FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
def maybe_download_iris_data(file_name, download_url):
  """Download `download_url` to `file_name` if absent; return the record count.

  The first field of the file's header line is the total number of records.
  """
  if not os.path.exists(file_name):
    # Fetch the whole CSV first, then cache it locally in one write.
    raw_bytes = urllib.urlopen(download_url).read()
    with open(file_name, 'w') as cache:
      cache.write(raw_bytes)

  with open(file_name, 'r') as cache:
    header = cache.readline()
  return int(header.split(',')[0])
def input_fn(file_name, num_data, batch_size, is_training):
  """Creates an input_fn required by Estimator train/evaluate.

  Args:
    file_name: CSV file whose first line is a header (it is skipped).
    num_data: total number of records; used as the shuffle buffer size.
    batch_size: number of examples per batch.
    is_training: if True, shuffle and repeat the dataset indefinitely.

  Returns:
    A zero-argument callable returning (features, labels) tensors.
  """
  # If the data sets aren't stored locally, download them.

  def _parse_csv(rows_string_tensor):
    """Takes the string input tensor and returns tuple of (features, labels)."""
    # Last dim is the label.
    num_features = len(FEATURE_KEYS)
    num_columns = num_features + 1
    columns = tf.decode_csv(rows_string_tensor,
                            record_defaults=[[]] * num_columns)
    features = dict(zip(FEATURE_KEYS, columns[:num_features]))
    labels = tf.cast(columns[num_features], tf.int32)
    return features, labels

  def _input_fn():
    """The input_fn."""
    dataset = tf.data.TextLineDataset([file_name])
    # Skip the first line (which does not have data).
    dataset = dataset.skip(1)
    dataset = dataset.map(_parse_csv)

    if is_training:
      # For this small dataset, which can fit into memory, to achieve true
      # randomness, the shuffle buffer size is set as the total number of
      # elements in the dataset.
      dataset = dataset.shuffle(num_data)
      dataset = dataset.repeat()

    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    return features, labels

  return _input_fn
def main(unused_argv):
  """Download the Iris data, then train and evaluate a DNNClassifier."""
  tf.logging.set_verbosity(tf.logging.INFO)

  num_training_data = maybe_download_iris_data(
      IRIS_TRAINING, IRIS_TRAINING_URL)
  num_test_data = maybe_download_iris_data(IRIS_TEST, IRIS_TEST_URL)

  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = [
      tf.feature_column.numeric_column(key, shape=1) for key in FEATURE_KEYS]
  classifier = tf.estimator.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Train.
  train_input_fn = input_fn(IRIS_TRAINING, num_training_data, batch_size=32,
                            is_training=True)
  classifier.train(input_fn=train_input_fn, steps=400)

  # Eval.
  test_input_fn = input_fn(IRIS_TEST, num_test_data, batch_size=32,
                           is_training=False)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
# tf.app.run parses flags and then invokes main().
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 | 8,437,698,613,350,695,000 | 8,043,364,434,907,392,000 | 33.543103 | 80 | 0.688295 | false |
mgraupe/acq4 | acq4/pyqtgraph/WidgetGroup.py | 34 | 10002 | # -*- coding: utf-8 -*-
"""
WidgetGroup.py - WidgetGroup class for easily managing lots of Qt widgets
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
This class addresses the problem of having to save and restore the state
of a large group of widgets.
"""
from .Qt import QtCore, QtGui, USE_PYQT5
import weakref, inspect
from .python2_3 import asUnicode
__all__ = ['WidgetGroup']
def splitterState(w):
    """Serialise a QSplitter's layout as a percent-encoded string."""
    return str(w.saveState().toPercentEncoding())
def restoreSplitter(w, s):
    """Restore a QSplitter from either a list of pane sizes or a
    percent-encoded string produced by splitterState()."""
    if type(s) is list:
        w.setSizes(s)
    elif type(s) is str:
        w.restoreState(QtCore.QByteArray.fromPercentEncoding(s))
    else:
        print("Can't configure QSplitter using object of type", type(s))
    if w.count() > 0:   ## make sure at least one item is not collapsed
        for i in w.sizes():
            if i > 0:
                return
        # All panes were collapsed; give every pane an equal nonzero size.
        w.setSizes([50] * w.count())
def comboState(w):
    """Return the state of a QComboBox: the current item's user data when it
    has any, otherwise the item's display text."""
    ind = w.currentIndex()
    data = w.itemData(ind)
    #if not data.isValid():
    if data is not None:
        try:
            # QVariant-based bindings need unwrapping to a plain int;
            # bindings without QVariant raise AttributeError and the value
            # is used as-is.
            if not data.isValid():
                data = None
            else:
                data = data.toInt()[0]
        except AttributeError:
            pass
    if data is None:
        return asUnicode(w.itemText(ind))
    else:
        return data
def setComboState(w, v):
    """Restore a QComboBox from a value produced by comboState()."""
    # Integer states refer to item user-data; anything else matches by text.
    if type(v) is int:
        dataIndex = w.findData(v)
        if dataIndex > -1:
            w.setCurrentIndex(dataIndex)
            return
    w.setCurrentIndex(w.findText(str(v)))
class WidgetGroup(QtCore.QObject):
    """This class takes a list of widgets and keeps an internal record of their
    state that is always up to date.

    Allows reading and writing from groups of widgets simultaneously.
    """

    ## List of widget types that can be handled by WidgetGroup.
    ## The value for each type is a tuple (change signal function, get function, set function, [auto-add children])
    ## The change signal function that takes an object and returns a signal that is emitted any time the state of the widget changes, not just
    ## when it is changed by user interaction. (for example, 'clicked' is not a valid signal here)
    ## If the change signal is None, the value of the widget is not cached.
    ## Custom widgets not in this list can be made to work with WidgetGroup by giving them a 'widgetGroupInterface' method
    ## which returns the tuple.
    classes = {
        QtGui.QSpinBox:
            (lambda w: w.valueChanged,
             QtGui.QSpinBox.value,
             QtGui.QSpinBox.setValue),
        QtGui.QDoubleSpinBox:
            (lambda w: w.valueChanged,
             QtGui.QDoubleSpinBox.value,
             QtGui.QDoubleSpinBox.setValue),
        QtGui.QSplitter:
            (None,
             splitterState,
             restoreSplitter,
             True),
        QtGui.QCheckBox:
            (lambda w: w.stateChanged,
             QtGui.QCheckBox.isChecked,
             QtGui.QCheckBox.setChecked),
        QtGui.QComboBox:
            (lambda w: w.currentIndexChanged,
             comboState,
             setComboState),
        QtGui.QGroupBox:
            (lambda w: w.toggled,
             QtGui.QGroupBox.isChecked,
             QtGui.QGroupBox.setChecked,
             True),
        QtGui.QLineEdit:
            (lambda w: w.editingFinished,
             lambda w: str(w.text()),
             QtGui.QLineEdit.setText),
        QtGui.QRadioButton:
            (lambda w: w.toggled,
             QtGui.QRadioButton.isChecked,
             QtGui.QRadioButton.setChecked),
        QtGui.QSlider:
            (lambda w: w.valueChanged,
             QtGui.QSlider.value,
             QtGui.QSlider.setValue),
    }

    # Emitted as (widget name, new value) whenever a cached widget changes.
    sigChanged = QtCore.Signal(str, object)

    def __init__(self, widgetList=None):
        """Initialize WidgetGroup, adding specified widgets into this group.
        widgetList can be:
         - a list of widget specifications (widget, [name], [scale])
         - a dict of name: widget pairs
         - any QObject, and all compatible child widgets will be added recursively.

        The 'scale' parameter for each widget allows QSpinBox to display a different value than the value recorded
        in the group state (for example, the program may set a spin box value to 100e-6 and have it displayed as 100 to the user)
        """
        QtCore.QObject.__init__(self)
        self.widgetList = weakref.WeakKeyDictionary()  # Make sure widgets don't stick around just because they are listed here
        self.scales = weakref.WeakKeyDictionary()
        self.cache = {}  ## name:value pairs
        self.uncachedWidgets = weakref.WeakKeyDictionary()
        if isinstance(widgetList, QtCore.QObject):
            self.autoAdd(widgetList)
        elif isinstance(widgetList, list):
            for w in widgetList:
                self.addWidget(*w)
        elif isinstance(widgetList, dict):
            for name, w in widgetList.items():
                self.addWidget(w, name)
        elif widgetList is None:
            return
        else:
            raise Exception("Wrong argument type %s" % type(widgetList))

    def addWidget(self, w, name=None, scale=None):
        """Add one widget to the group under `name` (defaults to objectName)."""
        if not self.acceptsType(w):
            raise Exception("Widget type %s not supported by WidgetGroup" % type(w))
        if name is None:
            name = str(w.objectName())
        if name == '':
            raise Exception("Cannot add widget '%s' without a name." % str(w))
        self.widgetList[w] = name
        self.scales[w] = scale
        self.readWidget(w)

        if type(w) in WidgetGroup.classes:
            signal = WidgetGroup.classes[type(w)][0]
        else:
            signal = w.widgetGroupInterface()[0]

        if signal is not None:
            # The interface may give either a signal or a function returning one.
            if inspect.isfunction(signal) or inspect.ismethod(signal):
                signal = signal(w)
            signal.connect(self.mkChangeCallback(w))
        else:
            # No change signal: this widget must be re-read on every state().
            self.uncachedWidgets[w] = None

    def findWidget(self, name):
        """Return the widget registered under `name`, or None."""
        for w in self.widgetList:
            if self.widgetList[w] == name:
                return w
        return None

    def interface(self, obj):
        """Return the (signal, getter, setter, ...) tuple for `obj`."""
        t = type(obj)
        if t in WidgetGroup.classes:
            return WidgetGroup.classes[t]
        else:
            return obj.widgetGroupInterface()

    def checkForChildren(self, obj):
        """Return true if we should automatically search the children of this object for more."""
        iface = self.interface(obj)
        return (len(iface) > 3 and iface[3])

    def autoAdd(self, obj):
        ## Find all children of this object and add them if possible.
        accepted = self.acceptsType(obj)
        if accepted:
            #print "%s  auto add %s" % (self.objectName(), obj.objectName())
            self.addWidget(obj)

        if not accepted or self.checkForChildren(obj):
            for c in obj.children():
                self.autoAdd(c)

    def acceptsType(self, obj):
        """Return True if `obj` is a supported widget type or exposes a
        widgetGroupInterface method."""
        for c in WidgetGroup.classes:
            if isinstance(obj, c):
                return True
        if hasattr(obj, 'widgetGroupInterface'):
            return True
        return False

    def setScale(self, widget, scale):
        # Re-apply the current value so the widget display matches the new scale.
        val = self.readWidget(widget)
        self.scales[widget] = scale
        self.setWidget(widget, val)

    def mkChangeCallback(self, w):
        # Bind the widget into the slot so one callback serves many widgets.
        return lambda *args: self.widgetChanged(w, *args)

    def widgetChanged(self, w, *args):
        # Only emit when the cached value actually differs from the widget.
        n = self.widgetList[w]
        v1 = self.cache[n]
        v2 = self.readWidget(w)
        if v1 != v2:
            if not USE_PYQT5:
                # Old signal kept for backward compatibility.
                self.emit(QtCore.SIGNAL('changed'), self.widgetList[w], v2)
            self.sigChanged.emit(self.widgetList[w], v2)

    def state(self):
        """Return a copy of the name:value state of all widgets in the group."""
        for w in self.uncachedWidgets:
            self.readWidget(w)
        return self.cache.copy()

    def setState(self, s):
        """Apply a state dict (as returned by state()) to the group's widgets."""
        for w in self.widgetList:
            n = self.widgetList[w]
            if n not in s:
                continue
            self.setWidget(w, s[n])

    def readWidget(self, w):
        """Read the current value of `w`, update the cache, and return it."""
        if type(w) in WidgetGroup.classes:
            getFunc = WidgetGroup.classes[type(w)][1]
        else:
            getFunc = w.widgetGroupInterface()[1]

        if getFunc is None:
            return None

        ## if the getter function provided in the interface is a bound method,
        ## then just call the method directly. Otherwise, pass in the widget as the first arg
        ## to the function.
        if inspect.ismethod(getFunc) and getFunc.__self__ is not None:
            val = getFunc()
        else:
            val = getFunc(w)

        if self.scales[w] is not None:
            val /= self.scales[w]
        #if isinstance(val, QtCore.QString):
            #val = str(val)
        n = self.widgetList[w]
        self.cache[n] = val
        return val

    def setWidget(self, w, v):
        """Write value `v` (in group-state units) into widget `w`."""
        v1 = v
        if self.scales[w] is not None:
            v *= self.scales[w]

        if type(w) in WidgetGroup.classes:
            setFunc = WidgetGroup.classes[type(w)][2]
        else:
            setFunc = w.widgetGroupInterface()[2]

        ## if the setter function provided in the interface is a bound method,
        ## then just call the method directly. Otherwise, pass in the widget as the first arg
        ## to the function.
        if inspect.ismethod(setFunc) and setFunc.__self__ is not None:
            setFunc(v)
        else:
            setFunc(w, v)

        #name = self.widgetList[w]
        #if name in self.cache and (self.cache[name] != v1):
            #print "%s: Cached value %s != set value %s" % (name, str(self.cache[name]), str(v1))
| mit | 3,841,520,100,122,424,000 | -253,455,128,277,807,500 | 33.975524 | 143 | 0.575085 | false |
wcooley/neomodel | test/test_localisation.py | 6 | 1200 | from neomodel import StructuredNode, StringProperty
from neomodel.contrib import Localised, Locale
class Student(Localised, StructuredNode):
    # Minimal localised node type exercised by the tests below.
    name = StringProperty(unique_index=True)
def setup():
    """Module-level fixture: create every locale the tests rely on."""
    for code in ('fr', 'ar', 'pl', 'es'):
        Locale(code=code).save()
def test_localised():
    """Locales can be added (idempotently), queried and removed on a node."""
    bob = Student(name="Bob").save()

    bob.add_locale(Locale.get("fr"))
    bob.add_locale("ar")
    # Adding the same locale twice must not create a duplicate.
    bob.add_locale(Locale.get("ar"))
    bob.add_locale(Locale.get("pl"))

    assert bob.has_locale("fr")
    assert not bob.has_locale("es")

    bob.remove_locale("fr")
    assert not bob.has_locale("fr")

    assert len(bob.locales) == 2
    assert Locale.get("pl") in bob.locales.all()
    assert Locale.get("ar") in bob.locales.all()
def test_localised_index():
    """locale_index only matches nodes that carry the requested locale."""
    fred = Student(name="Fred").save()
    jim = Student(name="Jim").save()
    katie = Student(name="Katie").save()

    fred.add_locale(Locale.get('fr'))
    jim.add_locale(Locale.get('fr'))
    katie.add_locale(Locale.get('ar'))

    assert Student.locale_index('fr').get(name='Fred')

    # Katie has no 'fr' locale, so the lookup must raise DoesNotExist.
    try:
        Student.locale_index('fr').get(name='Katie')
    except Student.DoesNotExist:
        assert True
    else:
        assert False
| mit | -1,558,966,181,971,659,800 | -3,097,032,342,477,128,000 | 24 | 54 | 0.635 | false |
mumble-voip/libmumble-gyp | test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py | 216 | 1427 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that relinking a solib doesn't relink a dependent executable if the
solib's public API hasn't changed.
"""
import os
import sys
import TestCommon
import TestGyp
# NOTE(fischman): This test will not work with other generators because the
# API-hash-based-mtime-preservation optimization is only implemented in
# ninja.py. It could be extended to the make.py generator as well pretty
# easily, probably.
# (also, it tests ninja-specific out paths, which would have to be generalized
# if this was extended to other generators).
# Only ninja implements the API-hash mtime-preservation optimization.
test = TestGyp.TestGyp(formats=['ninja'])

test.run_gyp('solibs_avoid_relinking.gyp')

# Build the executable, grab its timestamp, touch the solib's source, rebuild
# executable, ensure timestamp hasn't changed.
test.build('solibs_avoid_relinking.gyp', 'b')
test.built_file_must_exist('b' + TestCommon.exe_suffix)
pre_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
# Bump the solib source's mtime so the solib itself relinks.
os.utime(os.path.join(test.workdir, 'solib.cc'),
         (pre_stat.st_atime, pre_stat.st_mtime + 100))
test.sleep()
test.build('solibs_avoid_relinking.gyp', 'b')
post_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))

# The executable must NOT have been relinked (same mtime as before).
if pre_stat.st_mtime != post_stat.st_mtime:
  test.fail_test()
else:
  test.pass_test()
| bsd-3-clause | -4,283,240,210,773,183,000 | 265,573,387,495,840,500 | 33.804878 | 78 | 0.740014 | false |
cheddartv/stockstream.live | webapp/stockstream/api.py | 1 | 5316 | import config
import robinhood
import time
import httputil
import json
def get_api_request(request):
    """GET `request` (a path) from the StockStream API; return parsed JSON."""
    return httputil.get_json_object_from_url(config.SS_API_ENDPOINT + request)
def post_api_request(request, obj):
    """POST `obj` (JSON-encoded) to the StockStream API path `request`."""
    return httputil.post_object_to_url(config.SS_API_ENDPOINT + request, json.dumps(obj))
def get_current_portfolio():
    """Return the current portfolio snapshot."""
    portfolio = get_api_request("/v1/portfolio/current")
    return portfolio
def get_portfolio_values():
    """Return portfolio value snapshots, ordered oldest first."""
    snapshots = get_api_request("/v1/portfolio/values")
    return sorted(snapshots, key=lambda snap: snap['begins_at'])
def get_portfolio_values_by_date(date):
    """Return portfolio value snapshots recorded on `date`."""
    request = "/v1/portfolio/values/date/{}".format(date)
    values = get_api_request(request)
    return values
def get_votes_by_symbol(symbol):
    """Return all votes cast for ticker `symbol`."""
    request = "/v1/votes/symbol/{}".format(symbol)
    votes = get_api_request(request)
    return votes
def get_votes_by_orderId(orderId):
    """Return the votes that influenced a single order."""
    request = "/v1/votes/order/{}".format(orderId)
    votes = get_api_request(request)
    return votes
def get_influence_for_player(username):
    """Return a mapping of order id -> influence for `username`."""
    request = "/v1/influence/player/{}".format(username)
    order_to_influence = get_api_request(request)
    return order_to_influence
def get_votes_by_order_ids(order_ids):
    """Fetch votes for many orders, batching requests 50 ids at a time.

    Returns a dict mapping order id -> votes.
    """
    order_to_votes = {}
    for chunk in split_list(order_ids, 50):
        request = "/v1/votes/orders?ids={}".format(",".join(chunk))
        # Merge this batch's {order_id: votes} mapping into the result;
        # dict.update replaces the manual key-copy loop (which also
        # shadowed the builtin `id`).
        order_to_votes.update(get_api_request(request))
    return order_to_votes
def get_votes_today():
    """Return today's votes.

    NOTE(review): this endpoint uses MM-DD-YYYY while get_orders_today uses
    YYYY-MM-DD -- presumably the two API routes expect different formats;
    confirm against the service.
    """
    today_str = time.strftime("%m-%d-%Y")
    return get_votes_by_date(today_str)
def get_votes_by_date(date):
    """Return all votes cast on `date`."""
    request = "/v1/votes/date/{}".format(date)
    votes = get_api_request(request)
    return votes
def get_votes_by_date_by_symbol(date, symbol):
    """Return votes cast on `date` whose parameter matches `symbol`."""
    votes = get_api_request("/v1/votes/date/{}".format(date))
    return [vote for vote in votes if vote['parameter'] == symbol]
def split_list(lst, n):
    """Yield successive n-sized chunks from lst (last chunk may be shorter)."""
    # Docstring previously referred to a parameter `l` that does not exist.
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
def get_orders_by_id(order_ids):
    """Fetch orders by id (batched 50 per request), sorted by creation time."""
    fetched = []
    for chunk in split_list(order_ids, 50):
        fetched += get_api_request("/v1/orders?ids={}".format(",".join(chunk)))
    return sorted(fetched, key=lambda order: order['created_at'])
def get_orders_by_symbol(symbol):
    """Return all orders for `symbol`, sorted by creation time."""
    request = "/v1/orders/symbol/{}".format(symbol)
    orders = get_api_request(request)
    orders = sorted(orders, key=lambda k: k['created_at'])
    return orders
def get_order_stats():
    """Return aggregate order statistics."""
    request = "/v1/orders/stats"
    stats = get_api_request(request)
    return stats
def get_positions_by_player(username):
    """Return positions attributed to `username`."""
    request = "/v1/positions/player/{}".format(username)
    positions = get_api_request(request)
    return positions
def get_positions_by_symbol(symbol):
    """Return positions held in `symbol`."""
    request = "/v1/positions/symbol/{}".format(symbol)
    positions = get_api_request(request)
    return positions
def get_positions_by_date(date):
    """Return positions opened on `date`."""
    request = "/v1/positions/date/{}".format(date)
    positions = get_api_request(request)
    return positions
def get_open_positions():
    """Return all currently open positions."""
    return get_api_request("/v1/positions/open")
def get_orders_today():
    """Return today's orders (date formatted YYYY-MM-DD)."""
    today_str = time.strftime("%Y-%m-%d")
    return get_orders_by_date(today_str)
def get_orders_by_date(dateStr):
    """Return all orders placed on `dateStr`, sorted by creation time."""
    orders = get_api_request("/v1/orders/date/{}".format(dateStr))
    return sorted(orders, key=lambda order: order['created_at'])
def get_orders_by_date_by_symbol(dateStr, symbol):
    """Return orders placed on `dateStr` for `symbol`, in time order.

    NOTE(review): sorts on 'timestamp' whereas the other order helpers sort
    on 'created_at' -- presumably this endpoint returns a different field;
    confirm against the API response.
    """
    request = "/v1/orders/date/{}".format(dateStr.replace("/", "-"))
    orders = get_api_request(request)
    neworders = sorted(orders, key=lambda k: k['timestamp'])
    filtered = [order for order in neworders if order['symbol'] == symbol]
    return filtered
def get_votes_by_user(username):
    """Return all votes cast by `username`."""
    return get_api_request("/v1/votes/player/{}".format(username))
def get_wallet_for_user(username):
    """Return the wallet belonging to `username`."""
    return get_api_request("/v1/wallets/player/{}".format(username))
def get_ranked_players():
return get_api_request("/v1/players")
def get_ranked_scores():
return get_api_request("/v1/scores")
def get_referral_code():
return get_api_request("/v1/referral")
def get_portfolio():
return get_api_request("/v1/portfolio/current")
def get_symbols_from_portfolio(portfolio):
    """Return the symbol of every asset in the portfolio, in asset order."""
    symbols = []
    for asset in portfolio['assets']:
        symbols.append(asset['symbol'])
    return symbols
def get_net_worth(portfolio):
    """Return the portfolio's cash balance plus the market value of its assets.

    Quotes for all symbols are fetched in one batched call, then each
    holding contributes shares * most recent price.
    """
    symbols = get_symbols_from_portfolio(portfolio)
    # One request covering every symbol in the portfolio.
    symbol_to_quotes = robinhood.api.get_symbol_to_quotes(symbols)
    net_worth = portfolio['cashBalance']
    for asset in portfolio['assets']:
        symbol = asset['symbol']
        quote = symbol_to_quotes[symbol]
        # assumes most_recent_price returns a numeric price - TODO confirm
        # against the robinhood.quote helper.
        net_worth += robinhood.quote.most_recent_price(quote) * asset['shares']
    return net_worth
def get_overview(portfolio, start_value=50000):
    """Summarize a portfolio as its starting capital and current net worth.

    :param portfolio: portfolio dict as returned by get_portfolio()
    :param start_value: starting capital; defaults to the previously
        hard-coded 50000, so existing callers are unaffected
    """
    return {
        "start_value": start_value,
        "net_worth": get_net_worth(portfolio)
    }
def get_registration_status(player_id):
    """Return the registration status of the given player id (API pass-through)."""
    return get_api_request("/v1/registered/{}".format(player_id))
def register_contest_player(registration_object):
    """POST the registration object to register a contest player."""
    return post_api_request("/v1/register", registration_object)
| mit | -4,241,928,402,595,577,300 | -4,238,731,220,847,296,000 | 25.187192 | 89 | 0.668548 | false |
imsparsh/python-for-android | python-modules/twisted/twisted/conch/manhole.py | 61 | 10783 | # -*- test-case-name: twisted.conch.test.test_manhole -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Line-input oriented interactive interpreter loop.
Provides classes for handling Python source input and arbitrary output
interactively from a Twisted application. Also included is syntax coloring
code with support for VT102 terminals, control code handling (^C, ^D, ^Q),
and reasonable handling of Deferreds.
@author: Jp Calderone
"""
import code, sys, StringIO, tokenize
from twisted.conch import recvline
from twisted.internet import defer
from twisted.python.htmlizer import TokenPrinter
class FileWrapper:
    """Minimal write-only file-like object.

    Everything written is forwarded to ``addOutput`` on the wrapped
    object, with network-style CRLF line endings converted to local LF.
    """
    # Attributes some file consumers expect to find on a file object.
    softspace = 0
    state = 'normal'
    def __init__(self, o):
        self.o = o
    def flush(self):
        # Nothing is buffered here, so flushing is a no-op.
        pass
    def write(self, data):
        localized = data.replace('\r\n', '\n')
        self.o.addOutput(localized)
    def writelines(self, lines):
        # Concatenate first so the handler receives a single chunk.
        self.write(''.join(lines))
class ManholeInterpreter(code.InteractiveInterpreter):
    """Interactive Interpreter with special output and Deferred support.
    Aside from the features provided by L{code.InteractiveInterpreter}, this
    class captures sys.stdout output and redirects it to the appropriate
    location (the Manhole protocol instance). It also treats Deferreds
    which reach the top-level specially: each is formatted to the user with
    a unique identifier and a new callback and errback added to it, each of
    which will format the unique identifier and the result with which the
    Deferred fires and then pass it on to the next participant in the
    callback chain.
    """
    # Counter used to hand out a unique display number per Deferred.
    numDeferreds = 0
    def __init__(self, handler, locals=None, filename="<console>"):
        code.InteractiveInterpreter.__init__(self, locals)
        # id(deferred) -> (display number, deferred); entries are removed
        # by the display callbacks once the Deferred fires.
        self._pendingDeferreds = {}
        self.handler = handler
        self.filename = filename
        self.resetBuffer()
    def resetBuffer(self):
        """Reset the input buffer."""
        self.buffer = []
    def push(self, line):
        """Push a line to the interpreter.
        The line should not have a trailing newline; it may have
        internal newlines.  The line is appended to a buffer and the
        interpreter's runsource() method is called with the
        concatenated contents of the buffer as source.  If this
        indicates that the command was executed or invalid, the buffer
        is reset; otherwise, the command is incomplete, and the buffer
        is left as it was after the line was appended.  The return
        value is 1 if more input is required, 0 if the line was dealt
        with in some way (this is the same as runsource()).
        """
        self.buffer.append(line)
        source = "\n".join(self.buffer)
        more = self.runsource(source, self.filename)
        if not more:
            self.resetBuffer()
        return more
    def runcode(self, *a, **kw):
        """Run code with sys.displayhook and sys.stdout temporarily
        redirected to this interpreter, restoring both afterwards."""
        orighook, sys.displayhook = sys.displayhook, self.displayhook
        try:
            # Route print output through the protocol handler.
            origout, sys.stdout = sys.stdout, FileWrapper(self.handler)
            try:
                code.InteractiveInterpreter.runcode(self, *a, **kw)
            finally:
                sys.stdout = origout
        finally:
            sys.displayhook = orighook
    def displayhook(self, obj):
        """Display a top-level expression result, tracking Deferreds."""
        self.locals['_'] = obj
        if isinstance(obj, defer.Deferred):
            # XXX Ick, where is my "hasFired()" interface?
            if hasattr(obj, "result"):
                # Already fired: show its repr directly.
                self.write(repr(obj))
            elif id(obj) in self._pendingDeferreds:
                # Already being tracked: re-display its number.
                self.write("<Deferred #%d>" % (self._pendingDeferreds[id(obj)][0],))
            else:
                # New Deferred: assign the next number and attach display
                # callbacks that will report its eventual result.
                d = self._pendingDeferreds
                k = self.numDeferreds
                d[id(obj)] = (k, obj)
                self.numDeferreds += 1
                obj.addCallbacks(self._cbDisplayDeferred, self._ebDisplayDeferred,
                                 callbackArgs=(k, obj), errbackArgs=(k, obj))
                self.write("<Deferred #%d>" % (k,))
        elif obj is not None:
            self.write(repr(obj))
    def _cbDisplayDeferred(self, result, k, obj):
        # Report the result asynchronously, then pass it down the chain.
        self.write("Deferred #%d called back: %r" % (k, result), True)
        del self._pendingDeferreds[id(obj)]
        return result
    def _ebDisplayDeferred(self, failure, k, obj):
        # Report the failure asynchronously, then pass it down the chain.
        self.write("Deferred #%d failed: %r" % (k, failure.getErrorMessage()), True)
        del self._pendingDeferreds[id(obj)]
        return failure
    # NOTE: "async" as a parameter name is Python 2 era; it became a
    # reserved keyword in Python 3.7.
    def write(self, data, async=False):
        self.handler.addOutput(data, async)
# Control bytes recognized by Manhole's key handlers (see connectionMade).
CTRL_C = '\x03'          # mapped to handle_INT: abort current input
CTRL_D = '\x04'          # mapped to handle_EOF: quit when the line is empty
CTRL_BACKSLASH = '\x1c'  # mapped to handle_QUIT: drop the connection
CTRL_L = '\x0c'          # mapped to handle_FF: redraw the screen
class Manhole(recvline.HistoricRecvLine):
    """Mediator between a fancy line source and an interactive interpreter.
    This accepts lines from its transport and passes them on to a
    L{ManholeInterpreter}.  Control commands (^C, ^D, ^\) are also handled
    with something approximating their normal terminal-mode behavior.  It
    can optionally be constructed with a dict which will be used as the
    local namespace for any code executed.
    """
    # Optional dict used as the interpreter's local namespace.
    namespace = None
    def __init__(self, namespace=None):
        recvline.HistoricRecvLine.__init__(self)
        if namespace is not None:
            # Copy so caller mutations don't leak into the session.
            self.namespace = namespace.copy()
    def connectionMade(self):
        """Set up the interpreter and register the control-key handlers."""
        recvline.HistoricRecvLine.connectionMade(self)
        self.interpreter = ManholeInterpreter(self, self.namespace)
        self.keyHandlers[CTRL_C] = self.handle_INT
        self.keyHandlers[CTRL_D] = self.handle_EOF
        self.keyHandlers[CTRL_L] = self.handle_FF
        self.keyHandlers[CTRL_BACKSLASH] = self.handle_QUIT
    def handle_INT(self):
        """
        Handle ^C as an interrupt keystroke by resetting the current input
        variables to their initial state.
        """
        self.pn = 0
        self.lineBuffer = []
        self.lineBufferIndex = 0
        self.interpreter.resetBuffer()
        self.terminal.nextLine()
        self.terminal.write("KeyboardInterrupt")
        self.terminal.nextLine()
        self.terminal.write(self.ps[self.pn])
    def handle_EOF(self):
        """Handle ^D: quit when the input line is empty, else ring the bell."""
        if self.lineBuffer:
            self.terminal.write('\a')
        else:
            self.handle_QUIT()
    def handle_FF(self):
        """
        Handle a 'form feed' byte - generally used to request a screen
        refresh/redraw.
        """
        self.terminal.eraseDisplay()
        self.terminal.cursorHome()
        self.drawInputLine()
    def handle_QUIT(self):
        """Handle ^\ by dropping the connection."""
        self.terminal.loseConnection()
    def _needsNewline(self):
        # True when the last write did not already end a line; '\x1bE' is
        # a terminal escape treated as equivalent to a newline here.
        w = self.terminal.lastWrite
        return not w.endswith('\n') and not w.endswith('\x1bE')
    # NOTE: "async" as a parameter name is Python 2 era; it became a
    # reserved keyword in Python 3.7.
    def addOutput(self, bytes, async=False):
        """Write interpreter output to the terminal.
        When async, the prompt and current input line are erased first and
        redrawn afterwards so out-of-band output does not corrupt them.
        """
        if async:
            self.terminal.eraseLine()
            self.terminal.cursorBackward(len(self.lineBuffer) + len(self.ps[self.pn]))
        self.terminal.write(bytes)
        if async:
            if self._needsNewline():
                self.terminal.nextLine()
            self.terminal.write(self.ps[self.pn])
            if self.lineBuffer:
                # Re-deliver the pending input so it is redrawn in place.
                oldBuffer = self.lineBuffer
                self.lineBuffer = []
                self.lineBufferIndex = 0
                self._deliverBuffer(oldBuffer)
    def lineReceived(self, line):
        """Feed a completed line to the interpreter and print the next prompt."""
        more = self.interpreter.push(line)
        # Prompt index: 0 = primary prompt, 1 = continuation prompt.
        self.pn = bool(more)
        if self._needsNewline():
            self.terminal.nextLine()
        self.terminal.write(self.ps[self.pn])
class VT102Writer:
    """Colorizer for Python tokens.

    Tokens are fed in one at a time through write(), each bracketed by
    the VT102 color escape for its type.  str() of the writer yields the
    final line of the colorized output.
    """
    # Token type -> VT102 color escape sequence.
    typeToColor = {
        'identifier': '\x1b[31m',
        'keyword': '\x1b[32m',
        'parameter': '\x1b[33m',
        'variable': '\x1b[1;33m',
        'string': '\x1b[35m',
        'number': '\x1b[36m',
        'op': '\x1b[37m'}
    normalColor = '\x1b[0m'
    def __init__(self):
        self.written = []
    def color(self, type):
        """Return the escape sequence for a token type ('' when unknown)."""
        return self.typeToColor.get(type, '')
    def write(self, token, type=None):
        """Record one token; empty tokens and bare '\r' are dropped."""
        if not token or token == '\r':
            return
        escape = self.color(type)
        if escape:
            # Bracket the token so coloring resets afterwards.
            self.written.extend([escape, token, self.normalColor])
        else:
            self.written.append(token)
    def __str__(self):
        """Return the last line of everything written so far."""
        return ''.join(self.written).strip('\n').splitlines()[-1]
def lastColorizedLine(source):
    """Tokenize and colorize the given Python source.
    Returns a VT102-format colorized version of the last line of C{source}.
    """
    w = VT102Writer()
    # TokenPrinter feeds each token (text plus type) into the writer.
    p = TokenPrinter(w.write).printtoken
    s = StringIO.StringIO(source)
    # Python 2 tokenize API: drives the callback once per token.
    tokenize.tokenize(s.readline, p)
    return str(w)
class ColoredManhole(Manhole):
    """A REPL which syntax colors input as users type it.
    """
    def getSource(self):
        """Return a string containing the currently entered source.
        This is only the code which will be considered for execution
        next.
        """
        # Buffered complete lines plus the line being typed right now.
        return ('\n'.join(self.interpreter.buffer) +
                '\n' +
                ''.join(self.lineBuffer))
    def characterReceived(self, ch, moreCharactersComing):
        """Insert one typed character and recolorize the input line."""
        if self.mode == 'insert':
            self.lineBuffer.insert(self.lineBufferIndex, ch)
        else:
            # Overwrite mode: replace the character under the cursor.
            self.lineBuffer[self.lineBufferIndex:self.lineBufferIndex+1] = [ch]
        self.lineBufferIndex += 1
        if moreCharactersComing:
            # Skip it all, we'll get called with another character in
            # like 2 femtoseconds.
            return
        if ch == ' ':
            # Don't bother to try to color whitespace
            self.terminal.write(ch)
            return
        source = self.getSource()
        # Try to write some junk
        try:
            coloredLine = lastColorizedLine(source)
        except tokenize.TokenError:
            # We couldn't do it.  Strange.  Oh well, just add the character.
            self.terminal.write(ch)
        else:
            # Success!  Clear the source on this line.
            self.terminal.eraseLine()
            self.terminal.cursorBackward(len(self.lineBuffer) + len(self.ps[self.pn]) - 1)
            # And write a new, colorized one.
            self.terminal.write(self.ps[self.pn] + coloredLine)
            # And move the cursor to where it belongs
            n = len(self.lineBuffer) - self.lineBufferIndex
            if n:
                self.terminal.cursorBackward(n)
| apache-2.0 | -7,947,521,198,111,876,000 | 6,146,513,708,278,120,000 | 31.092262 | 90 | 0.6092 | false |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.3/django/contrib/gis/gdal/geomtype.py | 404 | 2967 | from django.contrib.gis.gdal.error import OGRException
#### OGRGeomType ####
class OGRGeomType(object):
    """Encapsulates an OGR geometry type, accepting either another
    OGRGeomType, a type name string, or an OGRwkbGeometryType integer."""
    wkb25bit = -2147483648
    # OGRwkbGeometryType code -> canonical string name.
    _types = {0 : 'Unknown',
              1 : 'Point',
              2 : 'LineString',
              3 : 'Polygon',
              4 : 'MultiPoint',
              5 : 'MultiLineString',
              6 : 'MultiPolygon',
              7 : 'GeometryCollection',
              100 : 'None',
              101 : 'LinearRing',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit : 'MultiLineString25D',
              6 + wkb25bit : 'MultiPolygon25D',
              7 + wkb25bit : 'GeometryCollection25D',
              }
    # Reverse lookup keyed by the lower-cased name.
    _str_types = dict((v.lower(), k) for k, v in _types.items())
    def __init__(self, type_input):
        """Normalize the input into an integer OGR geometry type code."""
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, basestring):
            lowered = type_input.lower()
            if lowered == 'geometry':
                lowered = 'unknown'
            num = self._str_types.get(lowered, None)
            if num is None:
                raise OGRException('Invalid OGR String Type "%s"' % lowered)
        elif isinstance(type_input, int):
            if type_input not in self._types:
                raise OGRException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')
        self.num = num
    def __str__(self):
        """Return the short-hand name of this geometry type."""
        return self.name
    def __eq__(self, other):
        """Compare against another OGRGeomType, a name string (case
        insensitive), or an integer type code."""
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        if isinstance(other, basestring):
            return self.name.lower() == other.lower()
        if isinstance(other, int):
            return self.num == other
        return False
    def __ne__(self, other):
        return not (self == other)
    @property
    def name(self):
        """Short-hand string form of the OGR geometry type."""
        return self._types[self.num]
    @property
    def django(self):
        """Django GeometryField class name for this type, or None when no
        field maps to it (LinearRing, None)."""
        base = self.name.replace('25D', '')
        if base in ('LinearRing', 'None'):
            return None
        if base == 'Unknown':
            base = 'Geometry'
        return base + 'Field'
| apache-2.0 | 5,855,423,895,246,725,000 | 2,885,664,723,518,806,500 | 33.905882 | 79 | 0.545332 | false |
kblin/supybot-gsoc | plugins/Dunno/config.py | 15 | 2344 | ###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Supybot configuration hook for the Dunno plugin."""
    # This will be called by supybot to configure this module.  advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not.  You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    # Mark the plugin as explicitly configured by the user.
    conf.registerPlugin('Dunno', True)
Dunno = conf.registerPlugin('Dunno')
conf.registerChannelValue(Dunno, 'prefixNick',
registry.Boolean(True, """Determines whether the bot will prefix the nick
of the user giving an invalid command to the "dunno" response."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 7,271,000,189,288,235,000 | -1,150,444,037,142,018,200 | 47.833333 | 79 | 0.767065 | false |
loganasherjones/python-logstash-async | logstash_async/memory_cache.py | 1 | 3382 | # -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import uuid
from logging import getLogger as get_logger
from datetime import datetime, timedelta
from logstash_async.cache import Cache
class MemoryCache(Cache):
    """Backend implementation for python-logstash-async. Keeps messages in a local, in-memory cache
    while attempting to publish them to logstash. Does not persist through process restarts. Also,
    does not write to disk.

    :param cache: Usually just an empty dictionary
    :param event_ttl: Optional parameter used to expire events in the cache after a time
    """
    logger = get_logger(__name__)

    # ----------------------------------------------------------------------
    def __init__(self, cache, event_ttl=None):
        self._cache = cache
        self._event_ttl = event_ttl

    # ----------------------------------------------------------------------
    def add_event(self, event):
        """Store one event under a fresh UUID, ready to be published."""
        event_id = uuid.uuid4()
        self._cache[event_id] = {
            "event_text": event,
            "pending_delete": False,
            "entry_date": datetime.now(),
            "id": event_id
        }

    # ----------------------------------------------------------------------
    def get_queued_events(self):
        """Return all unpublished events, marking them pending-delete."""
        events = []
        for event in self._cache.values():
            if not event['pending_delete']:
                events.append(event)
                event['pending_delete'] = True
        return events

    # ----------------------------------------------------------------------
    def requeue_queued_events(self, events):
        """Clear the pending-delete flag so the events get published again."""
        for event in events:
            event_to_queue = self._cache.get(event['id'], None)
            # If they gave us an event which is not in the cache,
            # there is really nothing for us to do.  Right now
            # this use-case does not raise an error.  Instead, we
            # just log the message.
            if event_to_queue:
                event_to_queue['pending_delete'] = False
            else:
                # Logger.warn is a deprecated alias of warning(); also use
                # lazy %-args instead of eager string interpolation.
                self.logger.warning(
                    "Could not requeue event with id (%s). It does not appear to be in the cache.",
                    event['id'])

    # ----------------------------------------------------------------------
    def delete_queued_events(self):
        """Drop every event previously handed out by get_queued_events()."""
        ids_to_delete = [event['id'] for event in self._cache.values() if event['pending_delete']]
        self._delete_events(ids_to_delete)

    # ----------------------------------------------------------------------
    def expire_events(self):
        """Drop events older than the configured TTL (no-op without a TTL)."""
        if self._event_ttl is None:
            return

        delete_time = datetime.now() - timedelta(seconds=self._event_ttl)
        ids_to_delete = [event['id'] for event in self._cache.values() if event['entry_date'] < delete_time]
        self._delete_events(ids_to_delete)

    # ----------------------------------------------------------------------
    def _delete_events(self, ids_to_delete):
        for event_id in ids_to_delete:
            # Missing ids are tolerated, mirroring requeue_queued_events().
            event = self._cache.pop(event_id, None)
            if not event:
                self.logger.warning(
                    "Could not delete event with id (%s). It does not appear to be in the cache.",
                    event_id)
| mit | 2,542,749,368,479,078,000 | 7,873,965,888,991,084,000 | 40.753086 | 126 | 0.512714 | false |
bbozhev/flask-test | flask/lib/python2.7/site-packages/wtforms/ext/django/templatetags/wtforms.py | 177 | 2826 | """
Template tags for easy WTForms access in Django templates.
"""
from __future__ import unicode_literals
import re
from django import template
from django.conf import settings
from django.template import Variable
from ....compat import iteritems
register = template.Library()
class FormFieldNode(template.Node):
    """Template node that renders a WTForms field with optional HTML attrs."""
    def __init__(self, field_var, html_attrs):
        # field_var: dotted template path to the field; html_attrs maps
        # attribute names to unresolved template Variables.
        self.field_var = field_var
        self.html_attrs = html_attrs
    def render(self, context):
        """Resolve the field and its attributes against context, render it."""
        try:
            if '.' in self.field_var:
                # Resolve everything up to the last dot as a template
                # variable, then fetch the final attribute from it.
                base, field_name = self.field_var.rsplit('.', 1)
                field = getattr(Variable(base).resolve(context), field_name)
            else:
                field = context[self.field_var]
        except (template.VariableDoesNotExist, KeyError, AttributeError):
            return settings.TEMPLATE_STRING_IF_INVALID
        h_attrs = {}
        for k, v in iteritems(self.html_attrs):
            try:
                h_attrs[k] = v.resolve(context)
            except template.VariableDoesNotExist:
                h_attrs[k] = settings.TEMPLATE_STRING_IF_INVALID
        # Calling a WTForms field renders it; pass attributes as kwargs.
        return field(**h_attrs)
@register.tag(name='form_field')
def do_form_field(parser, token):
    """
    Render a WTForms form field allowing optional HTML attributes.
    Invocation looks like this:
        {% form_field form.username class="big_text" onclick="alert('hello')" %}
    where form.username is the path to the field value we want.  Any number
    of key="value" arguments are supported.  Unquoted values are resolved as
    template variables.
    """
    # Split into: tag name, field path, and the raw attribute string.
    parts = token.contents.split(' ', 2)
    if len(parts) < 2:
        error_text = '%r tag must have the form field name as the first value, followed by optional key="value" attributes.'
        raise template.TemplateSyntaxError(error_text % parts[0])

    html_attrs = {}
    if len(parts) == 3:
        # args_split yields alternating keys and (possibly quoted) values.
        raw_args = list(args_split(parts[2]))
        if (len(raw_args) % 2) != 0:
            raise template.TemplateSyntaxError('%r tag received the incorrect number of key=value arguments.' % parts[0])
        for x in range(0, len(raw_args), 2):
            html_attrs[str(raw_args[x])] = Variable(raw_args[x + 1])

    return FormFieldNode(parts[1], html_attrs)
# Matches a double-quoted string, a single-quoted string, or a bare run of
# characters that are neither whitespace nor '='.
args_split_re = re.compile(r'''("(?:[^"\\]*(?:\\.[^"\\]*)*)"|'(?:[^'\\]*(?:\\.[^'\\]*)*)'|[^\s=]+)''')
def args_split(text):
    """Split space-separated key=value arguments, keeping quoted strings
    intact.  Quoted tokens are re-emitted with their quotes, but with
    backslash escapes of the quote character and of backslash undone."""
    for match in args_split_re.finditer(text):
        token = match.group(0)
        quote = token[0]
        if quote in ('"', "'") and token[-1] == quote:
            inner = token[1:-1].replace('\\' + quote, quote).replace('\\\\', '\\')
            yield quote + inner + quote
        else:
            yield token
| mit | -6,101,646,401,169,170,000 | -7,219,629,349,246,129,000 | 34.325 | 124 | 0.58528 | false |
varunkumta/azure-linux-extensions | VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/EncryptBlockDeviceState.py | 4 | 4192 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import sys
from inspect import ismethod
from time import sleep
from OSEncryptionState import *
class EncryptBlockDeviceState(OSEncryptionState):
    """State-machine step that LUKS-formats the OS block device (with a
    detached header on /boot) and block-copies the root filesystem into it."""
    def __init__(self, context):
        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)

    def should_enter(self):
        """Gate entry on the generic OSEncryptionState checks."""
        self.context.logger.log("Verifying if machine should enter encrypt_block_device state")

        if not super(EncryptBlockDeviceState, self).should_enter():
            return False
        
        self.context.logger.log("Performing enter checks for encrypt_block_device state")
                
        return True

    def enter(self):
        """Format the LUKS container, open it, report status, then copy the
        root filesystem into the opened dm-crypt device."""
        if not self.should_enter():
            return

        self.context.logger.log("Entering encrypt_block_device state")

        # /boot must be mounted: the detached LUKS header lives there.
        # A non-zero exit is tolerated (it may already be mounted).
        self.command_executor.Execute('mount /boot', False)
        
        # self._find_bek_and_execute_action('_dump_passphrase')
        self._find_bek_and_execute_action('_luks_format')
        self._find_bek_and_execute_action('_luks_open')

        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',
                                            status=CommonVariables.extension_success_status,
                                            status_code=str(CommonVariables.success),
                                            message='OS disk encryption started')

        # Raw block copy of the plaintext root device into the encrypted
        # mapping (50 MB blocks).
        self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt bs=52428800'.format(self.rootfs_block_device), True)

    def should_exit(self):
        """Ensure the encrypted mapping is open before leaving this state."""
        self.context.logger.log("Verifying if machine should exit encrypt_block_device state")

        if not os.path.exists('/dev/mapper/osencrypt'):
            self._find_bek_and_execute_action('_luks_open')

        return super(EncryptBlockDeviceState, self).should_exit()

    def _luks_format(self, bek_path):
        """luksFormat the root device using a 32 MB detached header on /boot."""
        self.command_executor.Execute('mkdir /boot/luks', True)
        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)
        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,
                                                                                                                   self.rootfs_block_device),
                                      raise_exception_on_failure=True)

    def _luks_open(self, bek_path):
        """Open the LUKS container as /dev/mapper/osencrypt."""
        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,
                                                                                                                        bek_path),
                                      raise_exception_on_failure=True)

    def _dump_passphrase(self, bek_path):
        """Debug helper: octal-dump the key file contents to the log."""
        proc_comm = ProcessCommunicator()

        self.command_executor.Execute(command_to_execute="od -c {0}".format(bek_path),
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)

        self.context.logger.log("Passphrase:")
        self.context.logger.log(proc_comm.stdout)

    def _find_bek_and_execute_action(self, callback_method_name):
        """Locate the BEK passphrase file and invoke the named method on it."""
        callback_method = getattr(self, callback_method_name)
        if not ismethod(callback_method):
            raise Exception("{0} is not a method".format(callback_method_name))

        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)
        callback_method(bek_path)
| apache-2.0 | 6,385,464,466,111,180,000 | -8,272,641,254,333,115,000 | 42.666667 | 146 | 0.614265 | false |
kstrauser/ansible | v1/tests/TestConstants.py | 164 | 1600 | # -*- coding: utf-8 -*-
import unittest
from ansible.constants import get_config
import ConfigParser
import random
import string
import os
def random_string(length):
    """Return a random string of *length* uppercase ASCII letters.

    Bug fix: the ``length`` parameter was previously ignored and the
    result was always 6 characters long (``range(6)`` was hard-coded).
    All existing callers pass 6, so their behavior is unchanged.
    """
    return ''.join(random.choice(string.ascii_uppercase) for x in range(length))
# Shared parser over the ansible.cfg test fixture, read once at import time
# and used by every test case below.
p = ConfigParser.ConfigParser()
p.read(os.path.join(os.path.dirname(__file__), 'ansible.cfg'))
class TestConstants(unittest.TestCase):
    """Unit tests for ansible.constants.get_config precedence rules:
    environment variable > config file value > supplied default."""

    #####################################
    ### get_config unit tests

    def test_configfile_and_env_both_set(self):
        # The environment variable wins over the config file value.
        r = random_string(6)
        env_var = 'ANSIBLE_TEST_%s' % r
        os.environ[env_var] = r

        res = get_config(p, 'defaults', 'test_key', env_var, 'default')
        del os.environ[env_var]

        assert res == r

    def test_configfile_set_env_not_set(self):
        # With no env override, the value from ansible.cfg is returned.
        r = random_string(6)
        env_var = 'ANSIBLE_TEST_%s' % r
        assert env_var not in os.environ

        res = get_config(p, 'defaults', 'test_key', env_var, 'default')
        # (Stray Python 2 "print res" debug statement removed; it was a
        # syntax error under Python 3 and added noise to test output.)

        assert res == 'test_value'

    def test_configfile_not_set_env_set(self):
        # A missing config key falls back to the environment variable.
        r = random_string(6)
        env_var = 'ANSIBLE_TEST_%s' % r
        os.environ[env_var] = r

        res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
        del os.environ[env_var]

        assert res == r

    def test_configfile_not_set_env_not_set(self):
        # Neither source is set: the supplied default is returned.
        r = random_string(6)
        env_var = 'ANSIBLE_TEST_%s' % r
        assert env_var not in os.environ

        res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')

        assert res == 'default'
| gpl-3.0 | 4,609,406,265,641,165,300 | 6,167,318,800,139,580,000 | 24 | 75 | 0.578125 | false |
minorua/QGIS | python/plugins/processing/algs/qgis/RectanglesOvalsDiamondsVariable.py | 10 | 16185 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RectanglesOvalsDiamondsVariable.py
---------------------
Date : April 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import math
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (NULL,
QgsWkbTypes,
QgsFeature,
QgsFeatureSink,
QgsGeometry,
QgsPointXY,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class RectanglesOvalsDiamondsVariable(QgisAlgorithm):
    """Processing algorithm: replace each input point with a rectangle,
    diamond or oval polygon whose width, height and rotation are read from
    feature attributes."""

    # Parameter / output identifiers used by the processing framework.
    INPUT = 'INPUT'
    SHAPE = 'SHAPE'
    WIDTH = 'WIDTH'
    HEIGHT = 'HEIGHT'
    ROTATION = 'ROTATION'
    SEGMENTS = 'SEGMENTS'
    OUTPUT = 'OUTPUT'

    def group(self):
        """Return the localized group name shown in the toolbox."""
        return self.tr('Vector geometry')

    def groupId(self):
        """Return the stable, non-localized group id."""
        return 'vectorgeometry'

    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        """Declare the algorithm's parameters and its polygon output sink."""
        # Order must match the shape indexes tested in processAlgorithm
        # (0 = rectangles, 1 = diamonds, else ovals).
        self.shapes = [self.tr('Rectangles'), self.tr('Diamonds'), self.tr('Ovals')]

        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer'),
                                                              [QgsProcessing.TypeVectorPoint]))
        self.addParameter(QgsProcessingParameterEnum(self.SHAPE,
                                                     self.tr('Buffer shape'), options=self.shapes))
        self.addParameter(QgsProcessingParameterField(self.WIDTH,
                                                      self.tr('Width field'),
                                                      parentLayerParameterName=self.INPUT,
                                                      type=QgsProcessingParameterField.Numeric))
        self.addParameter(QgsProcessingParameterField(self.HEIGHT,
                                                      self.tr('Height field'),
                                                      parentLayerParameterName=self.INPUT,
                                                      type=QgsProcessingParameterField.Numeric))
        # Rotation is optional; when omitted the shapes stay axis-aligned.
        self.addParameter(QgsProcessingParameterField(self.ROTATION,
                                                      self.tr('Rotation field'),
                                                      parentLayerParameterName=self.INPUT,
                                                      type=QgsProcessingParameterField.Numeric,
                                                      optional=True))
        # Segment count is only used when approximating ovals.
        self.addParameter(QgsProcessingParameterNumber(self.SEGMENTS,
                                                       self.tr('Number of segments'),
                                                       minValue=1,
                                                       defaultValue=36))

        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
                                                            self.tr('Output'),
                                                            type=QgsProcessing.TypeVectorPolygon))
    def name(self):
        """Return the stable algorithm id used in scripts and models."""
        return 'rectanglesovalsdiamondsvariable'

    def displayName(self):
        """Return the translated, human-visible algorithm name."""
        return self.tr('Rectangles, ovals, diamonds (variable)')
    def processAlgorithm(self, parameters, context, feedback):
        """Resolve parameters, then dispatch to the shape-specific writer."""
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))

        shape = self.parameterAsEnum(parameters, self.SHAPE, context)
        width_field = self.parameterAsString(parameters, self.WIDTH, context)
        height_field = self.parameterAsString(parameters, self.HEIGHT, context)
        rotation_field = self.parameterAsString(parameters, self.ROTATION, context)
        segments = self.parameterAsInt(parameters, self.SEGMENTS, context)

        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               source.fields(), QgsWkbTypes.Polygon, source.sourceCrs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        # Field names -> field indexes; the shape writers index features by
        # these.  A negative rotation index selects the unrotated code path
        # (presumably lookupField returns -1 for the omitted optional field
        # - confirm against the QGIS API).
        width = source.fields().lookupField(width_field)
        height = source.fields().lookupField(height_field)
        rotation = source.fields().lookupField(rotation_field)

        if shape == 0:
            self.rectangles(sink, source, width, height, rotation, feedback)
        elif shape == 1:
            self.diamonds(sink, source, width, height, rotation, feedback)
        else:
            self.ovals(sink, source, width, height, rotation, segments, feedback)

        return {self.OUTPUT: dest_id}
    def rectangles(self, sink, source, width, height, rotation, feedback):
        """Write a rectangle around every input point.

        width/height/rotation are field *indexes*; rotation < 0 selects the
        branch without per-feature rotation.  Features with empty width,
        height or (when required) angle are reported and skipped.
        """
        ft = QgsFeature()

        features = source.getFeatures()
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        if rotation >= 0:
            for current, feat in enumerate(features):
                if feedback.isCanceled():
                    break

                if not feat.hasGeometry():
                    continue

                w = feat[width]
                h = feat[height]
                angle = feat[rotation]
                # block 0/NULL width or height, but allow 0 as angle value
                if not w or not h:
                    feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
                                                                 'width or height. '
                                                                 'Skipping…').format(feat.id()))
                    continue
                if angle is NULL:
                    feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
                                                                 'angle. '
                                                                 'Skipping…').format(feat.id()))
                    continue

                xOffset = w / 2.0
                yOffset = h / 2.0
                # Rotation angle in radians.
                phi = angle * math.pi / 180

                point = feat.geometry().asPoint()
                x = point.x()
                y = point.y()
                # Corner offsets of the axis-aligned rectangle, rotated by
                # phi and translated to the feature's point.
                points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)]
                polygon = [[QgsPointXY(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
                                       -i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]

                ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
                ft.setAttributes(feat.attributes())
                sink.addFeature(ft, QgsFeatureSink.FastInsert)

                feedback.setProgress(int(current * total))
        else:
            # No rotation field: emit axis-aligned rectangles.
            for current, feat in enumerate(features):
                if feedback.isCanceled():
                    break

                if not feat.hasGeometry():
                    continue

                w = feat[width]
                h = feat[height]
                if not w or not h:
                    feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
                                                                 'width or height. '
                                                                 'Skipping…').format(feat.id()))
                    continue

                xOffset = w / 2.0
                yOffset = h / 2.0

                point = feat.geometry().asPoint()
                x = point.x()
                y = point.y()
                points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)]
                polygon = [[QgsPointXY(i[0] + x, i[1] + y) for i in points]]

                ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
                ft.setAttributes(feat.attributes())
                sink.addFeature(ft, QgsFeatureSink.FastInsert)

                feedback.setProgress(int(current * total))
def diamonds(self, sink, source, width, height, rotation, feedback):
features = source.getFeatures()
ft = QgsFeature()
total = 100.0 / source.featureCount() if source.featureCount() else 0
if rotation >= 0:
for current, feat in enumerate(features):
if feedback.isCanceled():
break
if not feat.hasGeometry():
continue
w = feat[width]
h = feat[height]
angle = feat[rotation]
# block 0/NULL width or height, but allow 0 as angle value
if not w or not h:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'width or height. '
'Skipping…').format(feat.id()))
continue
if angle is NULL:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'angle. '
'Skipping…').format(feat.id()))
continue
xOffset = w / 2.0
yOffset = h / 2.0
phi = angle * math.pi / 180
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)]
polygon = [[QgsPointXY(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
-i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
ft.setAttributes(feat.attributes())
sink.addFeature(ft, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
else:
for current, feat in enumerate(features):
if feedback.isCanceled():
break
if not feat.hasGeometry():
continue
w = feat[width]
h = feat[height]
if not w or not h:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'width or height. '
'Skipping…').format(feat.id()))
continue
xOffset = w / 2.0
yOffset = h / 2.0
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)]
polygon = [[QgsPointXY(i[0] + x, i[1] + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
ft.setAttributes(feat.attributes())
sink.addFeature(ft, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
def ovals(self, sink, source, width, height, rotation, segments, feedback):
features = source.getFeatures()
ft = QgsFeature()
total = 100.0 / source.featureCount() if source.featureCount() else 0
if rotation >= 0:
for current, feat in enumerate(features):
if feedback.isCanceled():
break
if not feat.hasGeometry():
continue
w = feat[width]
h = feat[height]
angle = feat[rotation]
# block 0/NULL width or height, but allow 0 as angle value
if not w or not h:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'width or height. '
'Skipping…').format(feat.id()))
continue
if angle == NULL:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'angle. '
'Skipping…').format(feat.id()))
continue
xOffset = w / 2.0
yOffset = h / 2.0
phi = angle * math.pi / 180
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = []
for t in [(2 * math.pi) / segments * i for i in range(segments)]:
points.append((xOffset * math.cos(t), yOffset * math.sin(t)))
polygon = [[QgsPointXY(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
-i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
ft.setAttributes(feat.attributes())
sink.addFeature(ft, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
else:
for current, feat in enumerate(features):
if feedback.isCanceled():
break
if not feat.hasGeometry():
continue
w = feat[width]
h = feat[height]
if not w or not h:
feedback.pushInfo(QCoreApplication.translate('RectanglesOvalsDiamondsVariable', 'Feature {} has empty '
'width or height. '
'Skipping…').format(feat.id()))
continue
xOffset = w / 2.0
yOffset = h / 2.0
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = []
for t in [(2 * math.pi) / segments * i for i in range(segments)]:
points.append((xOffset * math.cos(t), yOffset * math.sin(t)))
polygon = [[QgsPointXY(i[0] + x, i[1] + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygonXY(polygon))
ft.setAttributes(feat.attributes())
sink.addFeature(ft, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
| gpl-2.0 | 5,886,689,314,320,444,000 | 534,477,199,660,991,400 | 44.669492 | 123 | 0.463722 | false |
dataxu/ansible | lib/ansible/modules/cloud/amazon/sqs_queue.py | 23 | 10133 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
---
module: sqs_queue
short_description: Creates or deletes AWS SQS queues.
description:
- Create or delete AWS SQS queues.
- Update attributes on existing queues.
version_added: "2.0"
author:
- Alan Loi (@loia)
- Fernando Jose Pando (@nand0p)
- Nadir Lloret (@nadirollo)
requirements:
- "boto >= 2.33.0"
options:
state:
description:
- Create or delete the queue
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the queue.
required: true
default_visibility_timeout:
description:
- The default visibility timeout in seconds.
required: false
default: null
message_retention_period:
description:
- The message retention period in seconds.
required: false
default: null
maximum_message_size:
description:
- The maximum message size in bytes.
required: false
default: null
delivery_delay:
description:
- The delivery delay in seconds.
required: false
default: null
receive_message_wait_time:
description:
- The receive message wait time in seconds.
required: false
default: null
policy:
description:
- The json dict policy to attach to queue
required: false
default: null
version_added: "2.1"
redrive_policy:
description:
- json dict with the redrive_policy (see example)
required: false
default: null
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
"""
RETURN = '''
default_visibility_timeout:
description: The default visibility timeout in seconds.
type: int
returned: always
sample: 30
delivery_delay:
description: The delivery delay in seconds.
type: int
returned: always
sample: 0
maximum_message_size:
description: The maximum message size in bytes.
type: int
returned: always
sample: 262144
message_retention_period:
description: The message retention period in seconds.
type: int
returned: always
sample: 345600
name:
description: Name of the SQS Queue
type: string
returned: always
sample: "queuename-987d2de0"
queue_arn:
description: The queue's Amazon resource name (ARN).
type: string
returned: on successful creation or update of the queue
sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
receive_message_wait_time:
description: The receive message wait time in seconds.
type: int
returned: always
sample: 0
region:
description: Region that the queue was created within
type: string
returned: always
sample: 'us-east-1'
'''
EXAMPLES = '''
# Create SQS queue with redrive policy
- sqs_queue:
name: my-queue
region: ap-southeast-2
default_visibility_timeout: 120
message_retention_period: 86400
maximum_message_size: 1024
delivery_delay: 30
receive_message_wait_time: 20
policy: "{{ json_dict }}"
redrive_policy:
maxReceiveCount: 5
deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
# Delete SQS queue
- sqs_queue:
name: my-queue
region: ap-southeast-2
state: absent
'''
import json
import traceback
try:
import boto.sqs
from boto.exception import BotoServerError, NoAuthHandlerFound
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def create_or_update_sqs_queue(connection, module):
    """Create the named SQS queue if missing, otherwise update its attributes.

    On success exits the module with the effective queue settings; on a boto
    server error fails the module with the traceback in ``msg``.
    """
    queue_name = module.params.get('name')
    queue_attributes = dict(
        default_visibility_timeout=module.params.get('default_visibility_timeout'),
        message_retention_period=module.params.get('message_retention_period'),
        maximum_message_size=module.params.get('maximum_message_size'),
        delivery_delay=module.params.get('delivery_delay'),
        receive_message_wait_time=module.params.get('receive_message_wait_time'),
        policy=module.params.get('policy'),
        redrive_policy=module.params.get('redrive_policy')
    )
    result = dict(region=module.params.get('region'), name=queue_name)
    result.update(queue_attributes)
    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # Queue already exists: apply only the attributes that differ.
            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
        else:
            # Queue is missing: creating it is always a change.
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            result['changed'] = True
        if not module.check_mode:
            # Read back the effective settings so the module reports reality,
            # not just what was requested.
            readback = (
                ('queue_arn', 'QueueArn'),
                ('default_visibility_timeout', 'VisibilityTimeout'),
                ('message_retention_period', 'MessageRetentionPeriod'),
                ('maximum_message_size', 'MaximumMessageSize'),
                ('delivery_delay', 'DelaySeconds'),
                ('receive_message_wait_time', 'ReceiveMessageWaitTimeSeconds'),
            )
            for result_key, attr_name in readback:
                result[result_key] = queue.get_attributes(attr_name)[attr_name]
    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None,
                     redrive_policy=None):
    """Push each requested attribute onto *queue*.

    Returns True if any attribute was (or, in check mode, would be) changed.
    A value of None means "leave that attribute alone".
    """
    updates = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
        ('RedrivePolicy', redrive_policy),
    )
    changed = False
    for attribute, value in updates:
        # Note the order: set_queue_attribute must run even when changed is
        # already True, so every attribute is applied.
        changed = set_queue_attribute(queue, attribute, value,
                                      check_mode=check_mode) or changed
    return changed
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set a single queue attribute if it differs from the current value.

    Returns True when the attribute was (or, in check mode, would be)
    changed. A falsy *value* means "not requested" and is skipped, except
    that an explicit 0 is a legitimate setting and is applied.
    """
    if not value and value != 0:
        return False

    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; treat any lookup failure as "no current value".
        existing_value = ''

    # convert dict attributes to JSON strings (sort keys for comparing)
    if attribute in ['Policy', 'RedrivePolicy']:
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)

    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True

    return False
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists and report whether it changed."""
    queue_name = module.params.get('name')
    result = dict(region=module.params.get('region'), name=queue_name)
    try:
        queue = connection.get_queue(queue_name)
        if not queue:
            # Nothing to do: the queue is already gone.
            result['changed'] = False
        else:
            # Delete first, then record the change, so a failure path reports
            # the same result dict as the original implementation.
            if not module.check_mode:
                connection.delete_queue(queue)
            result['changed'] = True
    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def main():
    """Module entry point: parse arguments, connect to SQS, dispatch on state."""
    # Start from the shared EC2 argument spec (region, keys, profile, ...)
    # and add the queue-specific options.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
        redrive_policy=dict(type='dict', required=False),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)
    # Hard requirements: boto must be importable and a region must be known.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')
    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    # Each branch exits the module itself (exit_json/fail_json).
    state = module.params.get('state')
    if state == 'present':
        create_or_update_sqs_queue(connection, module)
    elif state == 'absent':
        delete_sqs_queue(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,272,539,636,418,747,000 | 7,987,911,841,456,577,000 | 30.468944 | 136 | 0.641962 | false |
akretion/sale-workflow | partner_prepayment/model/sale.py | 37 | 1324 | # -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm
class sale_order(orm.Model):
    _inherit = 'sale.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        """Extend the partner onchange: force the 'prepaid' order policy for
        partners flagged with ``use_prepayment``."""
        res = super(sale_order, self).onchange_partner_id(
            cr, uid, ids, part, context=context)
        if not part:
            return res
        partner = self.pool.get('res.partner').browse(cr, uid, part,
                                                      context=context)
        if partner.use_prepayment:
            res['value']['order_policy'] = 'prepaid'
        return res
| agpl-3.0 | -6,820,555,246,374,759,000 | -1,387,210,743,367,276,000 | 34.783784 | 77 | 0.670695 | false |
jorge2703/scikit-learn | sklearn/utils/optimize.py | 135 | 5671 | """
Our own implementation of the Newton algorithm
Unlike the scipy.optimize version, this version of the Newton conjugate
gradient solver uses only one function call to retrieve the
func value, the gradient value and a callable for the Hessian matvec
product. If the function call is very expensive (e.g. for logistic
regression with large design matrix), this approach gives very
significant speedups.
"""
# This is a modified file from scipy.optimize
# Original authors: Travis Oliphant, Eric Jones
# Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour
# License: BSD
import numpy as np
import warnings
from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1
class _LineSearchError(RuntimeError):
    """Raised when neither Wolfe line search finds a suitable step size."""
    pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found
    """
    result = line_search_wolfe1(f, fprime, xk, pk, gfk,
                                old_fval, old_old_fval,
                                **kwargs)
    if result[0] is not None:
        return result
    # wolfe1 failed (step size is None): retry with the alternative search.
    result = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                old_fval, old_old_fval, **kwargs)
    if result[0] is None:
        raise _LineSearchError()
    return result
def _cg(fhess_p, fgrad, maxiter, tol):
    """
    Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
    with a conjugate gradient descent.

    Parameters
    ----------
    fhess_p : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient

    fgrad : ndarray, shape (n_features,) or (n_features + 1,)
        Gradient vector

    maxiter : int
        Number of CG iterations.

    tol : float
        Stopping criterion.

    Returns
    -------
    xsupi : ndarray, shape (n_features,) or (n_features + 1,)
        Estimated solution
    """
    solution = np.zeros(len(fgrad), dtype=fgrad.dtype)
    residual = fgrad
    direction = -residual
    iteration = 0
    rs_old = np.dot(residual, residual)
    while iteration <= maxiter:
        if np.sum(np.abs(residual)) <= tol:
            break
        hess_dir = fhess_p(direction)
        # check curvature
        curvature = np.dot(direction, hess_dir)
        if 0 <= curvature <= 3 * np.finfo(np.float64).eps:
            break
        if curvature < 0:
            if iteration > 0:
                break
            # fall back to steepest descent direction
            solution += rs_old / curvature * direction
            break
        step = rs_old / curvature
        solution += step * direction
        residual = residual + step * hess_dir
        rs_new = np.dot(residual, residual)
        direction = -residual + (rs_new / rs_old) * direction
        iteration += 1
        rs_old = rs_new  # update np.dot(ri,ri) for next time.
    return solution
def newton_cg(grad_hess, func, grad, x0, args=(), tol=1e-4,
              maxiter=100, maxinner=200, line_search=True, warn=True):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.

    Parameters
    ----------
    grad_hess : callable
        Should return the gradient and a callable returning the matvec product
        of the Hessian.

    func : callable
        Should return the value of the function.

    grad : callable
        Should return the function value and the gradient. This is used
        by the linesearch functions.

    x0 : array of float
        Initial guess.

    args: tuple, optional
        Arguments passed to func_grad_hess, func and grad.

    tol : float
        Stopping criterion. The iteration will stop when
        ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    maxiter : int
        Number of Newton iterations.

    maxinner : int
        Number of CG iterations.

    line_search: boolean
        Whether to use a line search or not.

    warn: boolean
        Whether to warn when didn't converge.

    Returns
    -------
    xk : ndarray of float
        Estimated minimum.
    """
    x0 = np.asarray(x0).flatten()
    xk = x0
    k = 1
    if line_search:
        old_fval = func(x0, *args)
        old_old_fval = None
    # Outer loop: our Newton iteration
    while k <= maxiter:
        # Compute a search direction pk by applying the CG method to
        #  del2 f(xk) p = - fgrad f(xk) starting from 0.
        fgrad, fhess_p = grad_hess(xk, *args)
        absgrad = np.abs(fgrad)
        if np.max(absgrad) < tol:
            # Converged: largest gradient component is below tolerance.
            break
        maggrad = np.sum(absgrad)
        # Inexact Newton: the CG tolerance (forcing term) shrinks as the
        # gradient shrinks, so late iterations solve the system more exactly.
        eta = min([0.5, np.sqrt(maggrad)])
        termcond = eta * maggrad
        # Inner loop: solve the Newton update by conjugate gradient, to
        # avoid inverting the Hessian
        xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond)
        alphak = 1.0
        if line_search:
            try:
                alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
                    _line_search_wolfe12(func, grad, xk, xsupi, fgrad,
                                         old_fval, old_old_fval, args=args)
            except _LineSearchError:
                warnings.warn('Line Search failed')
                break
        xk = xk + alphak * xsupi  # upcast if necessary
        k += 1
    if warn and k > maxiter:
        warnings.warn("newton-cg failed to converge. Increase the "
                      "number of iterations.")
    return xk
| bsd-3-clause | 2,358,169,133,639,715,000 | -3,151,820,338,360,609,000 | 27.074257 | 78 | 0.578558 | false |
rruebner/odoo | openerp/addons/base/tests/test_qweb.py | 47 | 2594 | # -*- coding: utf-8 -*-
import cgi
from lxml import etree
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
class TestQWebTField(common.TransactionCase):
    """Tests for QWeb ``t-field`` rendering with inherit_branding enabled."""

    def setUp(self):
        super(TestQWebTField, self).setUp()
        self.engine = self.registry('ir.qweb')

    def context(self, values):
        """Build a QWebContext with branding enabled so rendered fields carry
        the data-oe-* attributes asserted below."""
        return ir_qweb.QWebContext(
            self.cr, self.uid, values, context={'inherit_branding': True})

    def test_trivial(self):
        """A t-field span renders the value plus branding metadata."""
        field = etree.Element('span', {'t-field': u'company.name'})
        Companies = self.registry('res.company')
        company_id = Companies.create(self.cr, self.uid, {
            'name': "My Test Company"
        })
        result = self.engine.render_node(field, self.context({
            'company': Companies.browse(self.cr, self.uid, company_id),
        }))

        self.assertEqual(
            result,
            '<span data-oe-model="res.company" data-oe-id="%d" '
            'data-oe-field="name" data-oe-type="char" '
            'data-oe-expression="company.name">%s</span>' % (
                company_id,
                "My Test Company",))

    def test_i18n(self):
        """Non-ASCII values are rendered UTF-8 encoded and HTML-escaped."""
        field = etree.Element('span', {'t-field': u'company.name'})
        Companies = self.registry('res.company')
        s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
        company_id = Companies.create(self.cr, self.uid, {
            'name': s,
        })
        result = self.engine.render_node(field, self.context({
            'company': Companies.browse(self.cr, self.uid, company_id),
        }))

        self.assertEqual(
            result,
            '<span data-oe-model="res.company" data-oe-id="%d" '
            'data-oe-field="name" data-oe-type="char" '
            'data-oe-expression="company.name">%s</span>' % (
                company_id,
                cgi.escape(s.encode('utf-8')),))

    def test_reject_crummy_tags(self):
        """t-field is refused on layout-sensitive tags such as <td>."""
        field = etree.Element('td', {'t-field': u'company.name'})

        with self.assertRaisesRegexp(
                AssertionError,
                r'^RTE widgets do not work correctly'):
            self.engine.render_node(field, self.context({
                'company': None
            }))

    def test_reject_t_tag(self):
        """t-field is refused on the structural <t> element."""
        field = etree.Element('t', {'t-field': u'company.name'})

        with self.assertRaisesRegexp(
                AssertionError,
                r'^t-field can not be used on a t element'):
            self.engine.render_node(field, self.context({
                'company': None
            }))
| agpl-3.0 | 3,842,490,692,947,705,000 | 5,904,662,474,108,521,000 | 33.466667 | 74 | 0.542747 | false |
drbild/boto | boto/route53/domains/__init__.py | 129 | 1711 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
    """
    Get all available regions for the Amazon Route 53 Domains service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported locally rather than at module level — presumably to avoid a
    # circular import with layer1, which imports from this package. TODO confirm.
    from boto.route53.domains.layer1 import Route53DomainsConnection
    return get_regions('route53domains',
                       connection_cls=Route53DomainsConnection)
def connect_to_region(region_name, **kw_params):
    """Return a connection to *region_name*, or None if the region is unknown."""
    match = next((r for r in regions() if r.name == region_name), None)
    if match is None:
        return None
    return match.connect(**kw_params)
| mit | -4,999,743,380,071,204,000 | 1,760,738,854,081,068,300 | 41.775 | 77 | 0.738165 | false |
switchboardOp/ansible | lib/ansible/parsing/yaml/loader.py | 74 | 1986 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from _yaml import CParser, CEmitter
HAVE_PYYAML_C = True
except ImportError:
HAVE_PYYAML_C = False
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
if HAVE_PYYAML_C:
    # Fast path: libyaml's C parser provides scanning/parsing/composing,
    # while AnsibleConstructor adds vault decryption and position tracking.
    class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
        def __init__(self, stream, file_name=None, vault_password=None):
            """Build a loader over *stream*; *file_name* is used for error
            reporting and *vault_password* for decrypting !vault values."""
            CParser.__init__(self, stream)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
else:
    # Pure-Python fallback when the libyaml bindings are unavailable:
    # assemble the same pipeline from PyYAML's Python components.
    from yaml.composer import Composer
    from yaml.reader import Reader
    from yaml.scanner import Scanner
    from yaml.parser import Parser

    class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
        def __init__(self, stream, file_name=None, vault_password=None):
            """Build a loader over *stream*; *file_name* is used for error
            reporting and *vault_password* for decrypting !vault values."""
            Reader.__init__(self, stream)
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
| gpl-3.0 | 3,523,472,160,877,020,700 | 1,828,286,863,849,002,200 | 37.192308 | 99 | 0.700906 | false |
airqj/ardupilot-raspilot | Tools/LogAnalyzer/tests/TestParams.py | 261 | 3119 | from LogAnalyzer import Test,TestResult
import DataflashLog
import math # for isnan()
class TestParams(Test):
'''test for any obviously bad parameters in the config'''
def __init__(self):
Test.__init__(self)
self.name = "Parameters"
# helper functions
def __checkParamIsEqual(self, paramName, expectedValue, logdata):
value = logdata.parameters[paramName]
if value != expectedValue:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = self.result.statusMessage + "%s set to %s, expecting %s\n" % (paramName, `value`, `expectedValue`)
def __checkParamIsLessThan(self, paramName, maxValue, logdata):
value = logdata.parameters[paramName]
if value >= maxValue:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = self.result.statusMessage + "%s set to %s, expecting less than %s\n" % (paramName, `value`, `maxValue`)
def __checkParamIsMoreThan(self, paramName, minValue, logdata):
value = logdata.parameters[paramName]
if value <= minValue:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = self.result.statusMessage + "%s set to %s, expecting less than %s\n" % (paramName, `value`, `minValue`)
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD # GOOD by default, tests below will override it if they fail
# check all params for NaN
for name,value in logdata.parameters.iteritems():
if math.isnan(value):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = self.result.statusMessage + name + " is NaN\n"
try:
# add parameter checks below using the helper functions, any failures will trigger a FAIL status and accumulate info in statusMessage
# if more complex checking or correlations are required you can access parameter values directly using the logdata.parameters[paramName] dict
if logdata.vehicleType == "ArduCopter":
self.__checkParamIsEqual ("MAG_ENABLE", 1, logdata)
self.__checkParamIsLessThan("THR_MIN", 200, logdata)
self.__checkParamIsLessThan("THR_MID", 701, logdata)
self.__checkParamIsMoreThan("THR_MID", 299, logdata)
# TODO: add more parameter tests, these are just an example...
elif logdata.vehicleType == "ArduPlane":
# TODO: add parameter checks for plane...
pass
elif logdata.vehicleType == "ArduRover":
# TODO: add parameter checks for rover...
pass
if self.result.status == TestResult.StatusType.FAIL:
self.result.statusMessage = "Bad parameters found:\n" + self.result.statusMessage
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found' | gpl-3.0 | -6,571,384,614,137,038,000 | 7,853,204,200,741,159,000 | 48.52381 | 153 | 0.636422 | false |
sorenk/ansible | lib/ansible/modules/clustering/k8s/k8s_scale.py | 18 | 3282 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_scale
short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job.
version_added: "2.5"
author: "Chris Houseknecht (@chouseknecht)"
description:
- Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicatSet,
or Replication Controller, or the parallelism attribute of a Job. Supports check mode.
extends_documentation_fragment:
- k8s_name_options
- k8s_auth_options
- k8s_resource_options
- k8s_scale_options
requirements:
- "python >= 2.7"
- "openshift == 0.4.3"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Scale deployment up, and extend timeout
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
replicas: 3
wait_timeout: 60
- name: Scale deployment down when current replicas match
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
current_replicas: 3
replicas: 2
- name: Increase job parallelism
k8s_scale:
api_version: batch/v1
kind: job
name: pi-with-timeout
namespace: testing
replicas: 2
# Match object using local file or inline definition
- name: Scale deployment based on a file from the local filesystem
k8s_scale:
src: /myproject/elastic_deployment.yml
replicas: 3
wait: no
- name: Scale deployment based on a template output
k8s_scale:
resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment based on a file from the Ansible controller filesystem
k8s_scale:
resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
'''
RETURN = '''
result:
description:
- If a change was made, will return the patched object, otherwise returns the existing object.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
'''
from ansible.module_utils.k8s.scale import KubernetesAnsibleScaleModule
def main():
    # All argument parsing, authentication and scaling logic lives in the
    # shared helper class; this entry point only instantiates and runs it.
    KubernetesAnsibleScaleModule().execute_module()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,967,039,773,891,838,000 | -2,531,362,657,400,818,000 | 24.84252 | 106 | 0.677026 | false |
nishad-jobsglobal/odoo-marriot | addons/email_template/html2text.py | 440 | 14143 | #!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "2.36"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
if not hasattr(__builtins__, 'True'): True, False = 1, 0
import re, sys, urllib, htmlentitydefs, codecs
import sgmllib
import urlparse
sgmllib.charref = re.compile('&#([xX]?[0-9a-fA-F]+)[^0-9a-fA-F]')
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii psuedo-replacements
UNICODE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False
### Entity Nonsense ###
def name2cp(k):
    """Return the Unicode codepoint for the HTML entity name *k*.

    Falls back to decoding the latin-1 replacement text on very old
    Pythons that lack htmlentitydefs.name2codepoint.
    """
    if k == 'apos': return ord("'")
    if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    else:
        k = htmlentitydefs.entitydefs[k]
        if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
        return ord(codecs.latin_1_decode(k)[0])
# Entities whose plain-ASCII replacements are preferred over the real
# Unicode character unless UNICODE_SNOB is set.
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u'}
# Same replacement table, keyed by Unicode codepoint instead of entity name.
unifiable_n = {}
for k in unifiable.keys():
    unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
    """Expand a numeric character reference body (e.g. '65' or 'x41') to text."""
    if name[0] in ['x','X']:
        c = int(name[1:], 16)
    else:
        c = int(name)

    if not UNICODE_SNOB and c in unifiable_n.keys():
        return unifiable_n[c]
    else:
        return unichr(c)
def entityref(c):
    """Expand a named entity reference; unknown names are passed through as '&name'."""
    if not UNICODE_SNOB and c in unifiable.keys():
        return unifiable[c]
    else:
        try: name2cp(c)
        except KeyError: return "&" + c
        else: return unichr(name2cp(c))
def replaceEntities(s):
    """re.sub callback: expand one matched entity or character reference."""
    ref = s.group(1)
    # A leading '#' marks a numeric reference; anything else is a named entity.
    return charref(ref[1:]) if ref.startswith("#") else entityref(ref)
# Matches '&...;' entity and character references (named, decimal, or hex).
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
    """Expand every entity/character reference in *s* to its text form."""
    return r_unescape.sub(replaceEntities, s)
def fixattrs(attrs):
    """Work around an sgmllib bug: expand entities left in attribute values.

    *attrs* is a list of (name, value) pairs; a new list is returned.
    """
    if not attrs:
        return attrs
    return [(name, unescape(value)) for (name, value) in attrs]
### End Entity Nonsense ###
def onlywhite(line):
    """Return a true value if *line* consists only of spaces and tabs.

    Returns the line itself (truthy when non-empty) when it is all
    whitespace, and False as soon as a non-whitespace character is seen.
    Fixes: the original compared strings with the identity operator
    (`is not ' '`), which relies on CPython interning, and its second
    comparison had lost its tab literal, so tab characters were treated
    as non-whitespace.
    """
    for c in line:
        if c not in ' \t':
            return False
    return line
def optwrap(text):
    """Wrap all paragraphs in the provided text.

    Paragraphs are wrapped to BODY_WIDTH columns. Lines starting with
    whitespace or a list marker ('-' / '*') are passed through unwrapped
    so Markdown structure survives. Fixes the original's string identity
    comparisons (`para[0] is not ' '`), which depend on CPython string
    interning rather than value equality.
    """
    if not BODY_WIDTH:
        return text

    assert wrap, "Requires Python 2.3."
    result = ''
    newlines = 0
    for para in text.split("\n"):
        if len(para) > 0:
            if para[0] not in (' ', '-', '*'):
                for line in wrap(para, BODY_WIDTH):
                    result += line + "\n"
                result += "\n"
                newlines = 2
            else:
                # Preserve indented / list lines; drop whitespace-only ones.
                if not onlywhite(para):
                    result += para + "\n"
                    newlines = 1
        else:
            # Collapse runs of blank lines to at most one blank line.
            if newlines < 2:
                result += "\n"
                newlines += 1
    return result
def hn(tag):
    """Return the heading level for tags 'h1'..'h9'.

    Returns 0 when the second character is not a digit, and None (falsy)
    for any other tag or an out-of-range level — matching how callers
    use the result purely for truthiness.
    """
    if len(tag) == 2 and tag[0] == 'h':
        try:
            level = int(tag[1])
        except ValueError:
            return 0
        if 1 <= level <= 9:
            return level
class _html2text(sgmllib.SGMLParser):
    """SGML parser that renders incoming HTML events as Markdown text.

    Output is pushed through the ``out`` callable (stdout by default);
    pass ``out=None`` to accumulate the result in ``self.outtext`` and
    retrieve it from ``close()``.
    """
    def __init__(self, out=sys.stdout.write, baseurl=''):
        sgmllib.SGMLParser.__init__(self)

        if out is None: self.out = self.outtextf
        else: self.out = out
        self.outtext = u''
        self.quiet = 0          # >0 while inside head/style/script (output suppressed)
        self.p_p = 0            # number of newlines to emit before next output
        self.outcount = 0       # incremented per out() call; used for link footnotes
        self.start = 1          # at start of output (suppress leading space/newlines)
        self.space = 0          # pending collapsed space
        self.a = []             # links awaiting footnote-style emission
        self.astack = []        # currently-open <a> attribute dicts (None = skipped link)
        self.acount = 0
        self.list = []          # stack of open list contexts: {'name', 'num'}
        self.blockquote = 0     # blockquote nesting depth
        self.pre = 0
        self.startpre = 0
        self.lastWasNL = 0
        self.abbr_title = None # current abbreviation definition
        self.abbr_data = None # last inner HTML (for abbr being defined)
        self.abbr_list = {} # stack of abbreviations to write later
        self.baseurl = baseurl

    def outtextf(self, s):
        # Default sink used when out=None: accumulate in memory.
        self.outtext += s

    def close(self):
        """Finish parsing and return the accumulated Markdown text."""
        sgmllib.SGMLParser.close(self)

        self.pbr()
        self.o('', 0, 'end')

        return self.outtext

    def handle_charref(self, c):
        self.o(charref(c))

    def handle_entityref(self, c):
        self.o(entityref(c))

    def unknown_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, 1)

    def unknown_endtag(self, tag):
        self.handle_tag(tag, None, 0)

    def previousIndex(self, attrs):
        """ returns the index of certain set of attributes (of a link) in the
            self.a list

            If the set of attributes is not found, returns None
        """
        if not attrs.has_key('href'): return None

        i = -1
        for a in self.a:
            i += 1
            match = 0

            if a.has_key('href') and a['href'] == attrs['href']:
                if a.has_key('title') or attrs.has_key('title'):
                        if (a.has_key('title') and attrs.has_key('title') and
                            a['title'] == attrs['title']):
                            match = True
                else:
                    match = True

            if match: return i

    def handle_tag(self, tag, attrs, start):
        """Dispatch a start (start=1) or end (start=0) tag to Markdown output."""
        attrs = fixattrs(attrs)

        if hn(tag):
            # Headings: paragraph break, then one '#' per level.
            self.p()
            if start: self.o(hn(tag)*"#" + ' ')

        if tag in ['p', 'div']: self.p()

        if tag == "br" and start: self.o("  \n")

        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()

        if tag in ["head", "style", 'script']:
            if start: self.quiet += 1
            else: self.quiet -= 1

        if tag in ["body"]:
            self.quiet = 0 # sites like 9rules.com never close <head>

        if tag == "blockquote":
            if start:
                self.p(); self.o('> ', 0, 1); self.start = 1
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()

        if tag in ['em', 'i', 'u']: self.o("_")
        if tag in ['strong', 'b']: self.o("**")
        if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
        if tag == "abbr":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD

                self.abbr_title = None
                self.abbr_data = ''
                if attrs.has_key('title'):
                    self.abbr_title = attrs['title']
            else:
                if self.abbr_title != None:
                    self.abbr_list[self.abbr_data] = self.abbr_title
                    self.abbr_title = None
                self.abbr_data = ''

        if tag == "a":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD
                if attrs.has_key('href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
                    self.astack.append(attrs)
                    self.o("[")
                else:
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if a:
                        # Reuse an existing footnote number for repeated links.
                        i = self.previousIndex(a)
                        if i is not None:
                            a = self.a[i]
                        else:
                            self.acount += 1
                            a['count'] = self.acount
                            a['outcount'] = self.outcount
                            self.a.append(a)
                        self.o("][" + `a['count']` + "]")

        if tag == "img" and start:
            attrsD = {}
            for (x, y) in attrs: attrsD[x] = y
            attrs = attrsD
            if attrs.has_key('src'):
                attrs['href'] = attrs['src']
                alt = attrs.get('alt', '')
                i = self.previousIndex(attrs)
                if i is not None:
                    attrs = self.a[i]
                else:
                    self.acount += 1
                    attrs['count'] = self.acount
                    attrs['outcount'] = self.outcount
                    self.a.append(attrs)
                self.o("![")
                self.o(alt)
                self.o("]["+`attrs['count']`+"]")

        if tag == 'dl' and start: self.p()
        if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
        if tag == 'dd' and not start: self.pbr()

        if tag in ["ol", "ul"]:
            if start:
                self.list.append({'name':tag, 'num':0})
            else:
                if self.list: self.list.pop()

            self.p()

        if tag == 'li':
            if start:
                self.pbr()
                if self.list: li = self.list[-1]
                else: li = {'name':'ul', 'num':0}
                self.o("  "*len(self.list)) #TODO: line up <ol><li>s > 9 correctly.
                if li['name'] == "ul": self.o("* ")
                elif li['name'] == "ol":
                    li['num'] += 1
                    self.o(`li['num']`+". ")
                self.start = 1
            else:
                self.pbr()

        if tag in ["table", "tr"] and start: self.p()
        if tag == 'td': self.pbr()

        if tag == "pre":
            if start:
                self.startpre = 1
                self.pre = 1
            else:
                self.pre = 0
            self.p()

    def pbr(self):
        # Request (at most) a single pending line break.
        if self.p_p == 0: self.p_p = 1

    def p(self):
        # Request a paragraph break (blank line).
        self.p_p = 2

    def o(self, data, puredata=0, force=0):
        """Emit *data*, applying pending breaks, blockquote/pre prefixes,
        whitespace collapsing (when puredata), and deferred link footnotes.
        force=1 emits even empty data; force='end' flushes at end of document.
        """
        if self.abbr_data is not None: self.abbr_data += data

        if not self.quiet:
            if puredata and not self.pre:
                data = re.sub('\s+', ' ', data)
                if data and data[0] == ' ':
                    self.space = 1
                    data = data[1:]
            if not data and not force: return

            if self.startpre:
                #self.out(" :") #TODO: not output when already one there
                self.startpre = 0

            bq = (">" * self.blockquote)
            if not (force and data and data[0] == ">") and self.blockquote: bq += " "

            if self.pre:
                bq += "    "
                data = data.replace("\n", "\n"+bq)

            if self.start:
                self.space = 0
                self.p_p = 0
                self.start = 0

            if force == 'end':
                # It's the end.
                self.p_p = 0
                self.out("\n")
                self.space = 0

            if self.p_p:
                self.out(('\n'+bq)*self.p_p)
                self.space = 0

            if self.space:
                if not self.lastWasNL: self.out(' ')
                self.space = 0

            if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
                if force == "end": self.out("\n")

                newa = []
                for link in self.a:
                    # Only emit footnotes for links whose text already appeared.
                    if self.outcount > link['outcount']:
                        self.out("   ["+`link['count']`+"]: " + urlparse.urljoin(self.baseurl, link['href']))
                        if link.has_key('title'): self.out(" ("+link['title']+")")
                        self.out("\n")
                    else:
                        newa.append(link)

                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.

                self.a = newa

            if self.abbr_list and force == "end":
                for abbr, definition in self.abbr_list.items():
                    self.out("  *[" + abbr + "]: " + definition + "\n")

            self.p_p = 0
            self.out(data)
            self.lastWasNL = data and data[-1] == '\n'
            self.outcount += 1

    def handle_data(self, data):
        # Heuristic: an escaped close-script inside data ends a quiet section.
        if r'\/script>' in data: self.quiet -= 1
        self.o(data, 1)

    def unknown_decl(self, data):
        pass
def wrapwrite(text):
    """Encode *text* as UTF-8 and write it to standard output."""
    encoded = text.encode('utf8')
    sys.stdout.write(encoded)
def html2text_file(html, out=wrapwrite, baseurl=''):
    """Convert *html* to Markdown, streaming output through *out*.

    With out=None the converted text is accumulated and returned instead.
    *baseurl* is used to resolve relative link hrefs.
    """
    h = _html2text(out, baseurl)
    h.feed(html)
    h.feed("")
    return h.close()
def html2text(html, baseurl=''):
    """Convert *html* to Markdown text, wrapped to BODY_WIDTH, and return it."""
    return optwrap(html2text_file(html, None, baseurl))
if __name__ == "__main__":
    baseurl = ''
    if sys.argv[1:]:
        arg = sys.argv[1]
        if arg.startswith('http://'):
            # Fetch the URL; try feedparser's charset sniffing if available.
            baseurl = arg
            j = urllib.urlopen(baseurl)
            try:
                from feedparser import _getCharacterEncoding as enc
            except ImportError:
                enc = lambda x, y: ('utf-8', 1)
            text = j.read()
            encoding = enc(j.headers, text)[0]
            if encoding == 'us-ascii': encoding = 'utf-8'
            data = text.decode(encoding)
        else:
            # Local file; optional second argument names its encoding.
            encoding = 'utf8'
            if len(sys.argv) > 2:
                encoding = sys.argv[2]
            f = open(arg, 'r')
            try:
                data = f.read().decode(encoding)
            finally:
                f.close()
    else:
        # No arguments: read UTF-8 HTML from stdin.
        data = sys.stdin.read().decode('utf8')
    wrapwrite(html2text(data, baseurl))
| agpl-3.0 | 5,945,523,416,795,624,000 | -1,590,162,470,914,615,000 | 29.812636 | 109 | 0.466096 | false |
labordoc/labordoc-next | modules/bibformat/lib/elements/bfe_field.py | 28 | 6253 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints a custom field
"""
__revision__ = "$Id$"
from invenio.bibformat_utils import parse_tag
def format_element(bfo, tag, limit, instances_separator=" ",
                   subfields_separator=" ", extension="", output_pattern=""):
    """
    Prints the given field of a record.
    If tag is in range [001, 010], this element assumes
    that it accesses a control field. Else it considers it
    accesses a data field.

    <p>For eg. consider the following metdata:
    <pre>
    100__ $$aCalatroni, S$$uCERN
    245__ $$aStatus of the EP Simulations and Facilities for the SPL
    700__ $$aFerreira, L$$uCERN
    700__ $$aMacatrao, M$$uCERN
    700__ $$aSkala, A$$uCERN
    700__ $$aSosin, M$$uCERN
    700__ $$ade Waele, R$$uCERN
    700__ $$aWithofs, Y$$uKHLim, Diepenbeek
    </pre>
    The following calls to bfe_field would print:
    <pre>
    <BFE_FIELD tag="700" instances_separator="<br/>" subfields_separator=" - ">
    Ferreira, L - CERN
    Macatrao, M - CERN
    Skala, A - CERN
    Sosin, M - CERN
    de Waele, R - CERN
    Withofs, Y - KHLim, Diepenbeek
    </pre>
    </p>

    <p>For more advanced formatting, the <code>output_pattern</code>
    parameter can be used to output the subfields of each instance in
    the specified way. For eg. consider the following metadata:
    <pre>
    775__ $$b15. Aufl.$$c1995-1996$$nv.1$$pGrundlagen und Werkstoffe$$w317999
    775__ $$b12. Aufl.$$c1963$$w278898
    775__ $$b14. Aufl.$$c1983$$w107899
    775__ $$b13. Aufl.$$c1974$$w99635
    </pre>
    with the following <code>output_pattern</code>:
    <pre>
    <a href="/record/%(w)s">%(b)s (%(c)s) %(n)s %(p)s</a>
    </pre>
    would print:<br/>
    <a href="/record/317999">15. Aufl. (1995-1996) v.1 Grundlagen und Werkstoffe</a><br/>
    <a href="/record/278898">12. Aufl. (1963) </a><br/>
    <a href="/record/107899">14. Aufl. (1983) </a><br/>
    <a href="/record/99635">13. Aufl. (1974) </a>
    <br/>(<code>instances_separator="<br/>"</code> set for
    readability)<br/> The output pattern must follow <a
    href="http://docs.python.org/library/stdtypes.html#string-formatting-operations">Python
    string formatting</a> syntax. The format must use parenthesized
    notation to map to the subfield code. This currently restricts the
    support of <code>output_pattern</code> to non-repeatable
    subfields</p>

    @param tag: the tag code of the field that is to be printed
    @param instances_separator: a separator between instances of field
    @param subfields_separator: a separator between subfields of an instance
    @param limit: the maximum number of values to display.
    @param extension: a text printed at the end if 'limit' has been exceeded
    @param output_pattern: when specified, prints the subfields of each instance according to pattern specified as parameter (following Python string formatting convention)
    """
    # Check if data or control field
    p_tag = parse_tag(tag)
    if p_tag[0].isdigit() and int(p_tag[0]) in range(0, 11):
        return bfo.control_field(tag)
    elif p_tag[0].isdigit():
        # Get values without subcode.
        # We will filter unneeded subcode later
        if p_tag[1] == '':
            p_tag[1] = '_'
        if p_tag[2] == '':
            p_tag[2] = '_'
        values = bfo.fields(p_tag[0]+p_tag[1]+p_tag[2]) # Values will
                                                        # always be a
                                                        # list of
                                                        # dicts
    else:
        return ''

    x = 0
    instances_out = [] # Retain each instance output
    for instance in values:
        # Keep only subfields matching the requested subcode ('' / '%' = all).
        filtered_values = [value for (subcode, value) in instance.iteritems()
                           if p_tag[3] == '' or p_tag[3] == '%' \
                           or p_tag[3] == subcode]
        if len(filtered_values) > 0:
            # We have found some corresponding subcode(s)
            if limit.isdigit() and x + len(filtered_values) >= int(limit):
                # We are going to exceed the limit
                filtered_values = filtered_values[:int(limit)-x] # Takes only needed one
                if len(filtered_values) > 0: # do not append empty list!
                    if output_pattern:
                        try:
                            instances_out.append(output_pattern % DictNoKeyError(instance))
                        except:
                            pass
                    else:
                        instances_out.append(subfields_separator.join(filtered_values))
                    x += len(filtered_values) # record that so we know limit has been exceeded
                break # No need to go further
            else:
                if output_pattern:
                    try:
                        instances_out.append(output_pattern % DictNoKeyError(instance))
                    except:
                        pass
                else:
                    instances_out.append(subfields_separator.join(filtered_values))
                x += len(filtered_values)

    ext_out = ''
    if limit.isdigit() and x > int(limit):
        ext_out = extension

    return instances_separator.join(instances_out) + ext_out
class DictNoKeyError(dict):
    """dict variant whose lookup yields '' for missing keys.

    Lets '%(code)s' output patterns format instances that lack some
    subfields instead of raising KeyError.
    """
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        return ''
| gpl-2.0 | 4,704,449,273,975,812,000 | 8,327,175,342,576,260,000 | 39.341935 | 172 | 0.592036 | false |
huntxu/fuel-web | nailgun/nailgun/db/sqlalchemy/models/notification.py | 9 | 1615 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Text
from nailgun import consts
from nailgun.db.sqlalchemy.models.base import Base
class Notification(Base):
    """UI notification, optionally linked to a cluster, node and/or task."""
    __tablename__ = 'notifications'
    id = Column(Integer, primary_key=True)
    # Links are nullable; deleting the referenced row keeps the notification
    # (ondelete='SET NULL').
    cluster_id = Column(
        Integer,
        ForeignKey('clusters.id', ondelete='SET NULL')
    )
    node_id = Column(Integer, ForeignKey('nodes.id', ondelete='SET NULL'))
    task_id = Column(Integer, ForeignKey('tasks.id', ondelete='SET NULL'))
    # Topic/status values are constrained to the enums declared in consts.
    topic = Column(
        Enum(*consts.NOTIFICATION_TOPICS, name='notif_topic'),
        nullable=False
    )
    message = Column(Text)
    status = Column(
        Enum(*consts.NOTIFICATION_STATUSES, name='notif_status'),
        nullable=False,
        default=consts.NOTIFICATION_STATUSES.unread
    )
    datetime = Column(DateTime, nullable=False)
| apache-2.0 | 2,859,034,413,977,583,000 | -1,604,019,494,656,412,400 | 31.959184 | 78 | 0.701548 | false |
topnotchgeek/capnlog | apps/www/behaviors.py | 1 | 1759 | import markdown
from django.db import models
from django.utils.text import slugify
from conf import settings
class Permalinkable(models.Model):
    """Abstract mixin that maintains a slug derived from ``self.slug_source``.

    Subclasses must provide ``slug_source`` (text the slug is built from)
    and ``url_name`` (the URL pattern name used by get_absolute_url).
    """
    slug = models.SlugField()

    class Meta:
        abstract = True

    def get_url_kwargs(self, **kwargs):
        """Merge the instance's optional ``url_kwargs`` into *kwargs*."""
        kwargs.update(getattr(self, 'url_kwargs', {}))
        return kwargs

    @models.permalink
    def get_absolute_url(self):
        """Resolve ``self.url_name`` with the slug as URL kwarg."""
        url_kwargs = self.get_url_kwargs(slug=self.slug)
        return (self.url_name, (), url_kwargs)

    def save(self, *args, **kwargs):
        # Bug fix: the original signature was save(self), which broke any
        # caller passing Django's standard arguments (force_insert, using,
        # update_fields, ...). Accept and forward them.
        if not self.slug:
            # Truncate to the SlugField's default max_length of 50.
            self.slug = slugify(self.slug_source)[:50]
        super(Permalinkable, self).save(*args, **kwargs)
class Authorable(models.Model):
    """Abstract mixin adding an ``author`` FK to the configured user model."""
    class Meta:
        abstract = True

    # '+' suffix in related_name disables the reverse accessor.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='author+')
class Syncable(models.Model):
    """Abstract mixin with integer status/flag fields for sync bookkeeping."""
    class Meta:
        abstract = True

    # NOTE(review): the meaning of individual status/flag values is not
    # defined here — confirm against callers before relying on specifics.
    status = models.SmallIntegerField(default=0, blank=True, null=True)
    flag = models.SmallIntegerField(default=0, blank=True, null=True)
class Htmlable(object):
    """Mixin rendering ``self.html_source`` (Markdown text) to HTML."""

    def as_html(self):
        """Return the Markdown source converted to HTML ('' when source is None)."""
        source = self.html_source
        if source is None:
            return ''
        converter = markdown.Markdown(safe_mode=False)
        return converter.convert(source)
class Auditable(models.Model):
    """Abstract mixin recording which user created and last modified a row."""
    class Meta:
        abstract = True

    # '+' suffix in related_name disables the reverse accessors.
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='created_by+')
    modified_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='modified_by+')
class Timestampable(models.Model):
    """Abstract mixin recording creation and last-modification timestamps."""
    class Meta:
        abstract = True

    create_time = models.DateTimeField(auto_now_add=True)  # set once on INSERT
    modify_time = models.DateTimeField(auto_now=True)      # refreshed on every save
| apache-2.0 | 1,955,632,553,124,603,400 | 5,082,696,630,969,445,000 | 22.453333 | 106 | 0.641842 | false |
HuaweiSwitch/ansible | lib/ansible/modules/cloud/amazon/efs_facts.py | 31 | 11554 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
    """Thin wrapper around the boto3 EFS client used to enumerate file
    systems with their tags, mount targets and security groups."""

    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'

    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
        except Exception as e:
            # fail_json exits the module; self.connection is never left unset.
            module.fail_json(msg="Failed to connect to AWS: %s" % str(e))

        self.region = region

    def get_file_systems(self, **kwargs):
        """
        Returns generator of file systems including all attributes of FS
        """
        items = iterate_all(
            'FileSystems',
            self.connection.describe_file_systems,
            **kwargs
        )
        for item in items:
            item['CreationTime'] = str(item['CreationTime'])
            """
            Suffix of network path to be used as NFS device for mount. More detail here:
            http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
            """
            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            if 'Timestamp' in item['SizeInBytes']:
                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
            # Tags and mount targets can only be fetched for available FSs.
            if item['LifeCycleState'] == self.STATE_AVAILABLE:
                item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
                item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
            else:
                item['Tags'] = {}
                item['MountTargets'] = []
            yield item

    def get_tags(self, **kwargs):
        """
        Returns tag list for selected instance of EFS
        """
        tags = iterate_all(
            'Tags',
            self.connection.describe_tags,
            **kwargs
        )
        return dict((tag['Key'], tag['Value']) for tag in tags)

    def get_mount_targets(self, **kwargs):
        """
        Returns mount targets for selected instance of EFS
        """
        targets = iterate_all(
            'MountTargets',
            self.connection.describe_mount_targets,
            **kwargs
        )
        for target in targets:
            # Security groups are only queryable on available targets.
            if target['LifeCycleState'] == self.STATE_AVAILABLE:
                target['SecurityGroups'] = list(self.get_security_groups(
                    MountTargetId=target['MountTargetId']
                ))
            else:
                target['SecurityGroups'] = []
            yield target

    def get_security_groups(self, **kwargs):
        """
        Returns security groups for selected instance of EFS
        """
        return iterate_all(
            'SecurityGroups',
            self.connection.describe_mount_target_security_groups,
            **kwargs
        )
def iterate_all(attr, map_method, **kwargs):
    """
    Yield every element of a paginated boto3 result set.

    @param attr: key of the result list inside each response page
    @param map_method: boto3 client method to call for each page
    @param kwargs: arguments for map_method; entries with value None
        are dropped before the call

    Retries with exponential backoff (1s, 2s, 4s, ... capped below
    10 minutes total) on ThrottlingException; any other client error is
    raised to the caller.
    """
    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
    wait = 1
    while True:
        try:
            data = map_method(**args)
            for elm in data[attr]:
                yield elm
            if 'NextMarker' in data:
                # Bug fix: the response key is 'NextMarker'; the original
                # read data['Nextmarker'], raising KeyError on any
                # multi-page result.
                args['Marker'] = data['NextMarker']
                continue
            break
        except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # Bug fix: re-raise instead of silently retrying forever —
            # the original fell through the except clause and looped on
            # non-throttling errors.
            raise
def prefix_to_attr(attr_id):
    """
    Map a mount-target identifier prefix to its API attribute name.

    Anything without a recognized prefix is treated as an IP address.
    """
    attr_by_prefix = {
        'fsmt-': 'MountTargetId',
        'subnet-': 'SubnetId',
        'eni-': 'NetworkInterfaceId',
        'sg-': 'SecurityGroups'
    }
    for prefix, attr_name in attr_by_prefix.items():
        if str(attr_id).startswith(prefix):
            return attr_name
    return 'IpAddress'

def first_or_default(items, default=None):
    """
    Return the first element of *items*, or *default* when it is empty.
    """
    return next(iter(items), default)
def has_tags(available, required):
    """
    True when every required key/value pair is present in *available*.
    """
    return all(key in available and available[key] == value
               for key, value in required.items())
def has_targets(available, required):
    """
    True when every required (value, field) pair is satisfied by one of
    the mount-target dicts in *available*.
    """
    grouped = group_list_of_dict(available)
    return all(field in grouped and value in grouped[field]
               for (value, field) in required)

def group_list_of_dict(array):
    """
    Merge a list of dicts into one dict mapping each key to the list of
    all values seen for it (list values are flattened).
    """
    result = defaultdict(list)
    for item in array:
        for key, value in item.items():
            if isinstance(value, list):
                result[key].extend(value)
            else:
                result[key].append(value)
    return result
def main():
    """
    Module action handler
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[])
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_params)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
    targets = module.params.get('targets')
    file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)

    # Narrow the result set by requested tags, then by mount-target attributes.
    if tags:
        file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)

    if targets:
        # Resolve each raw target ID to the attribute it should match.
        targets = [(item, prefix_to_attr(item)) for item in targets]
        file_systems_info = filter(lambda item:
                                   has_targets(item['MountTargets'], targets), file_systems_info)

    file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
    module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
| gpl-3.0 | 452,148,603,187,175,360 | 2,808,803,156,733,090,000 | 29.405263 | 156 | 0.595638 | false |
snyaggarwal/oclapi | ocl/mappings/serializers.py | 2 | 11917 | from rest_framework import serializers
from concepts.fields import ConceptURLField, SourceURLField
from concepts.models import Concept
from mappings.models import Mapping, MappingVersion
from oclapi.serializers import ResourceVersionSerializer
from oclapi.models import RegexValidator,NAMESPACE_REGEX
__author__ = 'misternando'
class MappingBaseSerializer(serializers.Serializer):
    """Base serializer that maps validated attrs onto a Mapping instance."""
    def restore_object(self, attrs, instance=None):
        # Update the given instance in place, or build a new Mapping;
        # each field falls back to its current value when absent in attrs.
        mapping = instance if instance else Mapping()
        mapping.map_type = attrs.get('map_type', mapping.map_type)
        mapping.mnemonic = attrs.get('mnemonic', mapping.mnemonic)
        from_concept = None
        try:
            from_concept = mapping.from_concept
        except Concept.DoesNotExist: pass
        mapping.retired = attrs.get('retired', mapping.retired)
        mapping.from_concept = attrs.get('from_concept', from_concept)
        mapping.to_concept = attrs.get('to_concept', mapping.to_concept)
        mapping.to_source = attrs.get('to_source', mapping.to_source)
        mapping.to_concept_name = attrs.get('to_concept_name', mapping.to_concept_name)
        mapping.to_concept_code = attrs.get('to_concept_code', mapping.to_concept_code)
        mapping.external_id = attrs.get('external_id', mapping.external_id)
        return mapping

    class Meta:
        model = Mapping
        lookup_field = 'mnemonic'
class MappingVersionBaseSerializer(serializers.Serializer):
    """Base serializer that maps validated attrs onto a MappingVersion."""
    def restore_object(self, attrs, instance=None):
        # Update the given instance in place, or build a new MappingVersion;
        # each field falls back to its current value when absent in attrs.
        mapping_version = instance if instance else MappingVersion()
        mapping_version.map_type = attrs.get('map_type', mapping_version.map_type)
        from_concept = None
        try:
            from_concept = mapping_version.from_concept
        except Concept.DoesNotExist: pass
        mapping_version.retired = attrs.get('retired', mapping_version.retired)
        mapping_version.from_concept = attrs.get('from_concept', from_concept)
        mapping_version.to_concept = attrs.get('to_concept', mapping_version.to_concept)
        mapping_version.to_source = attrs.get('to_source', mapping_version.to_source)
        mapping_version.to_concept_name = attrs.get('to_concept_name', mapping_version.to_concept_name)
        mapping_version.to_concept_code = attrs.get('to_concept_code', mapping_version.to_concept_code)
        mapping_version.external_id = attrs.get('external_id', mapping_version.external_id)
        return mapping_version

    class Meta:
        model = MappingVersion
        lookup_field = 'mnemonic'
class MappingDetailSerializer(MappingBaseSerializer):
    """Full read/write representation of a Mapping, including resolved
    from/to concept and source attributes (mostly read-only)."""
    type = serializers.CharField(source='resource_type', read_only=True)
    id = serializers.CharField(required=True, validators=[RegexValidator(regex=NAMESPACE_REGEX)], source='mnemonic')
    uuid = serializers.CharField(source='id')
    external_id = serializers.CharField(required=False)
    retired = serializers.BooleanField(required=False)
    map_type = serializers.CharField(required=True)
    from_source_owner = serializers.CharField(read_only=True)
    from_source_owner_type = serializers.CharField(read_only=True)
    from_source_name = serializers.CharField(read_only=True)
    from_source_url = serializers.URLField(read_only=True)
    from_concept_code = serializers.CharField(read_only=True)
    from_concept_name = serializers.CharField(read_only=True)
    from_concept_url = serializers.URLField()
    to_source_owner = serializers.CharField(read_only=True)
    to_source_owner_type = serializers.CharField(read_only=True)
    to_source_name = serializers.CharField(read_only=True)
    to_source_url = serializers.URLField()
    to_concept_code = serializers.CharField(source='get_to_concept_code')
    to_concept_name = serializers.CharField(source='get_to_concept_name')
    to_concept_url = serializers.URLField()
    source = serializers.CharField(read_only=True)
    owner = serializers.CharField(read_only=True)
    owner_type = serializers.CharField(read_only=True)
    url = serializers.CharField(read_only=True)
    extras = serializers.WritableField(required=False)
    created_at = serializers.DateTimeField(read_only=True)
    updated_at = serializers.DateTimeField(read_only=True)
    created_by = serializers.CharField(read_only=True)
    updated_by = serializers.CharField(read_only=True)
class MappingVersionDetailSerializer(MappingVersionBaseSerializer):
    """Full representation of one version of a Mapping, with a back-link
    to the versioned object and version bookkeeping fields."""
    version = serializers.CharField(source='mnemonic')
    is_latest_version = serializers.CharField()
    type = serializers.CharField(source='resource_type', read_only=True)
    id = serializers.CharField(source='name')
    versioned_object_id = serializers.CharField(source='versioned_object_id')
    external_id = serializers.CharField(required=False)
    retired = serializers.BooleanField(required=False)
    map_type = serializers.CharField(required=True)
    from_source_owner = serializers.CharField(read_only=True)
    from_source_owner_type = serializers.CharField(read_only=True)
    from_source_name = serializers.CharField(read_only=True)
    from_source_url = serializers.URLField(read_only=True)
    from_concept_code = serializers.CharField(read_only=True)
    from_concept_name = serializers.CharField(read_only=True)
    from_concept_url = serializers.URLField()
    to_source_owner = serializers.CharField(read_only=True)
    to_source_owner_type = serializers.CharField(read_only=True)
    to_source_name = serializers.CharField(read_only=True)
    to_source_url = serializers.URLField()
    to_concept_code = serializers.CharField(source='get_to_concept_code')
    to_concept_name = serializers.CharField(source='get_to_concept_name')
    to_concept_url = serializers.URLField()
    source = serializers.CharField(read_only=True)
    owner = serializers.CharField(read_only=True)
    owner_type = serializers.CharField(read_only=True)
    url = serializers.CharField(read_only=True)
    extras = serializers.WritableField(required=False)
    update_comment = serializers.CharField(required=False)
    created_at = serializers.DateTimeField(read_only=True)
    updated_at = serializers.DateTimeField(read_only=True)
    created_by = serializers.CharField(read_only=True)
    updated_by = serializers.CharField(read_only=True)
    versioned_object_url = serializers.CharField(source='to_mapping_url')
class MappingListSerializer(MappingBaseSerializer):
    """Compact representation of a Mapping used in list endpoints.

    Subset of MappingDetailSerializer: identity, endpoints by URL/code,
    and ownership — no audit metadata or extras.
    """
    id = serializers.CharField(required=True, source='mnemonic')
    uuid = serializers.CharField(source='id')
    external_id = serializers.CharField(required=False)
    retired = serializers.BooleanField(required=False)
    map_type = serializers.CharField(required=True)
    source = serializers.CharField(read_only=True)
    owner = serializers.CharField(read_only=True)
    owner_type = serializers.CharField(read_only=True)
    from_concept_url = serializers.URLField(read_only=True)
    to_concept_url = serializers.URLField()
    to_source_url = serializers.URLField()
    to_concept_code = serializers.CharField(source='get_to_concept_code')
    to_concept_name = serializers.CharField(source='get_to_concept_name')
    url = serializers.CharField(read_only=True)
class MappingVersionListSerializer(ResourceVersionSerializer):
    """Compact representation of a MappingVersion used in list endpoints.

    For the latest version the 'url' field resolves to the URL of the
    underlying Mapping (see get_url below); otherwise it is the version's
    own URL.
    """
    external_id = serializers.CharField(required=False)
    retired = serializers.BooleanField(required=False)
    map_type = serializers.CharField(required=True)
    source = serializers.CharField(read_only=True)
    owner = serializers.CharField(read_only=True)
    owner_type = serializers.CharField(read_only=True)
    from_concept_code = serializers.CharField(read_only=True)
    from_concept_name = serializers.CharField(read_only=True)
    from_concept_url = serializers.URLField(read_only=True)
    to_concept_url = serializers.URLField()
    to_source_url = serializers.URLField()
    to_concept_code = serializers.CharField(source='get_to_concept_code')
    to_concept_name = serializers.CharField(source='get_to_concept_name')
    # url = serializers.CharField(read_only=True)
    # TODO: workaround — for the HEAD (latest) version the main Mapping's URL
    # is substituted in get_url(); ideally the main Mapping itself would be
    # fetched so no URL rewriting would be needed here.
    url = serializers.SerializerMethodField(method_name='get_url')
    version = serializers.CharField(source='mnemonic')
    id = serializers.CharField(source='name')
    versioned_object_id = serializers.CharField(source='versioned_object_id')
    from_source_owner = serializers.CharField(read_only=True)
    from_source_owner_type = serializers.CharField(read_only=True)
    from_source_name = serializers.CharField(read_only=True)
    from_source_url = serializers.URLField(read_only=True)
    to_source_owner = serializers.CharField(read_only=True)
    to_source_owner_type = serializers.CharField(read_only=True)
    to_source_name = serializers.CharField(read_only=True)
    # NOTE(review): source is 'to_mapping_url' — verify this is the intended
    # attribute for the versioned object's URL.
    versioned_object_url = serializers.CharField(source='to_mapping_url')
    is_latest_version = serializers.CharField()
    update_comment = serializers.CharField(required=False)
    class Meta:
        # Versions resolve their canonical ('versioned object') link through
        # the 'url' field against the mapping-detail view.
        model = MappingVersion
        versioned_object_field_name = 'url'
        versioned_object_view_name = 'mapping-detail'
    def get_url(self, obj):
        """Return the Mapping's URL for the latest version, else the version's own URL."""
        if obj is None:
            return None
        if obj.is_latest_version:
            try:
                return Mapping.objects.get(id=obj.versioned_object_id).url
            except Mapping.DoesNotExist:
                # Re-raise with a message that identifies the missing Mapping.
                raise Mapping.DoesNotExist('Mapping with id %s does not exist' % obj.versioned_object_id)
        return obj.url
class MappingCreateSerializer(MappingBaseSerializer):
    """Deserializer used to create a new Mapping.

    The 'to' endpoint may be given either as a concept URL, or as a
    source URL plus a free-text concept code/name.
    """
    id = serializers.CharField(required=False, validators=[RegexValidator(regex=NAMESPACE_REGEX)], source='mnemonic')
    map_type = serializers.CharField(required=True)
    from_concept_url = ConceptURLField(view_name='concept-detail', queryset=Concept.objects.all(), lookup_kwarg='concept', lookup_field='concept', required=True, source='from_concept')
    to_concept_url = ConceptURLField(view_name='concept-detail', queryset=Concept.objects.all(), lookup_kwarg='concept', lookup_field='concept', required=False, source='to_concept')
    # NOTE(review): queryset here is Concept.objects.all() although the field
    # resolves a *source* URL — presumably this should be Source.objects.all();
    # confirm against SourceURLField's lookup behavior.
    to_source_url = SourceURLField(view_name='source-detail', queryset=Concept.objects.all(), lookup_kwarg='source', lookup_field='source', required=False, source='to_source')
    to_concept_code = serializers.CharField(required=False)
    to_concept_name = serializers.CharField(required=False)
    external_id = serializers.CharField(required=False)
    def save_object(self, obj, **kwargs):
        """Persist the new Mapping; validation errors are merged into self._errors."""
        request_user = self.context['request'].user
        errors = Mapping.persist_new(obj, request_user, **kwargs)
        self._errors.update(errors)
class MappingUpdateSerializer(MappingBaseSerializer):
    """Deserializer used to update an existing Mapping.

    All fields are optional; only supplied values are changed.
    """
    map_type = serializers.CharField(required=False)
    retired = serializers.BooleanField(required=False)
    from_concept_url = ConceptURLField(view_name='concept-detail', queryset=Concept.objects.all(), lookup_kwarg='concept', lookup_field='concept', required=False, source='from_concept')
    to_concept_url = ConceptURLField(view_name='concept-detail', queryset=Concept.objects.all(), lookup_kwarg='concept', lookup_field='concept', required=False, source='to_concept')
    to_source_url = SourceURLField(view_name='source-detail', queryset=Concept.objects.all(), lookup_kwarg='source', lookup_field='source', required=False, source='to_source')
    to_concept_code = serializers.CharField(required=False)
    to_concept_name = serializers.CharField(required=False)
    external_id = serializers.CharField(required=False)
    def save_object(self, obj, **kwargs):
        """Persist changes to *obj*, forwarding an update comment when one was given."""
        user = self.context['request'].user
        persist_kwargs = {}
        if 'update_comment' in kwargs:
            # Only pass the comment through when the caller actually supplied one.
            persist_kwargs['update_comment'] = kwargs['update_comment']
        errors = Mapping.persist_changes(obj, user, **persist_kwargs)
        self._errors.update(errors)
| mpl-2.0 | -2,390,021,549,818,318,000 | 3,432,028,166,985,964,500 | 50.366379 | 185 | 0.729714 | false |
andersk/zulip | zerver/webhooks/mention/view.py | 4 | 1049 | # Webhooks for external integrations.
from typing import Any, Dict, Sequence
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view("Mention")
@has_request_variables
def api_mention_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Sequence[Dict[str, Any]]] = REQ(argument_type="body"),
) -> HttpResponse:
    """Handle an incoming Mention.com webhook and forward it as a Zulip message."""
    # Render the notification as a linked title followed by the description
    # in a quote block, and post it to the "news" topic.
    message = "**[{title}]({url})**:\n``` quote\n{description}\n```".format(
        title=payload["title"],
        url=payload["url"],
        description=payload["description"],
    )
    check_send_webhook_message(request, user_profile, "news", message)
    return json_success()
| apache-2.0 | -7,682,392,690,362,630,000 | 2,381,522,511,995,501,000 | 27.351351 | 80 | 0.722593 | false |
pierotofy/OpenDroneMap | stages/odm_slam.py | 2 | 2843 | """Cell to run odm_slam."""
import os
from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm import types
class ODMSlamStage(types.ODM_Stage):
    """Run odm_slam on a video and export the result to OpenSfM format."""

    def process(self, args, outputs):
        """Run the SLAM pipeline: ORB_SLAM2 -> OpenSfM -> Bundler export.

        Each stage is skipped when its output file already exists, unless
        the stage is marked for rerun.
        """
        tree = outputs['tree']

        # Validate the argument *before* joining paths: os.path.join() always
        # returns a non-empty string, so the original `if not video` check
        # (performed after the join) could never detect a missing video.
        if not args.video:
            log.ODM_ERROR('No video provided')
            exit(1)

        video = os.path.join(tree.root_path, args.video)
        slam_config = os.path.join(tree.root_path, args.slam_config)

        # create working directories
        system.mkdir_p(tree.opensfm)

        vocabulary = os.path.join(context.orb_slam2_path,
                                  'Vocabulary/ORBvoc.txt')
        orb_slam_cmd = os.path.join(context.odm_modules_path, 'odm_slam')
        trajectory = os.path.join(tree.opensfm, 'KeyFrameTrajectory.txt')
        map_points = os.path.join(tree.opensfm, 'MapPoints.txt')

        # Stage 1: run the ORB_SLAM2 binary unless a trajectory already exists.
        if not io.file_exists(trajectory) or self.rerun():
            system.run(' '.join([
                'cd {} &&'.format(tree.opensfm),
                orb_slam_cmd,
                vocabulary,
                slam_config,
                video,
            ]))
        else:
            log.ODM_WARNING('Found a valid slam trajectory in: {}'.format(
                trajectory))

        # Stage 2: convert the SLAM trajectory and map to an OpenSfM
        # reconstruction (runs a helper script with OpenSfM on PYTHONPATH).
        if not io.file_exists(tree.opensfm_reconstruction) or self.rerun():
            system.run(' '.join([
                'cd {} &&'.format(tree.opensfm),
                'PYTHONPATH={}:{}'.format(context.pyopencv_path,
                                          context.opensfm_path),
                'python',
                os.path.join(context.odm_modules_src_path,
                             'odm_slam/src/orb_slam_to_opensfm.py'),
                video,
                trajectory,
                map_points,
                slam_config,
            ]))
            # link opensfm images to resized images
            os.symlink(tree.opensfm + '/images', tree.dataset_resize)
        else:
            log.ODM_WARNING('Found a valid OpenSfM file in: {}'.format(
                tree.opensfm_reconstruction))

        # Stage 3: export the OpenSfM reconstruction back to Bundler's format.
        if not io.file_exists(tree.opensfm_bundle_list) or self.rerun():
            system.run(
                'PYTHONPATH={} {}/bin/export_bundler {}'.format(
                    context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING(
                'Found a valid Bundler file in: {}'.format(
                    tree.opensfm_reconstruction))
| gpl-3.0 | 7,812,408,546,733,654,000 | 3,053,664,793,328,692,000 | 35.922078 | 79 | 0.543088 | false |
smaffulli/libcloud | libcloud/storage/drivers/auroraobjects.py | 18 | 1935 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.types import LibcloudError
from libcloud.storage.providers import Provider
from libcloud.storage.drivers.s3 import BaseS3StorageDriver, BaseS3Connection
# Public API of this module.
__all__ = [
    'AuroraObjectsStorageDriver'
]

# Endpoint for the EU AuroraObjects region.
AURORA_OBJECTS_EU_HOST = 'o.auroraobjects.eu'
# Error message raised by all CDN-related operations (service has no CDN).
NO_CDN_SUPPORT_ERROR = 'CDN is not supported by AuroraObjects'
class BaseAuroraObjectsConnection(BaseS3Connection):
    # S3-compatible connection pointed at the AuroraObjects EU endpoint.
    host = AURORA_OBJECTS_EU_HOST
class BaseAuroraObjectsStorageDriver(BaseS3StorageDriver):
    # Provider identity shown to libcloud users; the storage protocol itself
    # is inherited unchanged from the S3 driver.
    type = Provider.AURORAOBJECTS
    name = 'PCextreme AuroraObjects'
    website = 'https://www.pcextreme.nl/en/aurora/objects'
class AuroraObjectsStorageDriver(BaseAuroraObjectsStorageDriver):
    """S3-compatible driver for PCextreme AuroraObjects.

    AuroraObjects offers no CDN, so every CDN-related operation raises
    LibcloudError instead of silently doing nothing.
    """
    connectionCls = BaseAuroraObjectsConnection
    def enable_container_cdn(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)
    def enable_object_cdn(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)
    def get_container_cdn_url(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)
    def get_object_cdn_url(self, *argv):
        raise LibcloudError(NO_CDN_SUPPORT_ERROR, driver=self)
| apache-2.0 | 3,017,691,285,518,592,500 | -8,938,651,266,079,605,000 | 36.211538 | 77 | 0.765375 | false |
pong3489/TEST_Mission | Lib/csv.py | 55 | 16795 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Names re-exported by `from csv import *` (C-level symbols plus the
# pure-Python classes defined below).
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
            "Error", "Dialect", "__doc__", "excel", "excel_tab",
            "field_size_limit", "reader", "writer",
            "register_dialect", "get_dialect", "list_dialects", "Sniffer",
            "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
    """Describe an Excel dialect.
    This must be subclassed (see csv.excel).  Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.
    """
    _name = ""
    _valid = False
    # placeholders — subclasses must override these before instantiation
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None
    def __init__(self):
        # Only subclasses are considered valid dialects; instantiating the
        # base class directly leaves _valid False and fails validation.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()
    def _validate(self):
        # Delegate validation to the C implementation, which type-checks
        # every attribute.
        try:
            _Dialect(self)
        except TypeError, e:
            # We do this for compatibility with py2.3
            raise Error(str(e))
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# Make the dialect available by name to reader()/writer().
register_dialect("excel", excel)
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
# Make the dialect available by name to reader()/writer().
register_dialect("excel-tab", excel_tab)
class DictReader:
    """Iterate over CSV rows as dictionaries keyed by the field names.

    If *fieldnames* is omitted, the first row of the file is consumed and
    used as the keys.  Rows longer than the header are collected under
    *restkey*; short rows are padded with *restval*.
    """
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0
    def __iter__(self):
        return self
    @property
    def fieldnames(self):
        # Lazily read the header row on first access when none was supplied.
        if self._fieldnames is None:
            try:
                self._fieldnames = self.reader.next()
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames
    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value
    def next(self):
        if self.line_num == 0:
            # Used only for its side effect (consuming the header row).
            self.fieldnames
        row = self.reader.next()
        self.line_num = self.reader.line_num
        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = self.reader.next()
        d = dict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            # Extra values beyond the header go under restkey as a list.
            d[self.restkey] = row[lf:]
        elif lf > lr:
            # Short rows: fill the missing keys with restval.
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d
class DictWriter:
    """Write dictionaries out as CSV rows in *fieldnames* order.

    Missing keys are filled with *restval*; unexpected keys either raise
    ValueError or are silently dropped depending on *extrasaction*.
    """
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError, \
                ("extrasaction (%s) must be 'raise' or 'ignore'" %
                 extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)
    def writeheader(self):
        # Emit the field names themselves as the first row.
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)
    def _dict_to_list(self, rowdict):
        # Convert a dict to a list ordered by fieldnames, enforcing the
        # extrasaction policy for keys not in the header.
        if self.extrasaction == "raise":
            wrong_fields = [k for k in rowdict if k not in self.fieldnames]
            if wrong_fields:
                raise ValueError("dict contains fields not in fieldnames: " +
                                 ", ".join(wrong_fields))
        return [rowdict.get(key, self.restval) for key in self.fieldnames]
    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))
    def writerows(self, rowdicts):
        rows = []
        for rowdict in rowdicts:
            rows.append(self._dict_to_list(rowdict))
        return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex():
# fall back to float so has_header()'s type-probing list stays valid.
try:
    complex
except NameError:
    complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
    def __init__(self):
        # in case there is more than one possible delimiter, prefer these,
        # in order, when breaking ties
        self.preferred = [',', '\t', ';', ' ', ':']
    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample.
        Tries the quote-based heuristic first, then falls back to the
        character-frequency heuristic; raises Error if neither finds
        a delimiter.
        """
        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)
        if not delimiter:
            raise Error, "Could not determine delimiter"
        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''
        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace
        return dialect
    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
            ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.
        Returns (quotechar, doublequote, delimiter, skipinitialspace).
        """
        # Try progressively looser patterns; stop at the first that matches.
        matches = []
        for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',  # ,".*?"
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break
        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)
        # Tally how often each candidate quote char and delimiter appears.
        quotes = {}
        delims = {}
        spaces = 0
        for m in matches:
            n = regexp.groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = regexp.groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = regexp.groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1
        # Pick the most frequent quote character.
        quotechar = reduce(lambda a, b, quotes = quotes:
                           (quotes[a] > quotes[b]) and a or b, quotes.keys())
        if delims:
            delim = reduce(lambda a, b, delims = delims:
                           (delims[a] > delims[b]) and a or b, delims.keys())
            # If every delimiter was followed by a space, callers should skip it.
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0
        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':delim, 'quote':quotechar}, re.MULTILINE)
        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False
        return (quotechar, doublequote, delim, skipinitialspace)
    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        Returns (delimiter, skipinitialspace).
        """
        data = filter(None, data.split('\n'))
        ascii = [chr(c) for c in range(127)] # 7-bit ASCII
        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency
            for char in charFrequency.keys():
                items = charFrequency[char].items()
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
                                         items)
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - reduce(lambda a, b: (0, a[1] + b[1]),
                                            items)[1])
                else:
                    modes[char] = items[0]
            # build a list of possible delimiters
            modeList = modes.items()
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            # Progressively relax the consistency requirement until at least
            # one candidate passes (or the threshold floor is reached).
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01
            if len(delims) == 1:
                delim = delims.keys()[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)
            # analyze another chunkLength lines
            start = end
            end += chunkLength
        if not delims:
            return ('', 0)
        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)
        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]
        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)
    def has_header(self, sample):
        """Return True if the first row of *sample* looks like a header.

        Creates a dictionary of types of data in each column. If any
        column is of a single type (say, integers), *except* for the first
        row, then the first row is presumed to be labels. If the type
        can't be determined, it is assumed to be a string in which case
        the length of the string is the determining factor: if all of the
        rows except for the first are the same length, it's a header.
        Finally, a 'vote' is taken at the end for each column, adding or
        subtracting from the likelihood of the first row being a header.
        """
        rdr = reader(StringIO(sample), self.sniff(sample))
        header = rdr.next() # assume first row is header
        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None
        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1
            if len(row) != columns:
                continue # skip rows that have irregular number of columns
            for col in columnTypes.keys():
                # Probe numeric types from narrowest to widest.
                for thisType in [int, long, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])
                # treat longs as ints
                if thisType == long:
                    thisType = int
                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]
        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1
        return hasHeader > 0
| gpl-3.0 | -4,340,046,864,839,008,000 | -1,568,953,552,966,939,600 | 35.239468 | 131 | 0.506639 | false |
hengyicai/OnlineAggregationUCAS | examples/src/main/python/parquet_inputformat.py | 3 | 2240 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import SparkContext
"""
Read data file users.parquet in local Spark distro:
$ cd $SPARK_HOME
$ export AVRO_PARQUET_JARS=/path/to/parquet-avro-1.5.0.jar
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \\
--jars $AVRO_PARQUET_JARS \\
./examples/src/main/python/parquet_inputformat.py \\
examples/src/main/resources/users.parquet
<...lots of log output...>
{u'favorite_color': None, u'name': u'Alyssa', u'favorite_numbers': [3, 9, 15, 20]}
{u'favorite_color': u'red', u'name': u'Ben', u'favorite_numbers': []}
<...more log output...>
"""
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, """
Usage: parquet_inputformat.py <data_file>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \\
/path/to/examples/parquet_inputformat.py <data_file>
Assumes you have Parquet data stored in <data_file>.
"""
exit(-1)
path = sys.argv[1]
sc = SparkContext(appName="ParquetInputFormat")
parquet_rdd = sc.newAPIHadoopFile(
path,
'parquet.avro.AvroParquetInputFormat',
'java.lang.Void',
'org.apache.avro.generic.IndexedRecord',
valueConverter='org.apache.spark.examples.pythonconverters.IndexedRecordToJavaConverter')
output = parquet_rdd.map(lambda x: x[1]).collect()
for k in output:
print k
sc.stop()
| apache-2.0 | -4,760,426,264,765,681,000 | 7,352,111,028,692,291,000 | 35.721311 | 97 | 0.68125 | false |
westinedu/newertrends | django/contrib/gis/sitemaps/georss.py | 291 | 2156 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        This sitemap object initializes on a feed dictionary (as would be passed
        to `django.contrib.syndication.views.feed`) and a slug dictionary.
        If the slug dictionary is not defined, then it's assumed the keys provide
        the URL parameter to the feed.  However, if you have a complex feed (e.g.,
        you override `get_object`), then you'll need to provide a slug dictionary.
        The slug dictionary should have the same keys as the feed dictionary, but
        each value in the slug dictionary should be a sequence of slugs that may
        be used for valid feeds.  For example, let's say we have a feed that
        returns objects for a specific ZIP code in our feed dictionary:
            feed_dict = {'zipcode' : ZipFeed}
        Then we would use a slug dictionary with a list of the zip code slugs
        corresponding to feeds you want listed in the sitemap:
            slug_dict = {'zipcode' : ['77002', '77054']}
        """
        # Setting up.
        self.feed_dict = feed_dict
        self.locations = []
        if slug_dict is None: slug_dict = {}
        # Getting the feed locations: "<section>/<slug>" per slug when slugs
        # were supplied for the section, otherwise the bare section name.
        for section in feed_dict.keys():
            if slug_dict.get(section, False):
                for slug in slug_dict[section]:
                    self.locations.append('%s/%s' % (section, slug))
            else:
                self.locations.append(section)
    def get_urls(self, page=1, site=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls: url['geo_format'] = 'georss'
        return urls
    def items(self):
        # One sitemap entry per feed location computed in __init__.
        return self.locations
    def location(self, obj):
        # Reverse the syndication feed view for the "<section>/<slug>" string.
        return urlresolvers.reverse('django.contrib.syndication.views.feed', args=(obj,))
| bsd-3-clause | 393,402,855,675,557,800 | 5,913,403,707,312,960,000 | 39.679245 | 89 | 0.62384 | false |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/test/test_importlib/import_/test___package__.py | 84 | 4859 | """PEP 366 ("Main module explicit relative imports") specifies the
semantics for the __package__ attribute on modules. This attribute is
used, when available, to detect which package a module belongs to (instead
of using the typical __path__/__name__ test).
"""
import unittest
from .. import util
from . import util as import_util
class Using__package__:
    """Use of __package__ supersedes the use of __name__/__path__ to calculate
    what package a module belongs to. The basic algorithm is [__package__]::
      def resolve_name(name, package, level):
          level -= 1
          base = package.rsplit('.', level)[0]
          return '{0}.{1}'.format(base, name)
    But since there is no guarantee that __package__ has been set (or not been
    set to None [None]), there has to be a way to calculate the attribute's value
    [__name__]::
      def calc_package(caller_name, has___path__):
          if has__path__:
              return caller_name
          else:
              return caller_name.rsplit('.', 1)[0]
    Then the normal algorithm for relative name imports can proceed as if
    __package__ had been set.
    """
    def test_using___package__(self):
        # [__package__]
        with self.mock_modules('pkg.__init__', 'pkg.fake') as importer:
            with util.import_state(meta_path=[importer]):
                self.__import__('pkg.fake')
                module = self.__import__('',
                                            globals={'__package__': 'pkg.fake'},
                                            fromlist=['attr'], level=2)
        self.assertEqual(module.__name__, 'pkg')
    def test_using___name__(self, package_as_None=False):
        # [__name__]: fall back to __name__/__path__ when __package__ is
        # absent (or explicitly None, exercised via package_as_None).
        globals_ = {'__name__': 'pkg.fake', '__path__': []}
        if package_as_None:
            globals_['__package__'] = None
        with self.mock_modules('pkg.__init__', 'pkg.fake') as importer:
            with util.import_state(meta_path=[importer]):
                self.__import__('pkg.fake')
                module = self.__import__('', globals= globals_,
                                         fromlist=['attr'], level=2)
        self.assertEqual(module.__name__, 'pkg')
    def test_None_as___package__(self):
        # [None]
        self.test_using___name__(package_as_None=True)
    def test_bad__package__(self):
        # A nonsense package string cannot be resolved.
        globals = {'__package__': '<not real>'}
        with self.assertRaises(SystemError):
            self.__import__('', globals, {}, ['relimport'], 1)
    def test_bunk__package__(self):
        # __package__ must be a string.
        globals = {'__package__': 42}
        with self.assertRaises(TypeError):
            self.__import__('', globals, {}, ['relimport'], 1)
# PEP 302 (loader-based) variant of the __package__ tests.
class Using__package__PEP302(Using__package__):
    mock_modules = util.mock_modules
Frozen_UsingPackagePEP302, Source_UsingPackagePEP302 = util.test_both(
        Using__package__PEP302, __import__=import_util.__import__)
# PEP 451 (spec-based) variant of the __package__ tests.  The original code
# accidentally reused the name Using__package__PEP302 here, clobbering the
# PEP 302 class defined just above; the name now matches the
# Frozen/Source_UsingPackagePEP451 results it produces.
class Using__package__PEP451(Using__package__):
    mock_modules = util.mock_spec
Frozen_UsingPackagePEP451, Source_UsingPackagePEP451 = util.test_both(
        Using__package__PEP451, __import__=import_util.__import__)
class Setting__package__:
    """Because __package__ is a new feature, it is not always set by a loader.
    Import will set it as needed to help with the transition to relying on
    __package__.
    For a top-level module, __package__ is set to None [top-level]. For a
    package __name__ is used for __package__ [package]. For submodules the
    value is __name__.rsplit('.', 1)[0] [submodule].
    """
    __import__ = import_util.__import__[1]
    # [top-level]
    def test_top_level(self):
        with self.mock_modules('top_level') as mock:
            with util.import_state(meta_path=[mock]):
                # Remove the attribute so import must compute it.
                del mock['top_level'].__package__
                module = self.__import__('top_level')
                self.assertEqual(module.__package__, '')
    # [package]
    def test_package(self):
        with self.mock_modules('pkg.__init__') as mock:
            with util.import_state(meta_path=[mock]):
                del mock['pkg'].__package__
                module = self.__import__('pkg')
                self.assertEqual(module.__package__, 'pkg')
    # [submodule]
    def test_submodule(self):
        with self.mock_modules('pkg.__init__', 'pkg.mod') as mock:
            with util.import_state(meta_path=[mock]):
                del mock['pkg.mod'].__package__
                pkg = self.__import__('pkg.mod')
                module = getattr(pkg, 'mod')
                self.assertEqual(module.__package__, 'pkg')
# Run the __package__-setting tests against legacy PEP 302 loader mocks.
class Setting__package__PEP302(Setting__package__, unittest.TestCase):
    mock_modules = util.mock_modules
# Run the __package__-setting tests against PEP 451 spec mocks.
class Setting__package__PEP451(Setting__package__, unittest.TestCase):
    mock_modules = util.mock_spec
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | 800,191,564,304,666,400 | -8,708,496,466,633,564,000 | 35.533835 | 81 | 0.565754 | false |
40223144/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/__init__.py | 900 | 2718 | """
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
| gpl-3.0 | -4,294,281,602,697,569,300 | 1,441,452,554,953,986,000 | 38.391304 | 80 | 0.732892 | false |
emmuchira/kps_erp | erpnext/hr/doctype/employee_loan/employee_loan.py | 24 | 6387 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, math
import erpnext
from frappe import _
from frappe.utils import flt, rounded, add_months, nowdate
from erpnext.controllers.accounts_controller import AccountsController
class EmployeeLoan(AccountsController):
	"""Employee loan document: validates the loan terms, builds the EMI
	(amortization) schedule and can create the disbursement Journal Entry."""
	def validate(self):
		"""Validate terms and (re)build the repayment schedule and totals."""
		check_repayment_method(self.repayment_method, self.loan_amount, self.monthly_repayment_amount, self.repayment_periods)
		if not self.company:
			self.company = erpnext.get_default_company()
		if not self.posting_date:
			self.posting_date = nowdate()
		if self.loan_type and not self.rate_of_interest:
			# Default the annual interest rate from the chosen Loan Type.
			self.rate_of_interest = frappe.db.get_value("Loan Type", self.loan_type, "rate_of_interest")
		if self.repayment_method == "Repay Over Number of Periods":
			# Derive the fixed monthly installment from the annuity formula.
			self.monthly_repayment_amount = get_monthly_repayment_amount(self.repayment_method, self.loan_amount, self.rate_of_interest, self.repayment_periods)
		self.make_repayment_schedule()
		self.set_repayment_period()
		self.calculate_totals()
	def make_jv_entry(self):
		"""Return (as a dict) an unsaved Bank Entry disbursing the loan:
		debit the employee-loan account, credit the payment account."""
		self.check_permission('write')
		journal_entry = frappe.new_doc('Journal Entry')
		journal_entry.voucher_type = 'Bank Entry'
		journal_entry.user_remark = _('Against Employee Loan: {0}').format(self.name)
		journal_entry.company = self.company
		journal_entry.posting_date = nowdate()
		account_amt_list = []
		# Debit leg: loan receivable against the employee (party).
		account_amt_list.append({
			"account": self.employee_loan_account,
			"party_type": "Employee",
			"party": self.employee,
			"debit_in_account_currency": self.loan_amount,
			"reference_type": "Employee Loan",
			"reference_name": self.name,
			})
		# Credit leg: the bank/payment account the money leaves from.
		account_amt_list.append({
			"account": self.payment_account,
			"credit_in_account_currency": self.loan_amount,
			"reference_type": "Employee Loan",
			"reference_name": self.name,
			})
		journal_entry.set("accounts", account_amt_list)
		return journal_entry.as_dict()
	def make_repayment_schedule(self):
		"""Build monthly schedule rows until the balance reaches zero.

		Each row carries the payment date, principal and interest split,
		total payment and remaining balance. Interest is computed on the
		running balance at rate_of_interest/12 percent per month; the
		rounding order here is significant for the final-row adjustment.
		"""
		self.repayment_schedule = []
		payment_date = self.disbursement_date
		balance_amount = self.loan_amount
		while(balance_amount > 0):
			interest_amount = rounded(balance_amount * flt(self.rate_of_interest) / (12*100))
			principal_amount = self.monthly_repayment_amount - interest_amount
			balance_amount = rounded(balance_amount + interest_amount - self.monthly_repayment_amount)
			if balance_amount < 0:
				# Final installment: shrink the principal so the loan
				# closes exactly at zero instead of overshooting.
				principal_amount += balance_amount
				balance_amount = 0.0
			total_payment = principal_amount + interest_amount
			self.append("repayment_schedule", {
				"payment_date": payment_date,
				"principal_amount": principal_amount,
				"interest_amount": interest_amount,
				"total_payment": total_payment,
				"balance_loan_amount": balance_amount
			})
			next_payment_date = add_months(payment_date, 1)
			payment_date = next_payment_date
	def set_repayment_period(self):
		"""For fixed-amount repayment, the period count is however many
		schedule rows were needed to pay the loan off."""
		if self.repayment_method == "Repay Fixed Amount per Period":
			repayment_periods = len(self.repayment_schedule)
			self.repayment_periods = repayment_periods
	def calculate_totals(self):
		"""Sum total payment and total interest over the schedule rows."""
		self.total_payment = 0
		self.total_interest_payable = 0
		for data in self.repayment_schedule:
			self.total_payment += data.total_payment
			self.total_interest_payable +=data.interest_amount
def update_disbursement_status(doc):
	"""Sync the loan's workflow status (and disbursement date) with the
	total actually disbursed, as recorded by GL Entry debits against it."""
	disbursement = frappe.db.sql("""select posting_date, ifnull(sum(debit_in_account_currency), 0) as disbursed_amount
		from `tabGL Entry` where against_voucher_type = 'Employee Loan' and against_voucher = %s""",
		(doc.name), as_dict=1)[0]
	disbursed = disbursement.disbursed_amount
	if disbursed > doc.loan_amount:
		frappe.throw(_("Disbursed Amount cannot be greater than Loan Amount {0}").format(doc.loan_amount))
	if disbursed == doc.loan_amount:
		frappe.db.set_value("Employee Loan", doc.name , "status", "Fully Disbursed")
	elif disbursed == 0:
		frappe.db.set_value("Employee Loan", doc.name , "status", "Sanctioned")
	elif disbursed < doc.loan_amount:
		frappe.db.set_value("Employee Loan", doc.name , "status", "Partially Disbursed")
	if disbursed > 0:
		frappe.db.set_value("Employee Loan", doc.name , "disbursement_date", disbursement.posting_date)
def check_repayment_method(repayment_method, loan_amount, monthly_repayment_amount, repayment_periods):
	"""Raise a validation error when the inputs required by the selected
	repayment method are missing or inconsistent."""
	if repayment_method == "Repay Over Number of Periods":
		if not repayment_periods:
			frappe.throw(_("Please enter Repayment Periods"))
	elif repayment_method == "Repay Fixed Amount per Period":
		if not monthly_repayment_amount:
			frappe.throw(_("Please enter repayment Amount"))
		if monthly_repayment_amount > loan_amount:
			frappe.throw(_("Monthly Repayment Amount cannot be greater than Loan Amount"))
def get_monthly_repayment_amount(repayment_method, loan_amount, rate_of_interest, repayment_periods):
	"""Return the monthly installment (EMI), rounded up to a whole unit.

	With interest, the standard annuity formula is used; with a zero/empty
	rate the principal is simply split evenly over the periods.
	"""
	if not rate_of_interest:
		return math.ceil(flt(loan_amount) / repayment_periods)
	monthly_rate = flt(rate_of_interest) / (12 * 100)
	growth_factor = (1 + monthly_rate) ** repayment_periods
	return math.ceil(loan_amount * monthly_rate * growth_factor / (growth_factor - 1))
@frappe.whitelist()
def get_employee_loan_application(employee_loan_application):
	"""Fetch the named Employee Loan Application and return it as a dict."""
	application = frappe.get_doc("Employee Loan Application", employee_loan_application)
	return application.as_dict() if application else None
@frappe.whitelist()
def make_jv_entry(employee_loan, company, employee_loan_account, employee, loan_amount, payment_account):
	"""Build (as a dict) an unsaved Bank Entry that disburses loan_amount:
	debit the employee-loan account, credit the payment account."""
	jv = frappe.new_doc('Journal Entry')
	jv.voucher_type = 'Bank Entry'
	jv.user_remark = _('Against Employee Loan: {0}').format(employee_loan)
	jv.company = company
	jv.posting_date = nowdate()
	accounts = [
		{
			"account": employee_loan_account,
			"debit_in_account_currency": loan_amount,
			"reference_type": "Employee Loan",
			"reference_name": employee_loan,
		},
		{
			"account": payment_account,
			"credit_in_account_currency": loan_amount,
			"reference_type": "Employee Loan",
			"reference_name": employee_loan,
		},
	]
	jv.set("accounts", accounts)
	return jv.as_dict()
yyt030/pyzmq | zmqversion.py | 7 | 3916 | """A simply script to scrape zmq.h for the zeromq version.
This is similar to the version.sh script in a zeromq source dir, but
it searches for an installed header, rather than in the current dir.
"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import with_statement
import os
import sys
import re
import traceback
from warnings import warn
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
MAJOR_PAT='^#define +ZMQ_VERSION_MAJOR +[0-9]+$'
MINOR_PAT='^#define +ZMQ_VERSION_MINOR +[0-9]+$'
PATCH_PAT='^#define +ZMQ_VERSION_PATCH +[0-9]+$'
def include_dirs_from_path():
    """Derive candidate include dirs from the executable search PATH.

    Every PATH entry ending in 'bin' (after trimming one trailing slash)
    contributes a sibling 'include' directory.
    """
    include_dirs = []
    for entry in os.environ['PATH'].split(os.path.pathsep):
        trimmed = entry[:-1] if entry.endswith('/') else entry
        if trimmed.endswith('bin'):
            include_dirs.append(trimmed[:-3] + 'include')
    return include_dirs
def default_include_dirs():
    """Fallback include-dir candidates when nothing else is configured."""
    candidates = ('/usr/local/include', '/usr/include')
    return list(candidates)
def find_zmq_version():
    """Locate an installed zmq.h and scrape its version macros.

    Search order: the include_dirs listed in setup.cfg's [build_ext]
    section (if any), otherwise /usr/local/include then /usr/include.

    Returns
    -------
    ((major, minor, patch), "/path/to/zmq.h")

    Raises IOError when no zmq.h can be found.
    """
    include_dirs = []
    if os.path.exists('setup.cfg'):
        cfg = ConfigParser()
        cfg.read('setup.cfg')
        if 'build_ext' in cfg.sections():
            for name, val in cfg.items('build_ext'):
                if name == 'include_dirs':
                    include_dirs = val.split(os.path.pathsep)
    if not include_dirs:
        include_dirs = default_include_dirs()
    for include in include_dirs:
        zmq_h = pjoin(include, 'zmq.h')
        if not os.path.isfile(zmq_h):
            continue
        with open(zmq_h) as f:
            contents = f.read()
        def _macro_value(pattern):
            # Find the #define line, then pull out its integer literal.
            line = re.findall(pattern, contents, re.MULTILINE)[0]
            return int(re.findall('[0-9]+', line)[0])
        version = (_macro_value(MAJOR_PAT),
                   _macro_value(MINOR_PAT),
                   _macro_value(PATCH_PAT))
        return (version, zmq_h)
    raise IOError("Couldn't find zmq.h")
def ver_str(version):
    """Render a version tuple such as (4, 1, 2) as the string '4.1.2'."""
    return '.'.join(str(part) for part in version)
def check_zmq_version(min_version):
    """Check that the installed zmq.h is at least min_version.

    Prints a diagnostic and exits when the header is too old; emits a
    warning (without aborting) when the header cannot be found or parsed.
    """
    sv = ver_str(min_version)
    too_old_hint = "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old."
    requirement = "This pyzmq requires zeromq >= %s"%sv
    try:
        found, zmq_h = find_zmq_version()
        sf = ver_str(found)
        if found < min_version:
            print ("This pyzmq requires zeromq >= %s"%sv)
            print ("but it appears you are building against %s"%zmq_h)
            print ("which has zeromq %s"%sf)
            sys.exit(1)
    except IOError:
        # No zmq.h anywhere on the search path.
        warn('\n'.join(["Couldn't find zmq.h to check for version compatibility.",
                        too_old_hint,
                        requirement]))
    except IndexError:
        # zmq.h found, but the ZMQ_VERSION_* macros were missing.
        warn('\n'.join(["Couldn't find ZMQ_VERSION macros in zmq.h to check for version compatibility.",
                        "This probably means that you have ZeroMQ <= 2.0.9",
                        too_old_hint,
                        requirement]))
    except Exception:
        traceback.print_exc()
        warn('\n'.join(["Unexpected Error checking for zmq version.",
                        too_old_hint,
                        requirement]))
if __name__ == '__main__':
v,h = find_zmq_version()
print (h)
print (ver_str(v))
| bsd-3-clause | -5,514,901,607,545,460,000 | 6,184,563,124,901,542,000 | 31.907563 | 105 | 0.599081 | false |
TheTimmy/spack | var/spack/repos/builtin/packages/r-htmltools/package.py | 1 | 1623 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RHtmltools(RPackage):
    """Tools for HTML generation and output."""
    # Upstream project page and CRAN source tarball location.
    homepage = "https://github.com/rstudio/htmltools"
    url      = "https://cran.r-project.org/src/contrib/htmltools_0.3.5.tar.gz"
    # Known release with its md5 checksum.
    version('0.3.5', '5f001aff4a39e329f7342dcec5139724')
    # R package dependencies needed at both build and run time.
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
| lgpl-2.1 | -1,679,242,033,742,127,000 | 7,395,616,670,651,112,000 | 42.864865 | 78 | 0.666667 | false |
zimolzak/poker-experiments | pokermodules/convenience_hole.py | 1 | 11895 | import numpy
from convenience import reduce_h, find_pcts_multi
from deuces.deuces import Deck, Card
from itertools import combinations, product
import random
all52 = Deck.GetFullDeck()
all_hole_explicit = []
for h in combinations(all52, 2):
all_hole_explicit += [list(h)]
deck_choose_2 = len(all_hole_explicit)
assert deck_choose_2 == 1326
def _all_hole_cards():
    """Yield a shorthand string for each of the 1326 two-card combos.

    Suited combos keep the trailing 's' produced by reduce_h; off-suit
    combos have the trailing 'o' stripped, so the yielded strings look
    like 'AA', 'AKs', 'AK', ... -- lumping hands by suitedness only.
    """
    for hole in all_hole_explicit:
        shorthand = reduce_h(hole)
        if shorthand[2] == 'o':
            shorthand = shorthand[0:2]
        yield shorthand
def numbers_of_hole_cards():
    """Count how many of the 1326 combos map to each of the 169 hands.

    Returns [table, cells]: table is e.g. {'AA': 6, 'AKs': 4, 'AQ': 12, ...}
    and cells lists the hand strings in first-seen order.
    """
    table = {}
    order = []  # records first-seen order of the hand strings
    for hand in _all_hole_cards():
        if hand not in table:
            order.append(hand)
            table[hand] = 0
        table[hand] += 1
    assert sum(table.values()) == deck_choose_2
    return [table, order]
def numbers_of_hole_cards_random(n):
    """Tally n random two-card draws into a hand-count dict.

    Like numbers_of_hole_cards() but sampled rather than enumerated;
    counts are raw (not normalized to n).
    """
    table = {}
    for _ in range(n):
        hole = Deck().draw(2)
        hand = reduce_h(hole)
        if hand[2] == 'o':
            hand = hand[0:2]
        table[hand] = table.get(hand, 0) + 1
    return table
#### Functions for calculating and plotting ranges ####
def range_plot(hands):
    """Render hand strings (e.g. ['AA', 'AKs', 'AK']) as a 13x13 grid.

    Suited hands mark the upper triangle, off-suit hands (and pairs) the
    lower/diagonal; chosen cells become '*', everything else '.'.
    """
    ranks = 'AKQJT98765432'
    grid = numpy.array([[0] * 13] * 13)
    for hand in hands:
        first = ranks.find(hand[0])
        second = ranks.find(hand[1])
        if 's' in hand:
            grid[first][second] = 1
        else:
            grid[second][first] = 1
    text = "\n".join(map(str, grid))
    # Strip numpy's array punctuation and map 0/1 to dot/star.
    for old, new in (('[', ''), (', ', ''), (']', ''), ('0', '.'), ('1', '*')):
        text = text.replace(old, new)
    return text
def add_margins(M_str):
    """Prefix each grid row with its rank letter and add a header line.

    Intended to wrap range_plot's output, e.g.
    print add_margins(range_plot(top_hands_pct(25)))
    """
    ranks = 'AKQJT98765432'
    body = [ranks[i] + ' ' + row for i, row in enumerate(M_str.split('\n'))]
    return '\n'.join(['  A K Q J T 9 8 7 6 5 4 3 2'] + body)
def top_hands_pct(p):
    """Return the top p percent of starting hands, as strings like
    ['AA', 'AKs', 'KJ'], using the pre-ranked lookup table HR."""
    table, _cells = numbers_of_hole_cards()
    total_combos = sum(table.values())
    target = total_combos * (p / 100.0)
    picked = []
    gathered = 0
    for entry in HR:
        hand = entry['h']
        # Take the next-ranked hand only if doing so lands the running
        # combo count closer to the target than stopping here would.
        distance_without = abs(target - gathered)
        distance_with = abs(target - gathered - table[hand])
        if distance_with < distance_without:
            picked.append(hand)
            gathered += table[hand]
    return picked
def find_pcts_range(p1, range_pct, start_b = [], iter = 10000):
"""Equity calculator for hand versus range. Given 1 player's hole
cards and one range expressed as a percent, and an optional board,
what is each player's chance of winning (equity)?
"""
main_winlist = [0, 0]
enum_hands = all_hands_in_range(top_hands_pct(range_pct))
print " villain hands (before elim) N =",
print len(enum_hands)
for i in range(iter):
p2 = []
while not p2:
candidate = random.choice(enum_hands)
if p1[0] in candidate or p1[1] in candidate or candidate[0] in start_b or candidate[1] in start_b:
# print ' ng',
# pr(candidate)
continue
p2 = candidate
## consider just doing one eval, not call to func?
winlist = find_pcts_multi([p1, p2], start_b = start_b, iter = 1)
for i in range(len(winlist)):
main_winlist[i] += winlist [i]
for i in range(len(main_winlist)):
main_winlist[i] /= iter
return main_winlist
def all_hands_in_range(list_of_str):
    """Expand shorthand hand strings into explicit deuces combos.

    Pairs expand to 6 combos, suited hands to 4, and off-suit hands to
    12; each combo is a two-element list of deuces Card objects.
    """
    total_hands = []
    for s in list_of_str:
        if s[0] == s[1]:
            # Pair: choose 2 of the 4 suits of that rank.
            cards = [s[0] + suit for suit in 'shdc']
            for c1, c2 in combinations(cards, 2):
                total_hands.append([Card.new(c1), Card.new(c2)])
        elif 's' in s:
            # Suited: one combo per shared suit.
            for suit in 'shdc':
                total_hands.append([Card.new(s[0] + suit),
                                    Card.new(s[1] + suit)])
        else:
            # Off-suit: every ordered suit pair with differing suits.
            for suit1, suit2 in product('shdc', repeat=2):
                if suit1 == suit2:
                    continue
                total_hands.append([Card.new(s[0] + suit1),
                                    Card.new(s[1] + suit2)])
    return total_hands
#### bunch of data ####
# Source for hole card rank table -
# http://www.tightpoker.com/poker_hands.html
# sum(n) = 115591080
HR = [
{'h':'AA', 'e':2.32, 'n':521324},
{'h':'KK', 'e':1.67, 'n':522652},
{'h':'QQ', 'e':1.22, 'n':520663},
{'h':'JJ', 'e':0.86, 'n':521866},
{'h':'AKs', 'e':0.78, 'n':348364},
{'h':'AQs', 'e':0.59, 'n':348759},
{'h':'TT', 'e':0.58, 'n':520705},
{'h':'AK', 'e':0.51, 'n':1048008},
{'h':'AJs', 'e':0.44, 'n':348126},
{'h':'KQs', 'e':0.39, 'n':346772},
{'h':'99', 'e':0.38, 'n':522454},
{'h':'ATs', 'e':0.32, 'n':348013},
{'h':'AQ', 'e':0.31, 'n':1042962},
{'h':'KJs', 'e':0.29, 'n':346582},
{'h':'88', 'e':0.25, 'n':521972},
{'h':'QJs', 'e':0.23, 'n':348870},
{'h':'KTs', 'e':0.20, 'n':348774},
{'h':'A9s', 'e':0.19, 'n':348992},
{'h':'AJ', 'e':0.19, 'n':1045857},
{'h':'QTs', 'e':0.17, 'n':346115},
{'h':'KQ', 'e':0.16, 'n':1045069},
{'h':'77', 'e':0.16, 'n':524345},
{'h':'JTs', 'e':0.15, 'n':348235},
{'h':'A8s', 'e':0.10, 'n':349431},
{'h':'K9s', 'e':0.09, 'n':348286},
{'h':'AT', 'e':0.08, 'n':1047289},
{'h':'A5s', 'e':0.08, 'n':348544},
{'h':'A7s', 'e':0.08, 'n':349949},
{'h':'KJ', 'e':0.08, 'n':1047098},
{'h':'66', 'e':0.07, 'n':520946},
{'h':'T9s', 'e':0.05, 'n':348264},
{'h':'A4s', 'e':0.05, 'n':347862},
{'h':'Q9s', 'e':0.05, 'n':348760},
{'h':'J9s', 'e':0.04, 'n':349965},
{'h':'QJ', 'e':0.03, 'n':1044338},
{'h':'A6s', 'e':0.03, 'n':347677},
{'h':'55', 'e':0.02, 'n':521945},
{'h':'A3s', 'e':0.02, 'n':347895},
{'h':'K8s', 'e':0.01, 'n':350401},
{'h':'KT', 'e':0.01, 'n':1045392},
{'h':'98s', 'e':0.00, 'n':348759},
{'h':'T8s', 'e':-0.00, 'n':347443},
{'h':'K7s', 'e':-0.00, 'n':348341},
{'h':'A2s', 'e':0.00, 'n':347318},
{'h':'87s', 'e':-0.02, 'n':348348},
{'h':'QT', 'e':-0.02, 'n':1047827},
{'h':'Q8s', 'e':-0.02, 'n':348381},
{'h':'44', 'e':-0.03, 'n':523398},
{'h':'A9', 'e':-0.03, 'n':1047672},
{'h':'J8s', 'e':-0.03, 'n':348046},
{'h':'76s', 'e':-0.03, 'n':347540},
{'h':'JT', 'e':-0.03, 'n':1043812},
{'h':'97s', 'e':-0.04, 'n':350158},
{'h':'K6s', 'e':-0.04, 'n':347029},
{'h':'K5s', 'e':-0.05, 'n':349320},
{'h':'K4s', 'e':-0.05, 'n':348681},
{'h':'T7s', 'e':-0.05, 'n':347638},
{'h':'Q7s', 'e':-0.06, 'n':348073},
{'h':'K9', 'e':-0.07, 'n':1045630},
{'h':'65s', 'e':-0.07, 'n':348590},
{'h':'T9', 'e':-0.07, 'n':1045306},
{'h':'86s', 'e':-0.07, 'n':348374},
{'h':'A8', 'e':-0.07, 'n':1042209},
{'h':'J7s', 'e':-0.07, 'n':345009},
{'h':'33', 'e':-0.07, 'n':522632},
{'h':'54s', 'e':-0.08, 'n':348260},
{'h':'Q6s', 'e':-0.08, 'n':349068},
{'h':'K3s', 'e':-0.08, 'n':348865},
{'h':'Q9', 'e':-0.08, 'n':1049468},
{'h':'75s', 'e':-0.09, 'n':349781},
{'h':'22', 'e':-0.09, 'n':524131},
{'h':'J9', 'e':-0.09, 'n':1044150},
{'h':'64s', 'e':-0.09, 'n':349689},
{'h':'Q5s', 'e':-0.09, 'n':350110},
{'h':'K2s', 'e':-0.09, 'n':349276},
{'h':'96s', 'e':-0.09, 'n':349514},
{'h':'Q3s', 'e':-0.10, 'n':348009},
{'h':'J8', 'e':-0.10, 'n':1046506},
{'h':'98', 'e':-0.10, 'n':1044759},
{'h':'T8', 'e':-0.10, 'n':1048779},
{'h':'97', 'e':-0.10, 'n':1046152},
{'h':'A7', 'e':-0.10, 'n':1046587},
{'h':'T7', 'e':-0.10, 'n':1044950},
{'h':'Q4s', 'e':-0.10, 'n':348979},
{'h':'Q8', 'e':-0.11, 'n':1048251},
{'h':'J5s', 'e':-0.11, 'n':348923},
{'h':'T6', 'e':-0.11, 'n':1043014},
{'h':'75', 'e':-0.11, 'n':1047447},
{'h':'J4s', 'e':-0.11, 'n':347508},
{'h':'74s', 'e':-0.11, 'n':350325},
{'h':'K8', 'e':-0.11, 'n':1048167},
{'h':'86', 'e':-0.11, 'n':1047524},
{'h':'53s', 'e':-0.11, 'n':346930},
{'h':'K7', 'e':-0.11, 'n':1043698},
{'h':'63s', 'e':-0.11, 'n':346449},
{'h':'J6s', 'e':-0.11, 'n':347570},
{'h':'85', 'e':-0.11, 'n':1048159},
{'h':'T6s', 'e':-0.11, 'n':348875},
{'h':'76', 'e':-0.11, 'n':1046722},
{'h':'A6', 'e':-0.12, 'n':1046762},
{'h':'T2', 'e':-0.12, 'n':1047032},
{'h':'95s', 'e':-0.12, 'n':348477},
{'h':'84', 'e':-0.12, 'n':1046266},
{'h':'62', 'e':-0.12, 'n':1049495},
{'h':'T5s', 'e':-0.12, 'n':348928},
{'h':'95', 'e':-0.12, 'n':1044601},
{'h':'A5', 'e':-0.12, 'n':1046285},
{'h':'Q7', 'e':-0.12, 'n':1046099},
{'h':'T5', 'e':-0.12, 'n':1048428},
{'h':'87', 'e':-0.12, 'n':1044635},
{'h':'83', 'e':-0.12, 'n':1048550},
{'h':'65', 'e':-0.12, 'n':1045971},
{'h':'Q2s', 'e':-0.12, 'n':348912},
{'h':'94', 'e':-0.12, 'n':1047422},
{'h':'74', 'e':-0.12, 'n':1043278},
{'h':'54', 'e':-0.12, 'n':1046435},
{'h':'A4', 'e':-0.12, 'n':1046931},
{'h':'T4', 'e':-0.12, 'n':1047976},
{'h':'82', 'e':-0.12, 'n':1043638},
{'h':'64', 'e':-0.12, 'n':1043079},
{'h':'42', 'e':-0.12, 'n':1043357},
{'h':'J7', 'e':-0.12, 'n':1046565},
{'h':'93', 'e':-0.12, 'n':1045989},
{'h':'85s', 'e':-0.12, 'n':347928},
{'h':'73', 'e':-0.12, 'n':1047020},
{'h':'53', 'e':-0.12, 'n':1047022},
{'h':'T3', 'e':-0.12, 'n':1043908},
{'h':'63', 'e':-0.12, 'n':1044818},
{'h':'K6', 'e':-0.12, 'n':1045039},
{'h':'J6', 'e':-0.12, 'n':1045991},
{'h':'96', 'e':-0.12, 'n':1047156},
{'h':'92', 'e':-0.12, 'n':1049342},
{'h':'72', 'e':-0.12, 'n':1046167},
{'h':'52', 'e':-0.12, 'n':1049213},
{'h':'Q4', 'e':-0.13, 'n':1045087},
{'h':'K5', 'e':-0.13, 'n':1047359},
{'h':'J5', 'e':-0.13, 'n':1047697},
{'h':'43s', 'e':-0.13, 'n':348802},
{'h':'Q3', 'e':-0.13, 'n':1047649},
{'h':'43', 'e':-0.13, 'n':1047900},
{'h':'K4', 'e':-0.13, 'n':1046562},
{'h':'J4', 'e':-0.13, 'n':1048129},
{'h':'T4s', 'e':-0.13, 'n':350639},
{'h':'Q6', 'e':-0.13, 'n':1046958},
{'h':'Q2', 'e':-0.13, 'n':1046353},
{'h':'J3s', 'e':-0.13, 'n':349254},
{'h':'J3', 'e':-0.13, 'n':1046204},
{'h':'T3s', 'e':-0.13, 'n':349673},
{'h':'A3', 'e':-0.13, 'n':1046970},
{'h':'Q5', 'e':-0.13, 'n':1047946},
{'h':'J2', 'e':-0.13, 'n':1045715},
{'h':'84s', 'e':-0.13, 'n':349390},
{'h':'82s', 'e':-0.14, 'n':348622},
{'h':'42s', 'e':-0.14, 'n':350591},
{'h':'93s', 'e':-0.14, 'n':348835},
{'h':'73s', 'e':-0.14, 'n':349007},
{'h':'K3', 'e':-0.14, 'n':1045968},
{'h':'J2s', 'e':-0.14, 'n':348259},
{'h':'92s', 'e':-0.14, 'n':347868},
{'h':'52s', 'e':-0.14, 'n':348401},
{'h':'K2', 'e':-0.14, 'n':1048521},
{'h':'T2s', 'e':-0.14, 'n':349612},
{'h':'62s', 'e':-0.14, 'n':348033},
{'h':'32', 'e':-0.14, 'n':1044956},
{'h':'A2', 'e':-0.15, 'n':1047979},
{'h':'83s', 'e':-0.15, 'n':349355},
{'h':'94s', 'e':-0.15, 'n':348259},
{'h':'72s', 'e':-0.15, 'n':348368},
{'h':'32s', 'e':-0.15, 'n':349794},
]
| mit | 6,827,478,078,738,472,000 | -8,068,927,824,809,021,000 | 33.181034 | 110 | 0.479781 | false |
emergebtc/muddery | evennia/evennia/typeclasses/attributes.py | 2 | 21055 | """
Attributes are arbitrary data stored on objects. Attributes supports
both pure-string values and pickled arbitrary data.
Attributes are also used to implement Nicks. This module also contains
the Attribute- and NickHandlers as well as the `NAttributeHandler`,
which is a non-db version of Attributes.
"""
import re
import weakref
from django.db import models
from django.conf import settings
from django.utils.encoding import smart_str
from evennia.locks.lockhandler import LockHandler
from evennia.utils.idmapper.models import SharedMemoryModel
from evennia.utils.dbserialize import to_pickle, from_pickle
from evennia.utils.picklefield import PickledObjectField
from evennia.utils.utils import lazy_property, to_str, make_iter
_TYPECLASS_AGGRESSIVE_CACHE = settings.TYPECLASS_AGGRESSIVE_CACHE
#------------------------------------------------------------
#
# Attributes
#
#------------------------------------------------------------
class Attribute(SharedMemoryModel):
"""
Attributes are things that are specific to different types of objects. For
example, a drink container needs to store its fill level, whereas an exit
needs to store its open/closed/locked/unlocked state. These are done via
attributes, rather than making different classes for each object type and
storing them directly. The added benefit is that we can add/remove
attributes on the fly as we like.
The Attribute class defines the following properties:
key - primary identifier.
lock_storage - perm strings.
obj - which object the attribute is defined on.
date_created - when the attribute was created.
value - the data stored in the attribute, in pickled form
using wrappers to be able to store/retrieve models.
strvalue - string-only data. This data is not pickled and is
thus faster to search for in the database.
category - optional character string for grouping the Attribute.
"""
#
# Attribute Database Model setup
#
# These database fields are all set using their corresponding properties,
# named same as the field, but withtout the db_* prefix.
db_key = models.CharField('key', max_length=255, db_index=True)
db_value = PickledObjectField(
'value', null=True,
help_text="The data returned when the attribute is accessed. Must be "
"written as a Python literal if editing through the admin "
"interface. Attribute values which are not Python literals "
"cannot be edited through the admin interface.")
db_strvalue = models.TextField(
'strvalue', null=True, blank=True,
help_text="String-specific storage for quick look-up")
db_category = models.CharField(
'category', max_length=128, db_index=True, blank=True, null=True,
help_text="Optional categorization of attribute.")
# Lock storage
db_lock_storage = models.TextField(
'locks', blank=True,
help_text="Lockstrings for this object are stored here.")
db_model = models.CharField(
'model', max_length=32, db_index=True, blank=True, null=True,
help_text="Which model of object this attribute is attached to (A "
"natural key like 'objects.dbobject'). You should not change "
"this value unless you know what you are doing.")
# subclass of Attribute (None or nick)
db_attrtype = models.CharField(
'attrtype', max_length=16, db_index=True, blank=True, null=True,
help_text="Subclass of Attribute (None or nick)")
# time stamp
db_date_created = models.DateTimeField(
'date_created', editable=False, auto_now_add=True)
# Database manager
#objects = managers.AttributeManager()
@lazy_property
def locks(self):
return LockHandler(self)
class Meta:
"Define Django meta options"
verbose_name = "Evennia Attribute"
# read-only wrappers
key = property(lambda self: self.db_key)
strvalue = property(lambda self: self.db_strvalue)
category = property(lambda self: self.db_category)
model = property(lambda self: self.db_model)
attrtype = property(lambda self: self.db_attrtype)
date_created = property(lambda self: self.db_date_created)
def __lock_storage_get(self):
return self.db_lock_storage
def __lock_storage_set(self, value):
self.db_lock_storage = value
self.save(update_fields=["db_lock_storage"])
def __lock_storage_del(self):
self.db_lock_storage = ""
self.save(update_fields=["db_lock_storage"])
lock_storage = property(__lock_storage_get, __lock_storage_set, __lock_storage_del)
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# value property (wraps db_value)
#@property
def __value_get(self):
"""
Getter. Allows for `value = self.value`.
We cannot cache here since it makes certain cases (such
as storing a dbobj which is then deleted elsewhere) out-of-sync.
The overhead of unpickling seems hard to avoid.
"""
return from_pickle(self.db_value, db_obj=self)
#@value.setter
def __value_set(self, new_value):
"""
Setter. Allows for self.value = value. We cannot cache here,
see self.__value_get.
"""
self.db_value = to_pickle(new_value)
self.save(update_fields=["db_value"])
#@value.deleter
def __value_del(self):
"Deleter. Allows for del attr.value. This removes the entire attribute."
self.delete()
value = property(__value_get, __value_set, __value_del)
#
#
# Attribute methods
#
#
def __str__(self):
return smart_str("%s(%s)" % (self.db_key, self.id))
def __unicode__(self):
return u"%s(%s)" % (self.db_key,self.id)
    def access(self, accessing_obj, access_type='read', default=False, **kwargs):
        """
        Determines if another object has permission to access this Attribute.

        Args:
            accessing_obj (object): object trying to access this one.
            access_type (optional): type of access sought.
            default (optional): what to return if no lock of access_type
                was found.

        Kwargs:
            **kwargs: currently unused here; the commented-out
                `at_access` hook below would receive them with `result`.

        Returns:
            result: the outcome of the lock check.
        """
        result = self.locks.check(accessing_obj, access_type=access_type, default=default)
        #self.at_access(result, **kwargs)
        return result
#
# Handlers making use of the Attribute model
#
class AttributeHandler(object):
    """
    Handler for adding, querying and removing Attributes on an object.

    Backed by the object's `db_attributes` m2m field; keeps an in-memory
    cache of Attributes keyed on "<key>-<category>" (both lowercased;
    a missing category becomes the literal string "None" via %-formatting).
    """
    _m2m_fieldname = "db_attributes"
    _attrcreate = "attrcreate"
    _attredit = "attredit"
    _attrread = "attrread"
    _attrtype = None

    def __init__(self, obj):
        "Initialize handler"
        self.obj = obj
        self._objid = obj.id
        self._model = to_str(obj.__dbclass__.__name__.lower())
        self._cache = None

    def _recache(self):
        "Re-read and cache all Attributes of this object from the database."
        query = {"%s__id" % self._model : self._objid,
                 "attribute__db_attrtype" : self._attrtype}
        attrs = [conn.attribute for conn in getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query)]
        # cache key is "<key>-<category>"; category None is stored as "None"
        self._cache = dict(("%s-%s" % (to_str(attr.db_key).lower(),
                                       attr.db_category.lower() if attr.db_category else None),
                            attr) for attr in attrs)

    def has(self, key, category=None):
        """
        Checks if the given Attribute (or list of Attributes) exists on
        the object.

        If an iterable is given, returns list of matches.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        # `key` is already a normalized list; no need to re-run make_iter.
        searchkeys = ["%s-%s" % (k, category) for k in key]
        ret = [self._cache.get(skey) for skey in searchkeys if skey in self._cache]
        return ret[0] if len(ret) == 1 else ret

    def get(self, key=None, category=None, default=None, return_obj=False,
            strattr=False, raise_exception=False, accessing_obj=None,
            default_access=True, not_found_none=False):
        """
        Returns the value of the given Attribute or list of Attributes.

        `strattr` will cause the string-only value field instead of the normal
        pickled field data. Use to get back values from Attributes added with
        the `strattr` keyword.

        If `return_obj=True`, return the matching Attribute object
        instead. Returns `default` if no matches (or [ ] if `key` was a list
        with no matches). If `raise_exception=True`, failure to find a
        match will raise `AttributeError` instead.

        If `accessing_obj` is given, its `attrread` permission lock will be
        checked before displaying each looked-after Attribute. If no
        `accessing_obj` is given, no check will be done.

        NOTE: `not_found_none` is accepted but currently unused.
        """
        class RetDefault(object):
            "Holds default values"
            def __init__(self):
                self.value = default
                self.strvalue = str(default) if default is not None else None
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        ret = []
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        if not key:
            # return all with matching category (or no category)
            # Fix: always build a string suffix. A category of None is
            # stored as the literal "None" in the cache key; the old code
            # set catkey=None and str.endswith(None) raised TypeError.
            catkey = "-%s" % category
            ret = [attr for key, attr in self._cache.items() if key and key.endswith(catkey)]
        else:
            for searchkey in ("%s-%s" % (k, category) for k in key):
                attr_obj = self._cache.get(searchkey)
                if attr_obj:
                    ret.append(attr_obj)
                else:
                    if raise_exception:
                        raise AttributeError
                    else:
                        ret.append(RetDefault())
        if accessing_obj:
            # check 'attrread' locks
            ret = [attr for attr in ret if attr.access(accessing_obj, self._attrread, default=default_access)]
        if strattr:
            ret = ret if return_obj else [attr.strvalue for attr in ret if attr]
        else:
            ret = ret if return_obj else [attr.value for attr in ret if attr]
        if not ret:
            return ret if len(key) > 1 else default
        return ret[0] if len(ret) == 1 else ret

    def add(self, key, value, category=None, lockstring="",
            strattr=False, accessing_obj=None, default_access=True):
        """
        Add attribute to object, with optional `lockstring`.

        If `strattr` is set, the `db_strvalue` field will be used (no pickling).
        Use the `get()` method with the `strattr` keyword to get it back.

        If `accessing_obj` is given, `self.obj`'s `attrcreate` lock access
        will be checked against it. If no `accessing_obj` is given, no check
        will be done.

        NOTE: `lockstring` is accepted but not applied in this method.
        """
        if accessing_obj and not self.obj.access(accessing_obj,
                self._attrcreate, default=default_access):
            # check create access
            return
        if self._cache is None:
            self._recache()
        if not key:
            return
        category = category.strip().lower() if category is not None else None
        keystr = key.strip().lower()
        cachekey = "%s-%s" % (keystr, category)
        attr_obj = self._cache.get(cachekey)
        if attr_obj:
            # update an existing attribute object
            if strattr:
                # store as a simple string (will not notify OOB handlers)
                attr_obj.db_strvalue = value
                attr_obj.save(update_fields=["db_strvalue"])
            else:
                # store normally (this will also notify OOB handlers)
                attr_obj.value = value
        else:
            # create a new Attribute (no OOB handlers can be notified)
            kwargs = {"db_key" : keystr, "db_category" : category,
                      "db_model" : self._model, "db_attrtype" : self._attrtype,
                      "db_value" : None if strattr else to_pickle(value),
                      "db_strvalue" : value if strattr else None}
            new_attr = Attribute(**kwargs)
            new_attr.save()
            getattr(self.obj, self._m2m_fieldname).add(new_attr)
            self._cache[cachekey] = new_attr

    def batch_add(self, key, value, category=None, lockstring="",
                  strattr=False, accessing_obj=None, default_access=True):
        """
        Batch-version of `add()`. This is more efficient than
        repeat-calling add.

        `key` and `value` must be sequences of the same length, each
        representing a key-value pair.
        """
        if accessing_obj and not self.obj.access(accessing_obj,
                self._attrcreate, default=default_access):
            # check create access
            return
        if self._cache is None:
            self._recache()
        if not key:
            return
        keys, values = make_iter(key), make_iter(value)
        if len(keys) != len(values):
            # Fix: the format args must be a tuple; previously `value` was
            # passed as a second positional argument to RuntimeError and
            # the %-format itself raised TypeError.
            raise RuntimeError("AttributeHandler.add(): key and value of different length: %s vs %s" % (key, value))
        category = category.strip().lower() if category is not None else None
        new_attrobjs = []
        for ikey, keystr in enumerate(keys):
            keystr = keystr.strip().lower()
            new_value = values[ikey]
            cachekey = "%s-%s" % (keystr, category)
            attr_obj = self._cache.get(cachekey)
            if attr_obj:
                # update an existing attribute object
                if strattr:
                    # store as a simple string (will not notify OOB handlers)
                    attr_obj.db_strvalue = new_value
                    attr_obj.save(update_fields=["db_strvalue"])
                else:
                    # store normally (this will also notify OOB handlers)
                    attr_obj.value = new_value
            else:
                # create a new Attribute (no OOB handlers can be notified)
                kwargs = {"db_key" : keystr, "db_category" : category,
                          # Fix: db_model was missing here (cf. add()), so
                          # batch-created Attributes lacked their model tag.
                          "db_model" : self._model,
                          "db_attrtype" : self._attrtype,
                          "db_value" : None if strattr else to_pickle(new_value),
                          # Fix: store this pair's value, not the whole
                          # input sequence (was `value`).
                          "db_strvalue" : new_value if strattr else None}
                new_attr = Attribute(**kwargs)
                new_attr.save()
                new_attrobjs.append(new_attr)
        if new_attrobjs:
            # Add new objects to m2m field all at once
            getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)
            self._recache()

    def remove(self, key, raise_exception=False, category=None,
               accessing_obj=None, default_access=True):
        """
        Remove attribute or a list of attributes from object.

        If `accessing_obj` is given, will check against the `attredit` lock.
        If not given, this check is skipped.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        for searchstr in ("%s-%s" % (k, category) for k in key):
            attr_obj = self._cache.get(searchstr)
            if attr_obj:
                if not (accessing_obj and not attr_obj.access(accessing_obj,
                        self._attredit, default=default_access)):
                    attr_obj.delete()
            elif not attr_obj and raise_exception:
                raise AttributeError
        self._recache()

    def clear(self, category=None, accessing_obj=None, default_access=True):
        """
        Remove all Attributes on this object. If `accessing_obj` is
        given, check the `attredit` lock on each Attribute before
        continuing. If not given, skip check.

        NOTE: `category` is accepted but not used to filter here.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        if accessing_obj:
            [attr.delete() for attr in self._cache.values()
             if attr.access(accessing_obj, self._attredit, default=default_access)]
        else:
            [attr.delete() for attr in self._cache.values()]
        self._recache()

    def all(self, accessing_obj=None, default_access=True):
        """
        Return all Attribute objects on this object, sorted by id.

        If `accessing_obj` is given, check a lock on each attribute
        before returning them. NOTE(review): the lock checked here is
        `attredit`, although `attrread` might be expected for a read
        operation - kept as-is to preserve behavior.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        attrs = sorted(self._cache.values(), key=lambda o: o.id)
        if accessing_obj:
            return [attr for attr in attrs
                    if attr.access(accessing_obj, self._attredit, default=default_access)]
        else:
            return attrs
class NickHandler(AttributeHandler):
    """
    Handles the addition and removal of Nicks
    (uses Attributes' `strvalue` and `category` fields).

    Nicks are stored as Attributes with attrtype "nick" and
    categories such as "inputline" or "channel".
    """
    _attrtype = "nick"

    def has(self, key, category="inputline"):
        """Check if a nick with this key/category exists."""
        return super(NickHandler, self).has(key, category=category)

    def get(self, key=None, category="inputline", **kwargs):
        "Get the replacement value matching the given key and category"
        return super(NickHandler, self).get(key=key, category=category, strattr=True, **kwargs)

    def add(self, key, replacement, category="inputline", **kwargs):
        "Add a new nick"
        super(NickHandler, self).add(key, replacement, category=category, strattr=True, **kwargs)

    def remove(self, key, category="inputline", **kwargs):
        "Remove Nick with matching category"
        super(NickHandler, self).remove(key, category=category, **kwargs)

    def nickreplace(self, raw_string, categories=("inputline", "channel"), include_player=True):
        """Replace the first matching nick in raw_string with its replacement.

        Fix: removed a bare no-op `raw_string` expression statement that
        previously sat at the top of this method.
        """
        obj_nicks, player_nicks = [], []
        for category in make_iter(categories):
            obj_nicks.extend([n for n in make_iter(self.get(category=category, return_obj=True)) if n])
        if include_player and self.obj.has_player:
            for category in make_iter(categories):
                player_nicks.extend([n for n in make_iter(self.obj.player.nicks.get(category=category, return_obj=True)) if n])
        for nick in obj_nicks + player_nicks:
            # case-insensitive match, anchored at the start of the string
            match = re.match(re.escape(nick.db_key), raw_string, re.IGNORECASE)
            if match:
                raw_string = raw_string.replace(match.group(), nick.db_strvalue, 1)
                break
        return raw_string
class NAttributeHandler(object):
    """
    Stand-alone handler for non-database ("N") attributes; the backend
    used by the `.ndb` shortcut in the same way `.db` uses the
    `AttributeHandler`. Values live only in memory.
    """
    def __init__(self, obj):
        "Set up the in-memory store on the object."
        self._store = {}
        self.obj = weakref.proxy(obj)

    def has(self, key):
        "Return True if `key` is stored on this handler."
        return key in self._store

    def get(self, key):
        "Return the stored value for `key`, or None if unset."
        return self._store.get(key)

    def add(self, key, value):
        "Store `value` under `key`."
        self._store[key] = value
        self.obj.set_recache_protection()

    def remove(self, key):
        "Remove `key` from storage if present."
        self._store.pop(key, None)
        self.obj.set_recache_protection(self._store)

    def clear(self):
        "Drop every stored nattribute."
        self._store = {}

    def all(self, return_tuples=False):
        "List keys, or (key, value) pairs, skipping keys starting with '_'."
        visible = [(key, value) for (key, value) in self._store.items()
                   if not key.startswith("_")]
        if return_tuples:
            return visible
        return [key for key, _ in visible]
| bsd-3-clause | 2,230,562,988,828,682,800 | -2,597,210,016,988,933,000 | 39.104762 | 127 | 0.601615 | false |
GuillaumeGomez/servo | tests/wpt/css-tests/tools/wptserve/wptserve/pipes.py | 87 | 14196 | from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
    """Flatten a response's content iterator into a single byte string.

    Python 2 only (uses `unicode`); text content is encoded using the
    response's declared encoding before being returned.
    """
    rv = "".join(item for item in response.iter_content(read_file=True))
    if type(rv) == unicode:
        rv = rv.encode(response.encoding)
    return rv
class Pipeline(object):
    """A callable chain of pipe functions parsed from a pipe string."""

    # name -> registered pipe function (populated by the @pipe decorator)
    pipes = {}

    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)

    def parse(self, pipe_string):
        """Parse `pipe_string` into a list of (function, [string args])."""
        parsed = []
        for token in PipeTokenizer().tokenize(pipe_string):
            if not token:
                break
            kind, value = token
            if kind == "function":
                parsed.append((self.pipes[value], []))
            elif kind == "argument":
                parsed[-1][1].append(value)
        return parsed

    def __call__(self, request, response):
        """Run each pipe in order, threading the response through."""
        for func, args in self.pipe_functions:
            response = func(request, response, *args)
        return response
class PipeTokenizer(object):
    """Tokenizer for pipe strings like "func1(arg1,arg2)|func2()".

    `tokenize` yields ("function", name) and ("argument", value) tuples,
    followed by a terminating None sentinel.
    """
    def __init__(self):
        #This whole class can likely be replaced by some regexps
        self.state = None

    def tokenize(self, string):
        """Generator over tokens of `string`; ends with a None sentinel."""
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None

    def get_char(self):
        """Return the next character, or None when input is exhausted."""
        if self._index >= len(self.string):
            return None
        rv = self.string[self._index]
        self._index += 1
        return rv

    def func_name_state(self):
        """Accumulate a function name until '(' , '|' or end of input."""
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                if rv:
                    return ("function", rv)
                else:
                    return None
            elif char == "(":
                self.state = self.argument_state
                return ("function", rv)
            elif char == "|":
                # an empty name before '|' is silently skipped
                if rv:
                    return ("function", rv)
            else:
                rv += char

    def argument_state(self):
        """Accumulate one argument until ',' , ')' or end of input."""
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                return ("argument", rv)
            elif char == "\\":
                escaped = self.get_escape()
                if escaped is None:
                    # Fix: input ended with a dangling backslash. The old
                    # code did `rv += None` (its `if rv is None` guard was
                    # dead) and raised TypeError; treat it as end of input.
                    self.state = None
                    return ("argument", rv)
                rv += escaped
            elif char == ",":
                return ("argument", rv)
            elif char == ")":
                self.state = self.func_name_state
                return ("argument", rv)
            else:
                rv += char

    def get_escape(self):
        """Return the character an escape sequence denotes (None at EOF)."""
        char = self.get_char()
        if char is None:
            return None
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(char, char)
class pipe(object):
    """Decorator registering a function as a named pipe in Pipeline.pipes.

    Positional arguments are converter callables applied to the string
    arguments parsed from the pipe string; wrap a converter in `opt` to
    mark it (and every converter after it) optional.
    """
    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        opt_seen = False
        for item in self.arg_converters:
            if not opt_seen:
                if isinstance(item, opt):
                    opt_seen = True
                else:
                    self.min_args += 1
            else:
                if not isinstance(item, opt):
                    raise ValueError("Non-optional argument cannot follow optional argument")
    def __call__(self, f):
        def inner(request, response, *args):
            """Validate the arg count, convert args, then call the pipe."""
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            # NOTE: the loop variable f here deliberately shadows the
            # decorated function - it is each converter in turn.
            arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
            return f(request, response, *arg_values)
        Pipeline.pipes[f.__name__] = inner
        #We actually want the undecorated function in the main namespace
        return f
class opt(object):
    """Wrapper marking a pipe argument converter as optional."""
    def __init__(self, f):
        self.f = f

    def __call__(self, arg):
        """Apply the wrapped converter to `arg`."""
        return self.f(arg)
def nullable(func):
    """Wrap a converter so the literal string "null" (any case) maps to None."""
    def convert(arg):
        return None if arg.lower() == "null" else func(arg)
    return convert
def boolean(arg):
    """Convert "true"/"1" to True and "false"/"0" to False (case-insensitive).

    Raises ValueError for anything else.
    """
    lowered = arg.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError
@pipe(int)
def status(request, response, code):
    """Alter the status code.

    :param code: Status code to use for the response."""
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
    """Set a HTTP header.

    Replaces any existing HTTP header of the same name unless
    append is set, in which case the header is appended without
    replacement.

    :param name: Name of the header to set.
    :param value: Value to use for the header.
    :param append: True if existing headers should not be replaced
    """
    if not append:
        response.headers.set(name, value)
    else:
        response.headers.append(name, value)
    return response
@pipe(str)
def trickle(request, response, delays):
    """Send the response in parts, with time delays.

    :param delays: A string of delays and amounts, in bytes, of the
                   response to send. Each component is separated by
                   a colon. Amounts in bytes are plain integers, whilst
                   delays are floats prefixed with a single d e.g.

                     d1:100:d2

                   Would cause a 1 second delay, would then send 100 bytes
                   of the file, and then cause a 2 second delay, before sending
                   the remainder of the file.

                   If the last token is of the form rN, instead of sending the
                   remainder of the file, the previous N instructions will be
                   repeated until the whole file has been sent e.g.

                     d1:100:d2:r2

                   Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
                   and then a further 100 bytes followed by a two second delay
                   until the response has been fully sent.
    """
    def parse_delays():
        # Turn e.g. "d1:100:r2" into [["delay", 1.0], ["bytes", 100], ...],
        # merging adjacent items of the same type.
        parts = delays.split(":")
        rv = []
        for item in parts:
            if item.startswith("d"):
                item_type = "delay"
                item = item[1:]
                value = float(item)
            elif item.startswith("r"):
                item_type = "repeat"
                value = int(item[1:])
                if not value % 2 == 0:
                    raise ValueError
            else:
                item_type = "bytes"
                value = int(item)
            if len(rv) and rv[-1][0] == item_type:
                rv[-1][1] += value
            else:
                # Fix: must append a (mutable) list; the old code appended
                # a tuple, so the merging branch above raised TypeError
                # whenever two adjacent items had the same type.
                rv.append([item_type, value])
        return rv
    delays = parse_delays()
    if not delays:
        return response
    content = resolve_content(response)
    offset = [0]  # one-element list so the nested generator can mutate it
    def add_content(delays, repeat=False):
        # Generator yielding chunks of `content`, sleeping between them
        # as instructed; a trailing "repeat" loops the preceding
        # instructions until the whole content has been sent.
        for i, (item_type, value) in enumerate(delays):
            if item_type == "bytes":
                yield content[offset[0]:offset[0] + value]
                offset[0] += value
            elif item_type == "delay":
                time.sleep(value)
            elif item_type == "repeat":
                if i != len(delays) - 1:
                    continue
                while offset[0] < len(content):
                    for item in add_content(delays[-(value + 1):-1], True):
                        yield item
        if not repeat and offset[0] < len(content):
            yield content[offset[0]:]
    response.content = add_content(delays)
    return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.

    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    # NOTE: intentionally shadows the builtin `slice`; the pipe is
    # registered under this name in Pipeline.pipes.
    content = resolve_content(response)
    response.content = content[start:end]
    return response
class ReplacementTokenizer(object):
    """Tokenizes the contents of {{...}} substitutions for `template`.

    NOTE: the callbacks below deliberately take (scanner, token) rather
    than (self, ...): they are referenced as plain functions at
    class-body time when the re.Scanner below is constructed.
    """
    def ident(scanner, token):
        return ("ident", token)
    def index(scanner, token):
        # strip the surrounding [ ]; numeric indices become ints
        token = token[1:-1]
        try:
            token = int(token)
        except ValueError:
            token = unicode(token, "utf8")  # Python 2 only
        return ("index", token)
    def var(scanner, token):
        # strip the trailing ':' of a "$name:" assignment prefix
        token = token[:-1]
        return ("var", token)
    def tokenize(self, string):
        """Return the list of (type, value) tokens scanned from `string`."""
        return self.scanner.scan(string)[0]
    scanner = re.Scanner([(r"\$\w+:", var),
                          (r"\$?\w+(?:\(\))?", ident),
                          (r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Indexable facade over a multi-dict: returns the first value for a
    key, or "" when the key is missing."""
    def __init__(self, params):
        self.params = params

    def __getitem__(self, key):
        try:
            value = self.params.first(key)
        except KeyError:
            value = ""
        return value
@pipe(opt(nullable(str)))
def sub(request, response, escape_type="html"):
    """Substitute environment information about the server and request into the script.

    :param escape_type: String detailing the type of escaping to use. Known values are
                        "html" and "none", with "html" the default for historic reasons.

    The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several available substitutions:

    host
      A simple string value and represents the primary host from which the
      tests are being run.
    domains
      A dictionary of available domains indexed by subdomain name.
    ports
      A dictionary of lists of ports indexed by protocol.
    location
      A dictionary of parts of the request URL. Valid keys are
      'server', 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
      'server' is scheme://host:port, 'host' is hostname:port, and query
      includes the leading '?', but other delimiters are omitted.
    headers
      A dictionary of HTTP headers in the request.
    GET
      A dictionary of query parameters supplied with the request.
    uuid()
      A pseudo-random UUID suitable for usage with stash

    So for example in a setup running on localhost with a www
    subdomain and a http server on ports 80 and 81::

      {{host}} => localhost
      {{domains[www]}} => www.localhost
      {{ports[http][1]}} => 81

    It is also possible to assign a value to a variable name, which must start with
    the $ character, using the ":" syntax e.g.

      {{$id:uuid()}}

    Later substitutions in the same file may then refer to the variable
    by name e.g.

      {{$id}}
    """
    content = resolve_content(response)
    new_content = template(request, content, escape_type=escape_type)
    response.content = new_content
    return response
def template(request, content, escape_type="html"):
    """Expand {{...}} substitutions in `content` (see `sub` for syntax).

    Python 2 only (uses `unicode` and types.StringTypes). Returns the
    substituted content as a UTF-8 encoded byte string per replacement.
    """
    #TODO: There basically isn't any error handling here
    tokenizer = ReplacementTokenizer()
    variables = {}
    def config_replacement(match):
        # NOTE: `content` here shadows the outer parameter - it is the
        # text between the {{ }} braces for this single match.
        content, = match.groups()
        tokens = tokenizer.tokenize(content)
        if tokens[0][0] == "var":
            # "$name:" prefix - remember the value under this name
            variable = tokens[0][1]
            tokens = tokens[1:]
        else:
            variable = None
        assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
        field = tokens[0][1]
        if field in variables:
            value = variables[field]
        elif field == "headers":
            value = request.headers
        elif field == "GET":
            value = FirstWrapper(request.GET)
        elif field in request.server.config:
            value = request.server.config[tokens[0][1]]
        elif field == "location":
            value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
                                               request.url_parts.hostname,
                                               request.url_parts.port),
                     "scheme": request.url_parts.scheme,
                     "host": "%s:%s" % (request.url_parts.hostname,
                                        request.url_parts.port),
                     "hostname": request.url_parts.hostname,
                     "port": request.url_parts.port,
                     "path": request.url_parts.path,
                     "pathname": request.url_parts.path,
                     "query": "?%s" % request.url_parts.query}
        elif field == "uuid()":
            value = str(uuid.uuid4())
        elif field == "url_base":
            value = request.url_base
        else:
            raise Exception("Undefined template variable %s" % field)
        # apply any [index] accessors in order
        for item in tokens[1:]:
            value = value[item[1]]
        assert isinstance(value, (int,) + types.StringTypes), tokens
        if variable is not None:
            variables[variable] = value
        escape_func = {"html": lambda x:escape(x, quote=True),
                       "none": lambda x:x}[escape_type]
        #Should possibly support escaping for other contexts e.g. script
        #TODO: read the encoding of the response
        return escape_func(unicode(value)).encode("utf-8")
    template_regexp = re.compile(r"{{([^}]*)}}")
    new_content = template_regexp.sub(config_replacement, content)
    return new_content
@pipe()
def gzip(request, response):
    """This pipe gzip-encodes response data.

    It sets (or overwrites) these HTTP headers:
    Content-Encoding is set to gzip
    Content-Length is set to the length of the compressed content
    """
    # NOTE: intentionally shadows the stdlib module name `gzip`; the real
    # module is imported as gzip_module at the top of this file.
    content = resolve_content(response)
    response.headers.set("Content-Encoding", "gzip")
    out = StringIO()
    with gzip_module.GzipFile(fileobj=out, mode="w") as f:
        f.write(content)
    response.content = out.getvalue()
    response.headers.set("Content-Length", len(response.content))
    return response
| mpl-2.0 | -2,190,520,524,825,067,000 | 4,569,089,272,285,059,000 | 30.616927 | 97 | 0.547126 | false |
s20121035/rk3288_android5.1_repo | external/chromium_org/chrome/tools/build/repack_locales.py | 25 | 10221 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to repack paks for a list of locales.
Gyp doesn't have any built-in looping capability, so this just provides a way to
loop over a list of locales when repacking pak files, thus avoiding a
proliferation of mostly duplicate, cut-n-paste gyp actions.
"""
import optparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..',
'tools', 'grit'))
from grit.format import data_pack
# The gyp "branding" variable.
BRANDING = None
# Some build paths defined by gyp (set from command-line flags in DoMain).
GRIT_DIR = None
SHARE_INT_DIR = None
INT_DIR = None
# The target platform. If it is not defined, sys.platform will be used.
OS = None
# Note that OS is normally set to 'linux' when building for chromeos.
CHROMEOS = False
# Feature flags, set from the corresponding command-line options in DoMain.
USE_ASH = False
USE_ATHENA = False
ENABLE_AUTOFILL_DIALOG = False
ENABLE_EXTENSIONS = False
# Path to a resource-ID whitelist file used to filter output paks, or None.
WHITELIST = None
# Extra input files.
EXTRA_INPUT_FILES = []
class Usage(Exception):
  """Error raised for command-line usage problems.

  Fix: the message is now forwarded to Exception.__init__ so that
  str(exc) shows it (previously str() of this exception was empty).
  """
  def __init__(self, msg):
    super(Usage, self).__init__(msg)
    self.msg = msg
def calc_output(locale):
  """Determine the file that will be generated for the given locale."""
  #e.g. '<(INTERMEDIATE_DIR)/repack/da.pak',
  # For Fake Bidi, generate it at a fixed path so that tests can safely
  # reference it.
  if locale == 'fake-bidi':
    return '%s/%s.pak' % (INT_DIR, locale)
  if OS == 'mac' or OS == 'ios':
    # For Cocoa to find the locale at runtime, it needs to use '_' instead
    # of '-' (http://crbug.com/20441).  Also, 'en-US' should be represented
    # simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
    if locale == 'en-US':
      locale = 'en'
    return '%s/repack/%s.lproj/locale.pak' % (INT_DIR, locale.replace('-', '_'))
  else:
    # Other platforms use a flat '<locale>.pak' layout under repack/.
    return os.path.join(INT_DIR, 'repack', locale + '.pak')
def calc_inputs(locale):
  """Determine the files that need processing for the given locale.

  Returns a list of .pak paths; which paths are included depends on the
  module-level platform/feature flags set by DoMain.
  """
  inputs = []
  #e.g. '<(grit_out_dir)/generated_resources_da.pak'
  inputs.append(os.path.join(GRIT_DIR, 'generated_resources_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/locale_settings_da.pak'
  inputs.append(os.path.join(GRIT_DIR, 'locale_settings_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/platform_locale_settings_da.pak'
  inputs.append(os.path.join(GRIT_DIR,
                'platform_locale_settings_%s.pak' % locale))
  #e.g. '<(SHARED_INTERMEDIATE_DIR)/components/strings/
  # components_strings_da.pak',
  inputs.append(os.path.join(SHARE_INT_DIR, 'components', 'strings',
                'components_strings_%s.pak' % locale))
  if USE_ASH:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ash/strings/ash_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ash', 'strings',
                  'ash_strings_%s.pak' % locale))
  if USE_ATHENA:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/athena/strings/athena_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'athena', 'strings',
                  'athena_strings_%s.pak' % locale))
  if CHROMEOS:
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'chromeos', 'strings',
                  'ui_chromeos_strings_%s.pak' % locale))
  if OS != 'ios':
    #e.g.
    # '<(SHARED_INTERMEDIATE_DIR)/content/app/strings/content_strings_da.pak'
    inputs.append(os.path.join(SHARE_INT_DIR, 'content', 'app', 'strings',
                  'content_strings_%s.pak' % locale))
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/strings/ui_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'strings',
                  'ui_strings_%s.pak' % locale))
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/strings/app_locale_settings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'strings',
                  'app_locale_settings_%s.pak' % locale))
  if ENABLE_AUTOFILL_DIALOG and OS != 'ios' and OS != 'android':
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/third_party/libaddressinput/
    # address_input_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'third_party', 'libaddressinput',
                  'address_input_strings_%s.pak' % locale))
  if ENABLE_EXTENSIONS:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/device/bluetooth/strings/
    # device_bluetooth_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'device', 'bluetooth', 'strings',
                  'device_bluetooth_strings_%s.pak' % locale))
    # For example:
    # '<(SHARED_INTERMEDIATE_DIR)/extensions/strings/extensions_strings_da.pak
    # TODO(jamescook): When Android stops building extensions code move this
    # to the OS != 'ios' and OS != 'android' section below.
    inputs.append(os.path.join(SHARE_INT_DIR, 'extensions', 'strings',
                  'extensions_strings_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/google_chrome_strings_da.pak'
  # or
  # '<(grit_out_dir)/chromium_strings_da.pak'
  inputs.append(os.path.join(
      GRIT_DIR, '%s_strings_%s.pak' % (BRANDING, locale)))
  # Add any extra input files.
  for extra_file in EXTRA_INPUT_FILES:
    inputs.append('%s_%s.pak' % (extra_file, locale))
  return inputs
def list_outputs(locales):
  """Return a quoted, space-separated list of output pak paths.

  This is to provide gyp the list of output files, so build targets can
  properly track what needs to be built.
  """
  # Quote each path so filename spaces don't mess up gyp's list parsing.
  return " ".join('"%s"' % calc_output(locale) for locale in locales)
def list_inputs(locales):
  """Return a quoted, space-separated list of all input pak paths.

  This is to provide gyp the list of input files, so build targets can
  properly track their prerequisites.
  """
  inputs = []
  for locale in locales:
    inputs.extend(calc_inputs(locale))
  # Quote each path so filename spaces don't mess up gyp's list parsing.
  return " ".join('"%s"' % x for x in inputs)
def repack_locales(locales):
  """Repack each locale's input pak files into its output pak."""
  for locale in locales:
    data_pack.DataPack.RePack(calc_output(locale), calc_inputs(locale),
                              whitelist_file=WHITELIST)
def DoMain(argv):
  """Parse command-line flags into module globals and run the chosen mode.

  Modes: -i prints the input list, -o prints the output list (both
  returned as strings); otherwise the locales are repacked (returns None).
  """
  global BRANDING
  global GRIT_DIR
  global SHARE_INT_DIR
  global INT_DIR
  global OS
  global CHROMEOS
  global USE_ASH
  global USE_ATHENA
  global WHITELIST
  global ENABLE_AUTOFILL_DIALOG
  global ENABLE_EXTENSIONS
  global EXTRA_INPUT_FILES
  parser = optparse.OptionParser("usage: %prog [options] locales")
  parser.add_option("-i", action="store_true", dest="inputs", default=False,
                    help="Print the expected input file list, then exit.")
  parser.add_option("-o", action="store_true", dest="outputs", default=False,
                    help="Print the expected output file list, then exit.")
  parser.add_option("-g", action="store", dest="grit_dir",
                    help="GRIT build files output directory.")
  parser.add_option("-x", action="store", dest="int_dir",
                    help="Intermediate build files output directory.")
  parser.add_option("-s", action="store", dest="share_int_dir",
                    help="Shared intermediate build files output directory.")
  parser.add_option("-b", action="store", dest="branding",
                    help="Branding type of this build.")
  parser.add_option("-e", action="append", dest="extra_input", default=[],
                    help="Full path to an extra input pak file without the\
                         locale suffix and \".pak\" extension.")
  parser.add_option("-p", action="store", dest="os",
                    help="The target OS. (e.g. mac, linux, win, etc.)")
  parser.add_option("--use-ash", action="store", dest="use_ash",
                    help="Whether to include ash strings")
  parser.add_option("--use-athena", action="store", dest="use_athena",
                    help="Whether to include athena strings")
  parser.add_option("--chromeos", action="store",
                    help="Whether building for Chrome OS")
  parser.add_option("--whitelist", action="store", help="Full path to the "
                    "whitelist used to filter output pak file resource IDs")
  parser.add_option("--enable-autofill-dialog", action="store",
                    dest="enable_autofill_dialog",
                    help="Whether to include strings for autofill dialog")
  parser.add_option("--enable-extensions", action="store",
                    dest="enable_extensions",
                    help="Whether to include strings for extensions")
  options, locales = parser.parse_args(argv)
  if not locales:
    parser.error('Please specificy at least one locale to process.\n')
  print_inputs = options.inputs
  print_outputs = options.outputs
  GRIT_DIR = options.grit_dir
  INT_DIR = options.int_dir
  SHARE_INT_DIR = options.share_int_dir
  BRANDING = options.branding
  EXTRA_INPUT_FILES = options.extra_input
  OS = options.os
  # Boolean-ish flags arrive as the string '1' from gyp.
  CHROMEOS = options.chromeos == '1'
  USE_ASH = options.use_ash == '1'
  USE_ATHENA = options.use_athena == '1'
  WHITELIST = options.whitelist
  ENABLE_AUTOFILL_DIALOG = options.enable_autofill_dialog == '1'
  ENABLE_EXTENSIONS = options.enable_extensions == '1'
  if not OS:
    # Fall back to detecting the host platform.
    if sys.platform == 'darwin':
      OS = 'mac'
    elif sys.platform.startswith('linux'):
      OS = 'linux'
    elif sys.platform in ('cygwin', 'win32'):
      OS = 'win'
    else:
      OS = sys.platform
  if not (GRIT_DIR and INT_DIR and SHARE_INT_DIR):
    parser.error('Please specify all of "-g" and "-x" and "-s".\n')
  if print_inputs and print_outputs:
    parser.error('Please specify only one of "-i" or "-o".\n')
  # Need to know the branding, unless we're just listing the outputs.
  if not print_outputs and not BRANDING:
    parser.error('Please specify "-b" to determine the input files.\n')
  if print_inputs:
    return list_inputs(locales)
  if print_outputs:
    return list_outputs(locales)
  return repack_locales(locales)
if __name__ == '__main__':
  # DoMain returns a string only in the -i/-o listing modes; the repack
  # mode returns None, in which case nothing is printed.
  results = DoMain(sys.argv[1:])
  if results:
    print results
| gpl-3.0 | -6,712,947,654,045,847,000 | -6,947,169,058,668,931,000 | 35.634409 | 80 | 0.646316 | false |
ygol/odoo | addons/procurement_jit/__init__.py | 374 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import procurement_jit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,174,385,104,008,570,000 | 5,634,837,287,500,168,000 | 43.916667 | 79 | 0.611317 | false |
vladikr/nova_drafts | nova/objects/network_request.py | 3 | 2348 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
from nova import utils
class NetworkRequest(obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added pci_request_id
VERSION = '1.0'
fields = {
'network_id': fields.StringField(nullable=True),
'address': fields.IPAddressField(nullable=True),
'port_id': fields.UUIDField(nullable=True),
'pci_request_id': fields.UUIDField(nullable=True),
}
def obj_load_attr(self, attr):
setattr(self, attr, None)
def to_tuple(self):
address = str(self.address) if self.address is not None else None
if utils.is_neutron():
return self.network_id, address, self.port_id, self.pci_request_id
else:
return self.network_id, address
@classmethod
def from_tuple(cls, net_tuple):
if len(net_tuple) == 4:
network_id, address, port_id, pci_request_id = net_tuple
return cls(network_id=network_id, address=address,
port_id=port_id, pci_request_id=pci_request_id)
else:
network_id, address = net_tuple
return cls(network_id=network_id, address=address)
class NetworkRequestList(obj_base.ObjectListBase, obj_base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('NetworkRequest'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
}
VERSION = '1.1'
def as_tuples(self):
return [x.to_tuple() for x in self.objects]
@property
def is_single_unspecified(self):
return ((len(self.objects) == 1) and
(self.objects[0].to_tuple() == NetworkRequest().to_tuple()))
| apache-2.0 | 4,517,824,373,873,138,000 | 8,567,848,443,520,045,000 | 33.028986 | 78 | 0.63586 | false |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/tr/tr_provinces.py | 316 | 2191 | # -*- coding: utf-8 -*-
"""
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
PROVINCE_CHOICES = (
('01', ('Adana')),
('02', ('Adıyaman')),
('03', ('Afyonkarahisar')),
('04', ('Ağrı')),
('68', ('Aksaray')),
('05', ('Amasya')),
('06', ('Ankara')),
('07', ('Antalya')),
('75', ('Ardahan')),
('08', ('Artvin')),
('09', ('Aydın')),
('10', ('Balıkesir')),
('74', ('Bartın')),
('72', ('Batman')),
('69', ('Bayburt')),
('11', ('Bilecik')),
('12', ('Bingöl')),
('13', ('Bitlis')),
('14', ('Bolu')),
('15', ('Burdur')),
('16', ('Bursa')),
('17', ('Çanakkale')),
('18', ('Çankırı')),
('19', ('Çorum')),
('20', ('Denizli')),
('21', ('Diyarbakır')),
('81', ('Düzce')),
('22', ('Edirne')),
('23', ('Elazığ')),
('24', ('Erzincan')),
('25', ('Erzurum')),
('26', ('Eskişehir')),
('27', ('Gaziantep')),
('28', ('Giresun')),
('29', ('Gümüşhane')),
('30', ('Hakkari')),
('31', ('Hatay')),
('76', ('Iğdır')),
('32', ('Isparta')),
('33', ('Mersin')),
('34', ('İstanbul')),
('35', ('İzmir')),
('78', ('Karabük')),
('36', ('Kars')),
('37', ('Kastamonu')),
('38', ('Kayseri')),
('39', ('Kırklareli')),
('40', ('Kırşehir')),
('41', ('Kocaeli')),
('42', ('Konya')),
('43', ('Kütahya')),
('44', ('Malatya')),
('45', ('Manisa')),
('46', ('Kahramanmaraş')),
('70', ('Karaman')),
('71', ('Kırıkkale')),
('79', ('Kilis')),
('47', ('Mardin')),
('48', ('Muğla')),
('49', ('Muş')),
('50', ('Nevşehir')),
('51', ('Niğde')),
('52', ('Ordu')),
('80', ('Osmaniye')),
('53', ('Rize')),
('54', ('Sakarya')),
('55', ('Samsun')),
('56', ('Siirt')),
('57', ('Sinop')),
('58', ('Sivas')),
('73', ('Şırnak')),
('59', ('Tekirdağ')),
('60', ('Tokat')),
('61', ('Trabzon')),
('62', ('Tunceli')),
('63', ('Şanlıurfa')),
('64', ('Uşak')),
('65', ('Van')),
('77', ('Yalova')),
('66', ('Yozgat')),
('67', ('Zonguldak')),
)
| apache-2.0 | 4,616,186,035,980,959,000 | -3,690,462,013,604,697,600 | 23.146067 | 74 | 0.374128 | false |
bwrsandman/OpenUpgrade | addons/account_analytic_analysis/migrations/8.0.1.1/pre-migration.py | 9 | 1607 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# if account_analytic_analysis_recurring is installed, uninstall it and
# move relevant xmlids to this module
cr.execute(
"update ir_model_data set module='account_analytic_analysis' "
"where name in ('account_analytic_cron_for_invoice') "
"and module='account_analytic_analysis_recurring'")
cr.execute(
"update ir_module_module set state='to remove' "
"where name='account_analytic_analysis_recurring' "
"and state in ('installed', 'to install', 'to upgrade')")
| agpl-3.0 | -1,413,086,231,831,258,600 | 876,114,761,760,188,700 | 44.914286 | 78 | 0.633479 | false |
matejc/searx | tests/unit/engines/test_qwant.py | 1 | 9798 | from collections import defaultdict
import mock
from searx.engines import qwant
from searx.testing import SearxTestCase
class TestQwantEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 0
dicto['language'] = 'fr-FR'
qwant.categories = ['']
params = qwant.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('web', params['url'])
self.assertIn('qwant.com', params['url'])
self.assertIn('fr_fr', params['url'])
dicto['language'] = 'all'
qwant.categories = ['news']
params = qwant.request(query, dicto)
self.assertFalse('fr' in params['url'])
self.assertIn('news', params['url'])
def test_response(self):
self.assertRaises(AttributeError, qwant.response, None)
self.assertRaises(AttributeError, qwant.response, [])
self.assertRaises(AttributeError, qwant.response, '')
self.assertRaises(AttributeError, qwant.response, '[]')
response = mock.Mock(text='{}')
self.assertEqual(qwant.response(response), [])
response = mock.Mock(text='{"data": {}}')
self.assertEqual(qwant.response(response), [])
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"items": [
{
"title": "Title",
"score": 9999,
"url": "http://www.url.xyz",
"source": "...",
"desc": "Description",
"date": "",
"_id": "db0aadd62c2a8565567ffc382f5c61fa",
"favicon": "https://s.qwant.com/fav.ico"
}
],
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
qwant.categories = ['general']
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://www.url.xyz')
self.assertEqual(results[0]['content'], 'Description')
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"items": [
{
"title": "Title",
"score": 9999,
"url": "http://www.url.xyz",
"source": "...",
"media": "http://image.jpg",
"desc": "",
"thumbnail": "http://thumbnail.jpg",
"date": "",
"_id": "db0aadd62c2a8565567ffc382f5c61fa",
"favicon": "https://s.qwant.com/fav.ico"
}
],
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
qwant.categories = ['images']
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://www.url.xyz')
self.assertEqual(results[0]['content'], '')
self.assertEqual(results[0]['thumbnail_src'], 'http://thumbnail.jpg')
self.assertEqual(results[0]['img_src'], 'http://image.jpg')
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"items": [
{
"title": "Title",
"score": 9999,
"url": "http://www.url.xyz",
"source": "...",
"desc": "Description",
"date": 1433260920,
"_id": "db0aadd62c2a8565567ffc382f5c61fa",
"favicon": "https://s.qwant.com/fav.ico"
}
],
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
qwant.categories = ['news']
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://www.url.xyz')
self.assertEqual(results[0]['content'], 'Description')
self.assertIn('publishedDate', results[0])
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"items": [
{
"title": "Title",
"score": 9999,
"url": "http://www.url.xyz",
"source": "...",
"desc": "Description",
"date": 1433260920,
"_id": "db0aadd62c2a8565567ffc382f5c61fa",
"favicon": "https://s.qwant.com/fav.ico"
}
],
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
qwant.categories = ['social media']
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://www.url.xyz')
self.assertEqual(results[0]['content'], 'Description')
self.assertIn('publishedDate', results[0])
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"items": [
{
"title": "Title",
"score": 9999,
"url": "http://www.url.xyz",
"source": "...",
"desc": "Description",
"date": 1433260920,
"_id": "db0aadd62c2a8565567ffc382f5c61fa",
"favicon": "https://s.qwant.com/fav.ico"
}
],
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
qwant.categories = ['']
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"result": {
"filters": []
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{
"status": "success",
"data": {
"query": {
"locale": "en_us",
"query": "Test",
"offset": 10
},
"cache": {
"key": "e66aa864c00147a0e3a16ff7a5efafde",
"created": 1433092754,
"expiration": 259200,
"status": "miss",
"age": 0
}
}
}
"""
response = mock.Mock(text=json)
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{
"status": "success"
}
"""
response = mock.Mock(text=json)
results = qwant.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
| agpl-3.0 | 4,309,232,267,806,087,700 | 8,657,995,429,735,106,000 | 29.908517 | 77 | 0.423046 | false |
machinalis/django-srd20 | srd20/migrations/0005_auto__chg_field_spell_altname.py | 1 | 3803 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Spell.altname'
db.alter_column('spell', 'altname', self.gf('django.db.models.fields.SlugField')(max_length=64))
# Adding index on 'Spell', fields ['altname']
db.create_index('spell', ['altname'])
def backwards(self, orm):
# Removing index on 'Spell', fields ['altname']
db.delete_index('spell', ['altname'])
# Changing field 'Spell.altname'
db.alter_column('spell', 'altname', self.gf('django.db.models.fields.CharField')(max_length=64))
models = {
'srd20.spell': {
'Meta': {'ordering': "('name',)", 'object_name': 'Spell', 'db_table': "'spell'"},
'altname': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'arcane_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'arcane_material_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'cleric_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'components': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'descriptor': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'druid_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'focus': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'material_components': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'spellcraft_dc': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'subschool': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'to_develop': ('django.db.models.fields.TextField', [], {}),
'verbal_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'xp_cost': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['srd20']
| bsd-3-clause | -3,948,223,630,564,087,000 | 4,875,915,446,067,198,000 | 59.365079 | 124 | 0.556929 | false |
Tejal011089/trufil-erpnext | erpnext/patches/v4_0/fields_to_be_renamed.py | 101 | 3062 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
from frappe.modules import scrub, get_doctype_module
rename_map = {
"Quotation Item": [
["ref_rate", "price_list_rate"],
["base_ref_rate", "base_price_list_rate"],
["adj_rate", "discount_percentage"],
["export_rate", "rate"],
["basic_rate", "base_rate"],
["amount", "base_amount"],
["export_amount", "amount"]
],
"Sales Order Item": [
["ref_rate", "price_list_rate"],
["base_ref_rate", "base_price_list_rate"],
["adj_rate", "discount_percentage"],
["export_rate", "rate"],
["basic_rate", "base_rate"],
["amount", "base_amount"],
["export_amount", "amount"],
["reserved_warehouse", "warehouse"]
],
"Delivery Note Item": [
["ref_rate", "price_list_rate"],
["base_ref_rate", "base_price_list_rate"],
["adj_rate", "discount_percentage"],
["export_rate", "rate"],
["basic_rate", "base_rate"],
["amount", "base_amount"],
["export_amount", "amount"]
],
"Sales Invoice Item": [
["ref_rate", "price_list_rate"],
["base_ref_rate", "base_price_list_rate"],
["adj_rate", "discount_percentage"],
["export_rate", "rate"],
["basic_rate", "base_rate"],
["amount", "base_amount"],
["export_amount", "amount"]
],
"Supplier Quotation Item": [
["import_ref_rate", "price_list_rate"],
["purchase_ref_rate", "base_price_list_rate"],
["discount_rate", "discount_percentage"],
["import_rate", "rate"],
["purchase_rate", "base_rate"],
["amount", "base_amount"],
["import_amount", "amount"]
],
"Purchase Order Item": [
["import_ref_rate", "price_list_rate"],
["purchase_ref_rate", "base_price_list_rate"],
["discount_rate", "discount_percentage"],
["import_rate", "rate"],
["purchase_rate", "base_rate"],
["amount", "base_amount"],
["import_amount", "amount"]
],
"Purchase Receipt Item": [
["import_ref_rate", "price_list_rate"],
["purchase_ref_rate", "base_price_list_rate"],
["discount_rate", "discount_percentage"],
["import_rate", "rate"],
["purchase_rate", "base_rate"],
["amount", "base_amount"],
["import_amount", "amount"]
],
"Purchase Invoice Item": [
["import_ref_rate", "price_list_rate"],
["purchase_ref_rate", "base_price_list_rate"],
["discount_rate", "discount_percentage"],
["import_rate", "rate"],
["rate", "base_rate"],
["amount", "base_amount"],
["import_amount", "amount"],
["expense_head", "expense_account"]
],
"Item": [
["purchase_account", "expense_account"],
["default_sales_cost_center", "selling_cost_center"],
["cost_center", "buying_cost_center"],
["default_income_account", "income_account"],
],
"Item Price": [
["ref_rate", "price_list_rate"]
]
}
def execute():
for dn in rename_map:
frappe.reload_doc(get_doctype_module(dn), "doctype", scrub(dn))
for dt, field_list in rename_map.items():
for field in field_list:
rename_field(dt, field[0], field[1])
| agpl-3.0 | -2,893,998,842,033,914,400 | 3,849,963,513,909,921,000 | 27.091743 | 68 | 0.621489 | false |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/core/files/utils.py | 901 | 1230 | class FileProxyMixin(object):
"""
A mixin class used to forward file methods to an underlaying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
def __iter__(self):
return iter(self.file)
| apache-2.0 | 2,336,998,900,659,673,000 | -5,836,552,659,706,038,000 | 41.413793 | 69 | 0.688618 | false |
ThiagoGarciaAlves/intellij-community | plugins/hg4idea/testData/bin/hgext/schemes.py | 96 | 3372 | # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""extend schemes with shortcuts to repository swarms
This extension allows you to specify shortcuts for parent URLs with a
lot of repositories to act like a scheme, for example::
[schemes]
py = http://code.python.org/hg/
After that you can use it like::
hg clone py://trunk/
Additionally there is support for some more complex schemas, for
example used by Google Code::
[schemes]
gcode = http://{1}.googlecode.com/hg/
The syntax is taken from Mercurial templates, and you have unlimited
number of variables, starting with ``{1}`` and continuing with
``{2}``, ``{3}`` and so on. This variables will receive parts of URL
supplied, split by ``/``. Anything not specified as ``{part}`` will be
just appended to an URL.
For convenience, the extension adds these schemes by default::
[schemes]
py = http://hg.python.org/
bb = https://bitbucket.org/
bb+ssh = ssh://hg@bitbucket.org/
gcode = https://{1}.googlecode.com/hg/
kiln = https://{1}.kilnhg.com/Repo/
You can override a predefined scheme by defining a new scheme with the
same name.
"""
import os, re
from mercurial import extensions, hg, templater, util
from mercurial.i18n import _
testedwith = 'internal'
class ShortRepository(object):
def __init__(self, url, scheme, templater):
self.scheme = scheme
self.templater = templater
self.url = url
try:
self.parts = max(map(int, re.findall(r'\{(\d+)\}', self.url)))
except ValueError:
self.parts = 0
def __repr__(self):
return '<ShortRepository: %s>' % self.scheme
def instance(self, ui, url, create):
# Should this use the util.url class, or is manual parsing better?
try:
url = url.split('://', 1)[1]
except IndexError:
raise util.Abort(_("no '://' in scheme url '%s'") % url)
parts = url.split('/', self.parts)
if len(parts) > self.parts:
tail = parts[-1]
parts = parts[:-1]
else:
tail = ''
context = dict((str(i + 1), v) for i, v in enumerate(parts))
url = ''.join(self.templater.process(self.url, context)) + tail
return hg._peerlookup(url).instance(ui, url, create)
def hasdriveletter(orig, path):
if path:
for scheme in schemes:
if path.startswith(scheme + ':'):
return False
return orig(path)
schemes = {
'py': 'http://hg.python.org/',
'bb': 'https://bitbucket.org/',
'bb+ssh': 'ssh://hg@bitbucket.org/',
'gcode': 'https://{1}.googlecode.com/hg/',
'kiln': 'https://{1}.kilnhg.com/Repo/'
}
def extsetup(ui):
schemes.update(dict(ui.configitems('schemes')))
t = templater.engine(lambda x: x)
for scheme, url in schemes.items():
if (os.name == 'nt' and len(scheme) == 1 and scheme.isalpha()
and os.path.exists('%s:\\' % scheme)):
raise util.Abort(_('custom scheme %s:// conflicts with drive '
'letter %s:\\\n') % (scheme, scheme.upper()))
hg.schemes[scheme] = ShortRepository(url, scheme, t)
extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter)
| apache-2.0 | 8,286,395,999,085,114,000 | -3,000,498,264,674,075,600 | 31.423077 | 76 | 0.620996 | false |
jalilag/apspir | objedit/gnosis/xml/pickle/test/test_4list.py | 3 | 1604 |
"exercise all 4 list-writing methods --fpm"
import gnosis.xml.pickle as xml_pickle
import sys
import funcs
funcs.set_parser()
class foo: pass
f = foo()
f.a = (1,2,3)
# method 1 -- StreamWriter is an uncompressed StringIO
x = xml_pickle.dumps(f)
# check header (to ensure correct method used) + contents
if x[0:5] == '<?xml':
print "OK"
else:
print "ERROR"
sys.exit(1)
g = xml_pickle.loads(x)
if g.a == (1,2,3):
print "OK"
else:
print "ERROR"
sys.exit(1)
# method 2 -- StreamWriter is a compressed StringIO
x = xml_pickle.dumps(f,1)
# check header + contents
if x[0:2] == '\037\213':
print "OK"
else:
print "ERROR"
sys.exit(1)
g = xml_pickle.loads(x)
if g.a == (1,2,3):
print "OK"
else:
print "ERROR"
sys.exit(1)
# method 3 -- StreamWriter is an uncompressed file
fh = open('aaa','wb')
xml_pickle.dump(f,fh)
fh.close()
# check header + contents
fh = open('aaa','rb')
line = fh.read(5)
if line == '<?xml':
print "OK"
else:
print "ERROR"
sys.exit(1)
fh.close()
fh = open('aaa','rb')
g = xml_pickle.load(fh)
if g.a == (1,2,3):
print "OK"
else:
print "ERROR"
sys.exit(1)
fh.close()
# method 4 -- StreamWriter is a compressed file
fh = open('aaa','wb')
xml_pickle.dump(f,fh,1)
fh.close()
# check header + contents
fh = open('aaa','rb')
line = fh.read(2)
if line == '\037\213':
print "OK"
else:
print "ERROR"
sys.exit(1)
fh.close()
fh = open('aaa','rb')
g = xml_pickle.load(fh)
if g.a == (1,2,3):
print "OK"
else:
print "ERROR"
sys.exit(1)
fh.close()
funcs.unlink('aaa')
| lgpl-2.1 | 578,665,360,886,507,500 | 55,260,253,574,899,140 | 15.20202 | 57 | 0.59414 | false |
dhenrygithub/QGIS | python/plugins/processing/algs/lidar/lastools/las2lasPro_transform.py | 3 | 4150 | # -*- coding: utf-8 -*-
"""
***************************************************************************
las2lasPro_transform.py
---------------------
Date : October 2014 and May 2016
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterSelection
class las2lasPro_transform(LAStoolsAlgorithm):
OPERATION = "OPERATION"
OPERATIONS = ["---", "set_point_type", "set_point_size", "set_version_minor", "set_version_major", "start_at_point", "stop_at_point", "remove_vlr", "week_to_adjusted", "adjusted_to_week", "auto_reoffset", "scale_rgb_up", "scale_rgb_down", "remove_all_vlrs", "remove_extra", "clip_to_bounding_box"]
OPERATIONARG = "OPERATIONARG"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('las2lasPro_transform')
self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
self.addParametersPointInputFolderGUI()
self.addParametersTransform1CoordinateGUI()
self.addParametersTransform2CoordinateGUI()
self.addParametersTransform1OtherGUI()
self.addParametersTransform2OtherGUI()
self.addParameter(ParameterSelection(las2lasPro_transform.OPERATION,
self.tr("operations (first 8 need an argument)"),
las2lasPro_transform.OPERATIONS, 0))
self.addParameter(ParameterString(las2lasPro_transform.OPERATIONARG,
self.tr("argument for operation")))
self.addParametersOutputDirectoryGUI()
self.addParametersOutputAppendixGUI()
self.addParametersPointOutputFormatGUI()
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
if (LAStoolsUtils.hasWine()):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2las.exe")]
else:
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2las")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
self.addParametersTransform1CoordinateCommands(commands)
self.addParametersTransform2CoordinateCommands(commands)
self.addParametersTransform1OtherCommands(commands)
self.addParametersTransform2OtherCommands(commands)
operation = self.getParameterValue(las2lasPro_transform.OPERATION)
if operation != 0:
commands.append("-" + las2lasPro_transform.OPERATIONS[operation])
if operation > 8:
commands.append(self.getParameterValue(las2lasPro_transform.OPERATIONARG))
self.addParametersOutputDirectoryCommands(commands)
self.addParametersOutputAppendixCommands(commands)
self.addParametersPointOutputFormatCommands(commands)
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 | -2,052,274,198,502,975,200 | 1,882,930,963,933,773,300 | 49.609756 | 301 | 0.617108 | false |
murat1985/bagpipe-bgp | bagpipe/bgp/engine/__init__.py | 2 | 7221 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module classes related to producing and consuming events related to
BGP routes.
routes: RouteEntry
events: RouteEvent
an announcement or a withdrawal of a BGP route
workers: Worker
* produce events
* subscribe to the route table manager to consume events related to
certain BGP routes
route table manager (singleton)
* tracks subscriptions of workers
* dispatches events based on subscriptions
"""
import logging
from bagpipe.exabgp.structure.address import AFI, SAFI
from bagpipe.exabgp.message.update.attribute.communities import RouteTarget
from bagpipe.exabgp.message.update.attribute import AttributeID
from bagpipe.exabgp.message.update.attributes import Attributes
from bagpipe.bgp.common.looking_glass import LookingGlass, \
LookingGlassReferences
log = logging.getLogger(__name__)
class RouteEntry(LookingGlass):
    """A BGP route, i.e. the association of:

    * a BGP NLRI of a specific type (e.g. a VPNv4 route
      like "1.2.3.4:5:192.168.0.5/32")
    * BGP attributes
    * the source of the BGP route (e.g. the BGP peer, or the local VPN
      instance, that advertises the route)
    """

    def __init__(self, afi, safi, routeTargets, nlri, attributes, source):
        assert(isinstance(afi, AFI))
        assert(isinstance(safi, SAFI))
        assert(isinstance(attributes, Attributes))
        self.afi = afi
        self.safi = safi
        self.nlri = nlri
        self.attributes = attributes
        self.source = source
        # a list of exabgp.message.update.attribute.communities.RouteTarget:
        self.routeTargets = routeTargets

    def __cmp__(self, other):
        # Only equality is meaningful here: 0 when equivalent, -1 otherwise.
        if not isinstance(other, RouteEntry):
            return -1
        if (self.afi == other.afi
                and self.safi == other.safi
                and self.source == other.source
                and self.nlri == other.nlri
                and self.attributes.sameValuesAs(other.attributes)):
            return 0
        return -1

    def __hash__(self):  # FIXME: improve for better performance ?
        key = "%d/%d %s %d %s" % (self.afi, self.safi, self.source,
                                  hash(self.nlri), hash(self.attributes)
                                  )
        return hash(key)

    def __repr__(self):
        origin = " from:%s" % self.source if self.source else ""
        return "[RouteEntry: %s %s %s %s RT:%s%s]" % (
            self.afi, self.safi, self.nlri, self.attributes,
            self.routeTargets, origin)

    def getLookingGlassLocalInfo(self, pathPrefix):
        # Attributes we care less about are left out of the report.
        skipped = (AttributeID.AS_PATH, AttributeID.ORIGIN,
                   AttributeID.LOCAL_PREF)
        attributesDict = {}
        for (attributeId, value) in self.attributes.iteritems():
            if attributeId in skipped:
                continue
            attributesDict[str(AttributeID(attributeId)).lower()] = repr(value)

        res = {"afi-safi": "%s/%s" % (self.afi, self.safi),
               "attributes": attributesDict}

        if self.source:
            res["source"] = {
                "id": self.source.name,
                "href": LookingGlassReferences.getAbsolutePath(
                    "BGP_WORKERS", pathPrefix, [self.source.name])
            }

        if (self.safi) in [SAFI.mpls_vpn, SAFI.evpn]:
            res["route_targets"] = [repr(rt) for rt in self.routeTargets]

        return {repr(self.nlri): res}
class RouteEvent(object):
    """An advertisement or a withdrawal of a RouteEntry."""

    # event types
    ADVERTISE = 1
    WITHDRAW = 2
    type2name = {ADVERTISE: "Advertise",
                 WITHDRAW: "Withdraw"}

    def __init__(self, eventType, routeEntry, source=None):
        assert(eventType in RouteEvent.type2name)
        assert(isinstance(routeEntry, RouteEntry))
        self.type = eventType
        self.routeEntry = routeEntry
        # default to the source carried by the route entry itself
        self.source = source if source is not None else routeEntry.source
        self.replacedRoute = None

    def setReplacedRoute(self, replacedRoute):
        '''Called only by RouteTableManager, replacedRoute should be a
        RouteEntry'''
        assert(isinstance(replacedRoute, RouteEntry)
               or (replacedRoute is None))
        assert(replacedRoute != self.routeEntry)
        self.replacedRoute = replacedRoute

    def __repr__(self):
        replacesStr = ("replaces one route" if self.replacedRoute
                       else "replaces no route")
        return "[RouteEvent(%s): %s %s %s]" % (replacesStr,
                                               RouteEvent.type2name[self.type],
                                               self.routeEntry,
                                               self.source)
class _SubUnsubCommon(object):
    """Common state shared by Subscription and Unsubscription requests."""

    def __init__(self, afi, safi, routeTarget, worker=None):
        assert(isinstance(afi, AFI))
        assert(isinstance(safi, SAFI))
        assert(routeTarget is None or isinstance(routeTarget, RouteTarget))
        self.afi = afi
        self.safi = safi
        self.routeTarget = routeTarget
        self.worker = worker

    def __repr__(self):
        if self.worker:
            byWorker = " by %s" % self.worker.name
        else:
            byWorker = ""
        return "%s [%s/%s,%s]%s" % (self.__class__.__name__,
                                    self.afi or "*", self.safi or "*",
                                    self.routeTarget or "*", byWorker)
class Subscription(_SubUnsubCommon):
    """A subscription to RouteEvents.

    A subscription specifies the AFI, the SAFI, and the Route Target of the
    RouteEntry for which the subscriber wants to receive events.

    Any of these (afi, safi or route target) can be replaced by a wildcard:

    * Subscription.ANY_AFI
    * Subscription.ANY_SAFI
    * Subscription.ANY_RT
    """

    ANY_AFI = AFI(0)
    ANY_SAFI = SAFI(0)
    ANY_RT = None

    def __init__(self, afi, safi, routeTarget=None, worker=None):
        super(Subscription, self).__init__(afi, safi, routeTarget, worker)
class Unsubscription(_SubUnsubCommon):
    """Withdrawal of a previous Subscription to RouteEvents."""

    def __init__(self, afi, safi, routeTarget=None, worker=None):
        super(Unsubscription, self).__init__(afi, safi, routeTarget, worker)
| apache-2.0 | -432,022,435,660,101,060 | -1,088,874,457,414,168,000 | 32.430556 | 79 | 0.595485 | false |
jimi-c/ansible | lib/ansible/modules/source_control/gitlab_group.py | 7 | 7742 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_group
short_description: Creates/updates/deletes Gitlab Groups
description:
- When the group does not exist in Gitlab, it will be created.
- When the group does exist and state=absent, the group will be deleted.
- As of Ansible version 2.7, this module make use of a different python module and thus some arguments are deprecated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- python-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the group you want to create.
required: true
path:
description:
- The path of the group you want to create, this will be server_url/group_path
- If not supplied, the group_name will be used.
required: false
default: null
description:
description:
- A description for the group.
required: false
default: null
version_added: "2.7"
state:
description:
- create or delete group.
- Possible values are present and absent.
required: false
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: "Delete Gitlab Group"
local_action:
gitlab_group:
server_url: http://gitlab.dj-wasabi.local
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_group
state: absent
- name: "Create Gitlab Group"
local_action:
gitlab_group:
server_url: https://gitlab.dj-wasabi.local"
validate_certs: True
login_user: dj-wasabi
login_password: "MySecretPassword"
name: my_first_group
path: my_first_group
state: present
'''
RETURN = '''# '''
try:
    import gitlab
    HAS_GITLAB_PACKAGE = True
except ImportError:
    # python-gitlab is an optional dependency: main() reports a clean error
    # instead of a traceback when it is missing.  The previous bare `except:`
    # also swallowed unrelated errors (e.g. KeyboardInterrupt, broken module),
    # masking real problems as "package missing".
    HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class GitLabGroup(object):
    """Thin wrapper around a python-gitlab connection for group management.

    After a successful existsGroup() call, the matching group object is
    cached in self.groupObject and reused by the other methods.
    """

    def __init__(self, module, git):
        self._module = module
        self._gitlab = git
        self.groupObject = None

    def createOrUpdateGroup(self, name, path, description):
        """Create the group if needed and sync its description.

        Returns True when something changed, False otherwise."""
        changed = False
        if self.groupObject is None:
            group = self._gitlab.groups.create({'name': name, 'path': path})
            changed = True
        else:
            group = self.groupObject

        if description is not None and group.description != description:
            group.description = description
            changed = True

        if not changed:
            return False

        if self._module.check_mode:
            self._module.exit_json(changed=True, result="Group should have updated.")
        try:
            group.save()
        except Exception as e:
            self._module.fail_json(msg="Failed to create or update a group: %s " % e)
        return True

    def deleteGroup(self):
        """Delete the cached group; fails when it still contains projects."""
        group = self.groupObject
        if not group.projects.list():
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            try:
                group.delete()
            except Exception as e:
                self._module.fail_json(msg="Failed to delete a group: %s " % e)
            return True
        else:
            self._module.fail_json(
                msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")

    def existsGroup(self, name):
        """When group/user exists, object will be stored in self.groupObject."""
        # NOTE: returns True only for exactly one match; otherwise returns
        # None (falsy), which callers treat as "does not exist".
        matches = self._gitlab.groups.list(search=name)
        if len(matches) == 1:
            self.groupObject = matches[0]
            return True
def main():
    """Ansible entry point: create, update or delete a Gitlab group.

    Ensures the group called ``name`` exists (state=present) or is gone
    (state=absent); always terminates via module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True, type='str'),
            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True, type='str'),
            login_password=dict(required=False, no_log=True, type='str'),
            login_token=dict(required=False, no_log=True, type='str'),
            name=dict(required=True, type='str'),
            path=dict(required=False, type='str'),
            description=dict(required=False, type='str'),
            state=dict(default="present", choices=["present", "absent"]),
        ),
        mutually_exclusive=[
            ['login_user', 'login_token'],
            ['login_password', 'login_token']
        ],
        required_together=[
            ['login_user', 'login_password']
        ],
        required_one_of=[
            ['login_user', 'login_token']
        ],
        supports_check_mode=True
    )

    if not HAS_GITLAB_PACKAGE:
        # Bug fix: the message previously ended with an unbalanced "(".
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install python-gitlab)")

    server_url = module.params['server_url']
    validate_certs = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    group_name = module.params['name']
    group_path = module.params['path']
    description = module.params['description']
    state = module.params['state']

    try:
        git = gitlab.Gitlab(url=server_url, ssl_verify=validate_certs, email=login_user, password=login_password,
                            private_token=login_token, api_version=4)
        git.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg='Failed to connect to Gitlab server: %s' % to_native(e))

    # Default the path to a filesystem-friendly version of the name.
    if group_path is None:
        group_path = group_name.replace(" ", "_")

    group = GitLabGroup(module, git)
    group_name = group_name.lower()
    group_exists = group.existsGroup(group_name)

    if group_exists and state == "absent":
        if group.deleteGroup():
            module.exit_json(changed=True, result="Successfully deleted group %s" % group_name)
    else:
        if state == "absent":
            module.exit_json(changed=False, result="Group deleted or does not exists")
        else:
            if group.createOrUpdateGroup(name=group_name, path=group_path, description=description):
                module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name)
            else:
                module.exit_json(changed=False, result="No need to update the group %s" % group_name)
| gpl-3.0 | 2,104,940,204,767,891,200 | 2,001,315,667,238,421,500 | 32.80786 | 131 | 0.597778 | false |
martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/encodings/iso8859_15.py | 93 | 13775 | """ Python Character Mapping Codec iso8859_15 generated from 'MAPPINGS/ISO8859/8859-15.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless iso8859-15 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; the charmap codec keeps no state between calls."""

    def encode(self, input, final=False):
        # charmap_encode returns (output, length_consumed); only the output
        # matters here.
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; the charmap codec keeps no state between calls."""

    def decode(self, input, final=False):
        # charmap_decode returns (output, length_consumed); only the output
        # matters here.
        data, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return data
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer for iso8859-15: inherits encode() from Codec and the
    # stream machinery from codecs.StreamWriter; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader for iso8859-15: inherits decode() from Codec and the
    # stream machinery from codecs.StreamReader; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-15',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20ac' # 0xA4 -> EURO SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 | -1,429,347,618,757,801,000 | 6,806,241,051,522,269,000 | 42.869707 | 109 | 0.515717 | false |
jaimemaretoli/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/langcyrillicmodel.py | 168 | 17750 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
};
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
| lgpl-2.1 | 5,070,279,080,245,075,000 | -567,807,804,829,091,600 | 52.625378 | 70 | 0.582648 | false |
danielchatfield/shreddi.es | libs/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self):
tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self):
tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
env = Environment(trim_blocks=True)
def test_simple(self):
tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self):
tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self):
tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_varargs(self):
tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self):
tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self):
self.env = Environment(loader=DictLoader({'include':
'{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self):
tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self):
tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ForLoopTestCase))
suite.addTest(unittest.makeSuite(IfConditionTestCase))
suite.addTest(unittest.makeSuite(MacrosTestCase))
return suite
| artistic-2.0 | -445,920,013,274,752,600 | 6,856,206,038,558,488,000 | 37.878689 | 90 | 0.48676 | false |
alxgu/ansible | lib/ansible/parsing/quoting.py | 241 | 1141 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def is_quoted(data):
return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
| gpl-3.0 | 5,055,627,312,344,778,000 | 1,560,827,129,307,708,400 | 35.806452 | 107 | 0.71078 | false |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/external/protobuf/python/google/protobuf/duration_pb2.py | 43 | 2746 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/duration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/duration.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42|\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z*github.com/golang/protobuf/ptypes/duration\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DURATION = _descriptor.Descriptor(
name='Duration',
full_name='google.protobuf.Duration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='google.protobuf.Duration.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nanos', full_name='google.protobuf.Duration.nanos', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['Duration'] = _DURATION
Duration = _reflection.GeneratedProtocolMessageType('Duration', (_message.Message,), dict(
DESCRIPTOR = _DURATION,
__module__ = 'google.protobuf.duration_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Duration)
))
_sym_db.RegisterMessage(Duration)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\rDurationProtoP\001Z*github.com/golang/protobuf/ptypes/duration\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
| mit | 2,570,354,094,350,103,000 | -875,139,371,616,268,300 | 34.205128 | 353 | 0.7378 | false |
MountainWei/nova | nova/db/sqlalchemy/models.py | 13 | 54014 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
schema, Unicode)
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from nova.db.sqlalchemy import types
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase):
metadata = None
def __copy__(self):
"""Implement a safe copy.copy().
SQLAlchemy-mapped objects travel with an object
called an InstanceState, which is pegged to that object
specifically and tracks everything about that object. It's
critical within all attribute operations, including gets
and deferred loading. This object definitely cannot be
shared among two instances, and must be handled.
The copy routine here makes use of session.merge() which
already essentially implements a "copy" style of operation,
which produces a new instance with a new InstanceState and copies
all the data along mapped attributes without using any SQL.
The mode we are using here has the caveat that the given object
must be "clean", e.g. that it has no database-loaded state
that has been updated and not flushed. This is a good thing,
as creating a copy of an object including non-flushed, pending
database state is probably not a good idea; neither represents
what the actual row looks like, and only one should be flushed.
"""
session = orm.Session()
copy = session.merge(self, load=False)
session.expunge(copy)
return copy
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
last_seen_up = Column(DateTime, nullable=True)
forced_down = Column(Boolean, default=False)
version = Column(Integer, default=0)
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = (
schema.UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name="uniq_compute_nodes0host0hypervisor_hostname0deleted"),
)
id = Column(Integer, primary_key=True)
service_id = Column(Integer, nullable=True)
# FIXME(sbauza: Host field is nullable because some old Juno compute nodes
# can still report stats from an old ResourceTracker without setting this
# field.
# This field has to be set non-nullable in a later cycle (probably Lxxx)
# once we are sure that all compute nodes in production report it.
host = Column(String(255), nullable=True)
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
metrics = Column(Text)
# Note(yongli): json string PCI Stats
# '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
# data about additional resources.
extra_resources = Column(Text)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
# json-encoded dict that contains NUMA topology as generated by
# objects.NUMATopoloogy._to_json()
numa_topology = Column(Text)
# allocation ratios provided by the RT
ram_allocation_ratio = Column(Float, nullable=True)
cpu_allocation_ratio = Column(Float, nullable=True)
class Certificate(BASE, NovaBase):
"""Represents a x509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
schema.UniqueConstraint('uuid', name='uniq_instances0uuid'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(orm.object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@property
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
ephemeral_key_uuid = Column(String(36))
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
# To identify the "ComputeNode" which the instance resides in.
# This equals to ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
# NOTE(sbiswas7): 'scheduled_at' is still in the database
# and can be removed in the future release.
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
# This always refers to the availability_zone kwarg passed in /servers and
# provided as an API option, not at all related to the host AZ the instance
# belongs to.
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36), nullable=False)
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
"""Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance,
backref=orm.backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceExtra(BASE, NovaBase):
__tablename__ = 'instance_extra'
__table_args__ = (
Index('instance_extra_idx', 'instance_uuid'),)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
numa_topology = orm.deferred(Column(Text))
pci_requests = orm.deferred(Column(Text))
flavor = orm.deferred(Column(Text))
vcpu_model = orm.deferred(Column(Text))
migration_context = orm.deferred(Column(Text))
instance = orm.relationship(Instance,
backref=orm.backref('extra',
uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
    """Represents possible flavors for instances.
    Note: instance_type and flavor are synonyms and the term instance_type is
    deprecated and in the process of being removed.
    """
    __tablename__ = "instance_types"
    __table_args__ = (
        schema.UniqueConstraint("flavorid", "deleted",
                                name="uniq_instance_types0flavorid0deleted"),
        schema.UniqueConstraint("name", "deleted",
                                name="uniq_instance_types0name0deleted")
    )
    # Internal only primary key/id
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    memory_mb = Column(Integer, nullable=False)
    vcpus = Column(Integer, nullable=False)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    # Public facing id will be renamed public_id
    flavorid = Column(String(255))
    swap = Column(Integer, nullable=False, default=0)
    # Network rx/tx scaling factor relative to other flavors.
    rxtx_factor = Column(Float, default=1)
    vcpu_weight = Column(Integer)
    disabled = Column(Boolean, default=False)
    is_public = Column(Boolean, default=True)
class Quota(BASE, NovaBase):
    """Represents a single quota override for a project.
    If there is no row for a given project id and resource, then the
    default for the quota class is used. If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """
    __tablename__ = 'quotas'
    __table_args__ = (
        schema.UniqueConstraint("project_id", "resource", "deleted",
        name="uniq_quotas0project_id0resource0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255))
    resource = Column(String(255), nullable=False)
    # NULL means the resource is unlimited (see class docstring).
    hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase):
    """Represents a single quota override for a user with in a project."""
    __tablename__ = 'project_user_quotas'
    # Constraint name kept in a variable because it is long.
    uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
    __table_args__ = (
        schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
                                name=uniq_name),
        Index('project_user_quotas_project_id_deleted_idx',
              'project_id', 'deleted'),
        Index('project_user_quotas_user_id_deleted_idx',
              'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    project_id = Column(String(255), nullable=False)
    user_id = Column(String(255), nullable=False)
    resource = Column(String(255), nullable=False)
    # NOTE(review): presumably NULL means unlimited, as for Quota — confirm.
    hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
    """Represents a single quota override for a quota class.
    If there is no row for a given quota class and resource, then the
    default for the deployment is used. If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """
    __tablename__ = 'quota_classes'
    __table_args__ = (
        Index('ix_quota_classes_class_name', 'class_name'),
    )
    id = Column(Integer, primary_key=True)
    class_name = Column(String(255))
    resource = Column(String(255))
    # NULL means the resource is unlimited (see class docstring).
    hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
    """Represents the current usage for a given resource."""
    __tablename__ = 'quota_usages'
    __table_args__ = (
        Index('ix_quota_usages_project_id', 'project_id'),
        Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
    )
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255), nullable=False)
    in_use = Column(Integer, nullable=False)
    reserved = Column(Integer, nullable=False)

    @property
    def total(self):
        """Total consumption: committed usage plus pending reservations."""
        return self.in_use + self.reserved

    # NOTE: declared after the property above; still a regular mapped column.
    until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
    """Represents a resource reservation for quotas."""
    __tablename__ = 'reservations'
    __table_args__ = (
        Index('ix_reservations_project_id', 'project_id'),
        Index('reservations_uuid_idx', 'uuid'),
        Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
        Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36), nullable=False)
    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255))
    # Signed change this reservation applies to the usage counters.
    delta = Column(Integer, nullable=False)
    expire = Column(DateTime)
    # Parent usage row; the join filters out soft-deleted usages.
    usage = orm.relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                    'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
    """Represents a block storage device that can be attached to a VM."""
    __tablename__ = 'snapshots'
    __table_args__ = ()
    id = Column(String(36), primary_key=True, nullable=False)
    # NOTE(review): String soft-delete marker ('' = live) — appears to
    # override the base model's deleted column; confirm against NovaBase.
    deleted = Column(String(36), default="")

    @property
    def name(self):
        """Display name derived from the configured snapshot template."""
        return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        """Source volume's name derived from the configured volume template."""
        return CONF.volume_name_template % self.volume_id

    user_id = Column(String(255))
    project_id = Column(String(255))
    volume_id = Column(String(36), nullable=False)
    status = Column(String(255))
    progress = Column(String(255))
    volume_size = Column(Integer)
    scheduled_at = Column(DateTime)
    display_name = Column(String(255))
    display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
    """Represents block device mapping that is defined by EC2."""
    __tablename__ = "block_device_mapping"
    __table_args__ = (
        Index('snapshot_id', 'snapshot_id'),
        Index('volume_id', 'volume_id'),
        Index('block_device_mapping_instance_uuid_device_name_idx',
              'instance_uuid', 'device_name'),
        Index('block_device_mapping_instance_uuid_volume_id_idx',
              'instance_uuid', 'volume_id'),
        Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Backref 'block_device_mapping' on Instance; join excludes soft-deleted
    # mapping rows.
    instance = orm.relationship(Instance,
                            backref=orm.backref('block_device_mapping'),
                            foreign_keys=instance_uuid,
                            primaryjoin='and_(BlockDeviceMapping.'
                                              'instance_uuid=='
                                              'Instance.uuid,'
                                              'BlockDeviceMapping.deleted=='
                                              '0)')
    source_type = Column(String(255))
    destination_type = Column(String(255))
    guest_format = Column(String(255))
    device_type = Column(String(255))
    disk_bus = Column(String(255))
    boot_index = Column(Integer)
    device_name = Column(String(255))
    # default=False for compatibility of the existing code.
    # With EC2 API,
    # default True for ami specified device.
    # default False for created with other timing.
    # TODO(sshturm) add default in db
    delete_on_termination = Column(Boolean, default=False)
    snapshot_id = Column(String(36))
    volume_id = Column(String(36))
    volume_size = Column(Integer)
    image_id = Column(String(36))
    # for no device to suppress devices.
    no_device = Column(Boolean)
    connection_info = Column(MediumText())
class SecurityGroupInstanceAssociation(BASE, NovaBase):
    """Join table associating security groups with instances."""
    __tablename__ = 'security_group_instance_association'
    __table_args__ = (
        Index('security_group_instance_association_instance_uuid_idx',
              'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    security_group_id = Column(Integer, ForeignKey('security_groups.id'))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
    """Represents a security group."""
    __tablename__ = 'security_groups'
    __table_args__ = (
        schema.UniqueConstraint('project_id', 'name', 'deleted',
                                name='uniq_security_groups0project_id0'
                                     'name0deleted'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    description = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    # Many-to-many to Instance through the association table; both join
    # conditions filter out soft-deleted rows.
    instances = orm.relationship(Instance,
                             secondary="security_group_instance_association",
                             primaryjoin='and_('
        'SecurityGroup.id == '
        'SecurityGroupInstanceAssociation.security_group_id,'
        'SecurityGroupInstanceAssociation.deleted == 0,'
        'SecurityGroup.deleted == 0)',
                             secondaryjoin='and_('
    'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
    # (anthony) the condition below shouldn't be necessary now that the
    # association is being marked as deleted.  However, removing this
    # may cause existing deployments to choke, so I'm leaving it
    'Instance.deleted == 0)',
                             backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
    """Represents a rule in a security group."""
    __tablename__ = 'security_group_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
    # Group this rule belongs to; exposed as SecurityGroup.rules.
    parent_group = orm.relationship("SecurityGroup", backref="rules",
                                foreign_keys=parent_group_id,
                                primaryjoin='and_('
        'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')
    protocol = Column(String(255))
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
    # granting access for.
    group_id = Column(Integer, ForeignKey('security_groups.id'))
    grantee_group = orm.relationship("SecurityGroup",
                                 foreign_keys=group_id,
                                 primaryjoin='and_('
        'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
    """Represents a default security group ingress rule."""
    __tablename__ = 'security_group_default_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp" or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
    """Represents a provider-level firewall rule.

    NOTE(review): the original docstring said "a rule in a security group",
    which contradicts the table name 'provider_fw_rules'.
    """
    __tablename__ = 'provider_fw_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp", or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
    """Represents a public key pair for ssh / WinRM."""
    __tablename__ = 'key_pairs'
    __table_args__ = (
        schema.UniqueConstraint("user_id", "name", "deleted",
                                name="uniq_key_pairs0user_id0name0deleted"),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(255), nullable=False)
    user_id = Column(String(255))
    fingerprint = Column(String(255))
    public_key = Column(MediumText())
    # Keypair kind; DB-level default is 'ssh' (server_default).
    type = Column(Enum('ssh', 'x509', name='keypair_types'),
                  nullable=False, server_default='ssh')
class Migration(BASE, NovaBase):
    """Represents a running host-to-host migration."""
    __tablename__ = 'migrations'
    __table_args__ = (
        Index('migrations_instance_uuid_and_status_idx', 'deleted',
              'instance_uuid', 'status'),
        # MySQL has a limit of 3072 bytes for an multi-column index. This
        # index ends up being larger than that using the utf-8 encoding.
        # Limiting the index to the prefixes will keep it under the limit.
        # FIXME(johannes): Is it MySQL or InnoDB that imposes the limit?
        Index('migrations_by_host_nodes_and_status_idx', 'deleted',
              'source_compute', 'dest_compute', 'source_node', 'dest_node',
              'status', mysql_length={'source_compute': 100,
                                      'dest_compute': 100,
                                      'source_node': 100,
                                      'dest_node': 100}),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # NOTE(tr3buchet): the ____compute variables are instance['host']
    source_compute = Column(String(255))
    dest_compute = Column(String(255))
    # nodes are equivalent to a compute node's 'hypervisor_hostname'
    source_node = Column(String(255))
    dest_node = Column(String(255))
    # NOTE(tr3buchet): dest_host, btw, is an ip address
    dest_host = Column(String(255))
    old_instance_type_id = Column(Integer())
    new_instance_type_id = Column(Integer())
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # TODO(_cerberus_): enum
    status = Column(String(255))
    migration_type = Column(Enum('migration', 'resize', 'live-migration',
                                 'evacuation'),
                            nullable=True)
    # Whether this migration row is hidden from normal listings.
    hidden = Column(Boolean, default=False)
    instance = orm.relationship("Instance", foreign_keys=instance_uuid,
                            primaryjoin='and_(Migration.instance_uuid == '
                                        'Instance.uuid, Instance.deleted == '
                                        '0)')
class Network(BASE, NovaBase):
    """Represents a network."""
    __tablename__ = 'networks'
    __table_args__ = (
        schema.UniqueConstraint("vlan", "deleted",
                                name="uniq_networks0vlan0deleted"),
        Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
        Index('networks_host_idx', 'host'),
        Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('networks_uuid_project_id_deleted_idx', 'uuid',
              'project_id', 'deleted'),
        Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
        Index('networks_cidr_v6_idx', 'cidr_v6')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    label = Column(String(255))
    injected = Column(Boolean, default=False)
    # IPv4 and IPv6 address ranges for this network.
    cidr = Column(types.CIDR())
    cidr_v6 = Column(types.CIDR())
    multi_host = Column(Boolean, default=False)
    gateway_v6 = Column(types.IPAddress())
    netmask_v6 = Column(types.IPAddress())
    netmask = Column(types.IPAddress())
    bridge = Column(String(255))
    bridge_interface = Column(String(255))
    gateway = Column(types.IPAddress())
    broadcast = Column(types.IPAddress())
    dns1 = Column(types.IPAddress())
    dns2 = Column(types.IPAddress())
    vlan = Column(Integer)
    vpn_public_address = Column(types.IPAddress())
    vpn_public_port = Column(Integer)
    vpn_private_address = Column(types.IPAddress())
    dhcp_start = Column(types.IPAddress())
    rxtx_base = Column(Integer)
    project_id = Column(String(255))
    priority = Column(Integer)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    uuid = Column(String(36))
    mtu = Column(Integer)
    dhcp_server = Column(types.IPAddress())
    enable_dhcp = Column(Boolean, default=True)
    share_address = Column(Boolean, default=False)
class VirtualInterface(BASE, NovaBase):
    """Represents a virtual interface on an instance."""
    __tablename__ = 'virtual_interfaces'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                        name="uniq_virtual_interfaces0address0deleted"),
        Index('virtual_interfaces_network_id_idx', 'network_id'),
        Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
        Index('virtual_interfaces_uuid_idx', 'uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # MAC address; unique among live (non-deleted) rows.
    address = Column(String(255))
    network_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
    """Represents a fixed ip for an instance."""
    __tablename__ = 'fixed_ips'
    __table_args__ = (
        schema.UniqueConstraint(
            "address", "deleted", name="uniq_fixed_ips0address0deleted"),
        Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
        Index('network_id', 'network_id'),
        Index('address', 'address'),
        Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
        Index('fixed_ips_host_idx', 'host'),
        Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
              'deleted'),
        Index('fixed_ips_address_reserved_network_id_deleted_idx',
              'address', 'reserved', 'network_id', 'deleted'),
        Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
              'allocated'),
        Index('fixed_ips_deleted_allocated_updated_at_idx', 'deleted',
              'allocated', 'updated_at')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    network_id = Column(Integer)
    virtual_interface_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_id column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
    # TODO(sshturm) add default in db
    allocated = Column(Boolean, default=False)
    # leased means dhcp bridge has leased the ip
    # TODO(sshturm) add default in db
    leased = Column(Boolean, default=False)
    # TODO(sshturm) add default in db
    reserved = Column(Boolean, default=False)
    host = Column(String(255))
    # All three relationships below exclude soft-deleted rows on both sides.
    network = orm.relationship(Network,
                           backref=orm.backref('fixed_ips'),
                           foreign_keys=network_id,
                           primaryjoin='and_('
                                'FixedIp.network_id == Network.id,'
                                'FixedIp.deleted == 0,'
                                'Network.deleted == 0)')
    instance = orm.relationship(Instance,
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'FixedIp.instance_uuid == Instance.uuid,'
                                'FixedIp.deleted == 0,'
                                'Instance.deleted == 0)')
    virtual_interface = orm.relationship(VirtualInterface,
                                     backref=orm.backref('fixed_ips'),
                                     foreign_keys=virtual_interface_id,
                                     primaryjoin='and_('
                                'FixedIp.virtual_interface_id == '
                                'VirtualInterface.id,'
                                'FixedIp.deleted == 0,'
                                'VirtualInterface.deleted == 0)')
class FloatingIp(BASE, NovaBase):
    """Represents a floating ip that dynamically forwards to a fixed ip."""
    __tablename__ = 'floating_ips'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_floating_ips0address0deleted"),
        Index('fixed_ip_id', 'fixed_ip_id'),
        Index('floating_ips_host_idx', 'host'),
        Index('floating_ips_project_id_idx', 'project_id'),
        Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
              'pool', 'deleted', 'fixed_ip_id', 'project_id')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    fixed_ip_id = Column(Integer)
    project_id = Column(String(255))
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    auto_assigned = Column(Boolean, default=False)
    # TODO(sshturm) add default in db
    pool = Column(String(255))
    interface = Column(String(255))
    # Currently-associated fixed ip; join excludes soft-deleted rows.
    fixed_ip = orm.relationship(FixedIp,
                            backref=orm.backref('floating_ips'),
                            foreign_keys=fixed_ip_id,
                            primaryjoin='and_('
                                'FloatingIp.fixed_ip_id == FixedIp.id,'
                                'FloatingIp.deleted == 0,'
                                'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
    """Represents a DNS domain with availability zone or project info."""
    __tablename__ = 'dns_domains'
    __table_args__ = (
        Index('dns_domains_project_id_idx', 'project_id'),
        Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
    )
    # NOTE(review): Boolean soft-delete flag — appears to override the base
    # model's deleted column; confirm against NovaBase.
    deleted = Column(Boolean, default=False)
    # The domain name itself is the primary key.
    domain = Column(String(255), primary_key=True)
    scope = Column(String(255))
    availability_zone = Column(String(255))
    project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
    """Represents pool of consoles on the same physical node."""
    __tablename__ = 'console_pools'
    __table_args__ = (
        schema.UniqueConstraint(
            "host", "console_type", "compute_host", "deleted",
            name="uniq_console_pools0host0console_type0compute_host0deleted"),
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    username = Column(String(255))
    password = Column(String(255))
    console_type = Column(String(255))
    public_hostname = Column(String(255))
    # Host serving the consoles vs. the compute host whose instances they
    # belong to (both part of the uniqueness constraint above).
    host = Column(String(255))
    compute_host = Column(String(255))
class Console(BASE, NovaBase):
    """Represents a console session for an instance."""
    __tablename__ = 'consoles'
    __table_args__ = (
        Index('consoles_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    instance_name = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    password = Column(String(255))
    port = Column(Integer)
    pool_id = Column(Integer, ForeignKey('console_pools.id'))
    # Pool this console belongs to; exposed as ConsolePool.consoles.
    pool = orm.relationship(ConsolePool, backref=orm.backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
    """Represents a user-provided metadata key/value pair for an instance."""
    __tablename__ = 'instance_metadata'
    __table_args__ = (
        Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Exposed on Instance as the 'metadata' backref; join excludes
    # soft-deleted metadata rows.
    instance = orm.relationship(Instance, backref="metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'InstanceMetadata.instance_uuid == '
                                'Instance.uuid,'
                                'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
    """Represents a system-owned metadata key/value pair for an instance."""
    __tablename__ = 'instance_system_metadata'
    __table_args__ = (
        Index('instance_uuid', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'),
                           nullable=False)
    # NOTE: unlike InstanceMetadata, this join does not filter soft-deleted
    # rows.
    instance = orm.relationship(Instance, backref="system_metadata",
                            foreign_keys=instance_uuid)
class InstanceTypeProjects(BASE, NovaBase):
    """Represent projects associated instance_types."""
    __tablename__ = "instance_type_projects"
    __table_args__ = (schema.UniqueConstraint(
        "instance_type_id", "project_id", "deleted",
        name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    project_id = Column(String(255))
    # Exposed as InstanceTypes.projects; join excludes soft-deleted rows.
    instance_type = orm.relationship(InstanceTypes, backref="projects",
                 foreign_keys=instance_type_id,
                 primaryjoin='and_('
                 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
                 'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for an instance_type."""
    __tablename__ = 'instance_type_extra_specs'
    __table_args__ = (
        Index('instance_type_extra_specs_instance_type_id_key_idx',
              'instance_type_id', 'key'),
        schema.UniqueConstraint(
              "instance_type_id", "key", "deleted",
              name=("uniq_instance_type_extra_specs0"
                    "instance_type_id0key0deleted")
        ),
        # Case-sensitive key comparison on MySQL.
        {'mysql_collate': 'utf8_bin'},
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    # Exposed as InstanceTypes.extra_specs; join excludes soft-deleted rows.
    instance_type = orm.relationship(InstanceTypes, backref="extra_specs",
                foreign_keys=instance_type_id,
                primaryjoin='and_('
                'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
                'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
    """Represents parent and child cells of this cell.  Cells can
    have multiple parents and children, so there could be any number
    of entries with is_parent=True or False
    """
    __tablename__ = 'cells'
    __table_args__ = (schema.UniqueConstraint(
        "name", "deleted", name="uniq_cells0name0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    # Name here is the 'short name' of a cell.  For instance: 'child1'
    name = Column(String(255))
    api_url = Column(String(255))
    transport_url = Column(String(255), nullable=False)
    # Scheduling weight tuning for this cell.
    weight_offset = Column(Float(), default=0.0)
    weight_scale = Column(Float(), default=1.0)
    is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
    """Represents a host that is member of an aggregate."""
    __tablename__ = 'aggregate_hosts'
    __table_args__ = (schema.UniqueConstraint(
        "host", "aggregate_id", "deleted",
         name="uniq_aggregate_hosts0host0aggregate_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    host = Column(String(255))
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for an aggregate."""
    __tablename__ = 'aggregate_metadata'
    __table_args__ = (
        schema.UniqueConstraint("aggregate_id", "key", "deleted",
            name="uniq_aggregate_metadata0aggregate_id0key0deleted"
            ),
        Index('aggregate_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255), nullable=False)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
    """Represents a cluster of hosts that exists in this zone."""
    __tablename__ = 'aggregates'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(255))
    # Both relationships exclude soft-deleted rows on either side.
    _hosts = orm.relationship(AggregateHost,
                          primaryjoin='and_('
                          'Aggregate.id == AggregateHost.aggregate_id,'
                          'AggregateHost.deleted == 0,'
                          'Aggregate.deleted == 0)')
    _metadata = orm.relationship(AggregateMetadata,
                             primaryjoin='and_('
                             'Aggregate.id == AggregateMetadata.aggregate_id,'
                             'AggregateMetadata.deleted == 0,'
                             'Aggregate.deleted == 0)')

    @property
    def _extra_keys(self):
        # Extra attribute names exposed when converting this model to a dict.
        return ['hosts', 'metadetails', 'availability_zone']

    @property
    def hosts(self):
        """List of host names that are members of this aggregate."""
        return [h.host for h in self._hosts]

    @property
    def metadetails(self):
        """Aggregate metadata as a plain {key: value} dict.

        NOTE: rebuilt from the _metadata relationship on every access.
        """
        return {m.key: m.value for m in self._metadata}

    @property
    def availability_zone(self):
        """The aggregate's availability zone, or None when not set."""
        # Previously this evaluated the metadetails property twice (a
        # membership test followed by a second lookup), rebuilding the dict
        # from the relationship each time.  dict.get gives the same result
        # (None when absent) with a single evaluation.
        return self.metadetails.get('availability_zone')
class AgentBuild(BASE, NovaBase):
    """Represents an agent build."""
    __tablename__ = 'agent_builds'
    __table_args__ = (
        Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
              'architecture'),
        schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
                name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
    )
    id = Column(Integer, primary_key=True)
    hypervisor = Column(String(255))
    os = Column(String(255))
    architecture = Column(String(255))
    version = Column(String(255))
    url = Column(String(255))
    md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
    """Cache for instance bandwidth usage data pulled from the hypervisor."""
    __tablename__ = 'bw_usage_cache'
    __table_args__ = (
        Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
              'start_period'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36))
    mac = Column(String(255))
    start_period = Column(DateTime, nullable=False)
    last_refreshed = Column(DateTime)
    # Cumulative bytes in/out plus the last raw counters read, so usage can
    # be computed across counter resets.
    bw_in = Column(BigInteger)
    bw_out = Column(BigInteger)
    last_ctr_in = Column(BigInteger)
    last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
    """Cache for volume usage data pulled from the hypervisor."""
    __tablename__ = 'volume_usage_cache'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), nullable=False)
    instance_uuid = Column(String(36))
    project_id = Column(String(36))
    user_id = Column(String(64))
    availability_zone = Column(String(255))
    # 'tot_*' columns are lifetime totals; 'curr_*' columns cover the
    # current accounting period.
    tot_last_refreshed = Column(DateTime)
    tot_reads = Column(BigInteger, default=0)
    tot_read_bytes = Column(BigInteger, default=0)
    tot_writes = Column(BigInteger, default=0)
    tot_write_bytes = Column(BigInteger, default=0)
    curr_last_refreshed = Column(DateTime)
    curr_reads = Column(BigInteger, default=0)
    curr_read_bytes = Column(BigInteger, default=0)
    curr_writes = Column(BigInteger, default=0)
    curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
    """Compatibility layer for the S3 image service talking to Glance."""
    __tablename__ = 's3_images'
    __table_args__ = ()
    # Maps a small integer id (S3/EC2 style) to a Glance image uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 volume service."""
    __tablename__ = 'volume_id_mappings'
    __table_args__ = ()
    # Maps a small integer id (EC2 style) to a volume uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 snapshot service."""
    __tablename__ = 'snapshot_id_mappings'
    __table_args__ = ()
    # Maps a small integer id (EC2 style) to a snapshot uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
    """Represents a fault (error) recorded against an instance."""
    __tablename__ = 'instance_faults'
    __table_args__ = (
        Index('instance_faults_host_idx', 'host'),
        Index('instance_faults_instance_uuid_deleted_created_at_idx',
              'instance_uuid', 'deleted', 'created_at')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    # Numeric fault code; message is the short summary, details the long
    # text (e.g. a traceback).
    code = Column(Integer(), nullable=False)
    message = Column(String(255))
    details = Column(MediumText())
    host = Column(String(255))
class InstanceAction(BASE, NovaBase):
    """Track client actions on an instance.
    The intention is that there will only be one of these per user request.  A
    lookup by (instance_uuid, request_id) should always return a single result.
    """
    __tablename__ = 'instance_actions'
    __table_args__ = (
        Index('instance_uuid_idx', 'instance_uuid'),
        Index('request_id_idx', 'request_id')
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    action = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    request_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    # start_time defaults to creation time; finish_time is set when the
    # action completes.
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
    """Track events that occur during an InstanceAction."""
    __tablename__ = 'instance_actions_events'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    event = Column(String(255))
    action_id = Column(Integer, ForeignKey('instance_actions.id'))
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    result = Column(String(255))
    # Traceback text captured when the event failed.
    traceback = Column(Text)
    host = Column(String(255))
    details = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 instance service."""
    __tablename__ = 'instance_id_mappings'
    __table_args__ = (
        Index('ix_instance_id_mappings_uuid', 'uuid'),
    )
    # Maps a small integer id (EC2 style) to an instance uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
    """Audit log for background periodic tasks."""
    __tablename__ = 'task_log'
    __table_args__ = (
        schema.UniqueConstraint(
            'task_name', 'host', 'period_beginning', 'period_ending',
            name="uniq_task_log0task_name0host0period_beginning0period_ending"
        ),
        Index('ix_task_log_period_beginning', 'period_beginning'),
        Index('ix_task_log_host', 'host'),
        Index('ix_task_log_period_ending', 'period_ending'),
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    task_name = Column(String(255), nullable=False)
    state = Column(String(255), nullable=False)
    host = Column(String(255), nullable=False)
    period_beginning = Column(DateTime, default=timeutils.utcnow,
                              nullable=False)
    period_ending = Column(DateTime, default=timeutils.utcnow,
                           nullable=False)
    message = Column(String(255), nullable=False)
    # Number of items the task processed, and how many of them errored.
    task_items = Column(Integer(), default=0)
    errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
    """Represents the members for an instance group."""
    __tablename__ = 'instance_group_member'
    __table_args__ = (
        Index('instance_group_member_instance_idx', 'instance_id'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_id = Column(String(255))
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
    """Represents the policy type for an instance group."""
    __tablename__ = 'instance_group_policy'
    __table_args__ = (
        Index('instance_group_policy_policy_idx', 'policy'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    policy = Column(String(255))
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroup(BASE, NovaBase):
    """Represents an instance group.
    A group will maintain a collection of instances and the relationship
    between them.
    """
    __tablename__ = 'instance_groups'
    __table_args__ = (
        schema.UniqueConstraint("uuid", "deleted",
                                 name="uniq_instance_groups0uuid0deleted"),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    uuid = Column(String(36), nullable=False)
    name = Column(String(255))
    # Both relationships exclude soft-deleted rows on either side.
    _policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_('
        'InstanceGroup.id == InstanceGroupPolicy.group_id,'
        'InstanceGroupPolicy.deleted == 0,'
        'InstanceGroup.deleted == 0)')
    _members = orm.relationship(InstanceGroupMember, primaryjoin='and_('
        'InstanceGroup.id == InstanceGroupMember.group_id,'
        'InstanceGroupMember.deleted == 0,'
        'InstanceGroup.deleted == 0)')

    @property
    def policies(self):
        """Policy names attached to this group, as a list of strings."""
        return [p.policy for p in self._policies]

    @property
    def members(self):
        """Member instance ids of this group, as a list of strings."""
        return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
    """Represents a PCI host device that can be passed through to instances.
    """
    __tablename__ = 'pci_devices'
    __table_args__ = (
        Index('ix_pci_devices_compute_node_id_deleted',
              'compute_node_id', 'deleted'),
        Index('ix_pci_devices_instance_uuid_deleted',
              'instance_uuid', 'deleted'),
        schema.UniqueConstraint(
            "compute_node_id", "address", "deleted",
            name="uniq_pci_devices0compute_node_id0address0deleted")
    )
    id = Column(Integer, primary_key=True)
    compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
                             nullable=False)
    # physical address of device domain:bus:slot.func (0000:09:01.1)
    address = Column(String(12), nullable=False)
    vendor_id = Column(String(4), nullable=False)
    product_id = Column(String(4), nullable=False)
    dev_type = Column(String(8), nullable=False)
    dev_id = Column(String(255))
    # label is abstract device name, that is used to unify devices with the
    # same functionality with different addresses or host.
    label = Column(String(255), nullable=False)
    status = Column(String(36), nullable=False)
    # the request_id is used to identify a device that is allocated for a
    # particular request
    request_id = Column(String(36), nullable=True)
    extra_info = Column(Text)
    instance_uuid = Column(String(36))
    numa_node = Column(Integer, nullable=True)
    # Exposed on Instance as the 'pci_devices' backref; join excludes
    # soft-deleted device rows.
    instance = orm.relationship(Instance, backref="pci_devices",
                                foreign_keys=instance_uuid,
                                primaryjoin='and_('
                                'PciDevice.instance_uuid == Instance.uuid,'
                                'PciDevice.deleted == 0)')
class Tag(BASE, models.ModelBase):
"""Represents the tag for a resource."""
__tablename__ = "tags"
__table_args__ = (
Index('tags_tag_idx', 'tag'),
)
resource_id = Column(String(36), primary_key=True, nullable=False)
tag = Column(Unicode(80), primary_key=True, nullable=False)
instance = orm.relationship(
"Instance",
backref='tags',
primaryjoin='and_(Tag.resource_id == Instance.uuid,'
'Instance.deleted == 0)',
foreign_keys=resource_id
)
| apache-2.0 | -4,431,280,207,788,083,700 | -3,814,416,635,614,905,000 | 36.588031 | 79 | 0.622357 | false |
havt/odoo | addons/l10n_syscohada/__init__.py | 439 | 1040 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2011 BAAMTU SARL (<http://www.baamtu.sn>).
# contact: leadsn@baamtu.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,072,371,537,974,765,000 | 7,827,589,659,225,418,000 | 44.217391 | 78 | 0.608654 | false |
ShaneMcC/hellanzb | Hellanzb/HellaXMLRPC/xmlrpc.py | 4 | 13743 | # -*- coding: iso-8859-1 -*-
# -*- test-case-name: twisted.web.test.test_xmlrpc -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""A generic resource for publishing objects via XML-RPC.
Requires xmlrpclib (comes standard with Python 2.2 and later, otherwise can be
downloaded from http://www.pythonware.com/products/xmlrpc/).
API Stability: semi-stable
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
from __future__ import nested_scopes
__version__ = "$Revision: 1.32 $"[11:-2]
# System Imports
import base64
import string
import xmlrpclib
import urllib
import urlparse
# Sibling Imports
from twisted.web import resource, server
from twisted.internet import defer, protocol, reactor
from twisted.python import log, reflect
import twisted.copyright
if twisted.copyright.version >= '2.0.0':
from twisted.web import http
else:
from twisted.protocols import http
# These are deprecated, use the class level definitions
NOT_FOUND = 8001
FAILURE = 8002
# Useful so people don't need to import xmlrpclib directly
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime
class NoSuchFunction(Fault):
"""There is no function by the given name."""
pass
class Handler:
"""Handle a XML-RPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
For example, lets say we want to authenticate against twisted.cred,
run a LDAP query and then pass its result to a database query, all
as a result of a single XML-RPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the XML-RPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
self.result.errback(NotImplementedError("Implement run() in subclasses"))
class XMLRPC(resource.Resource):
"""A resource that implements XML-RPC.
You probably want to connect this to '/RPC2'.
Methods published can return XML-RPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'xmlrpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
def __init__(self):
resource.Resource.__init__(self)
self.subHandlers = {}
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return self.subHandlers.keys()
def render(self, request):
request.content.seek(0, 0)
try:
args, functionPath = xmlrpclib.loads(request.content.read())
except:
# FIXME: this is what's returned to a normal GET
return ''
try:
function = self._getFunction(functionPath)
except Fault, f:
self._cbRender(f, request)
else:
request.setHeader("content-type", "text/xml")
defer.maybeDeferred(function, *args).addErrback(
self._ebRender
).addCallback(
self._cbRender, request
)
return server.NOT_DONE_YET
def _cbRender(self, result, request):
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
s = xmlrpclib.dumps(result, methodresponse=1)
except:
f = Fault(self.FAILURE, "can't serialize output")
s = xmlrpclib.dumps(f, methodresponse=1)
request.setHeader("content-length", str(len(s)))
request.write(s)
request.finish()
def _ebRender(self, failure):
if isinstance(failure.value, Fault):
return failure.value
log.err(failure)
return Fault(self.FAILURE, "error")
def _getFunction(self, functionPath):
"""Given a string, return a function, or raise NoSuchFunction.
This returned function will be called, and should return the result
of the call, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The default
policy is that given functionPath 'foo', return the method at
self.xmlrpc_foo, i.e. getattr(self, "xmlrpc_" + functionPath).
If functionPath contains self.separator, the sub-handler for
the initial prefix is used to search for the remaining path.
"""
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None: raise NoSuchFunction(self.NOT_FOUND, "no such subHandler %s" % prefix)
return handler._getFunction(functionPath)
f = getattr(self, "xmlrpc_%s" % functionPath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND, "function %s not found" % functionPath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND, "function %s not callable" % functionPath)
else:
return f
def _listFunctions(self):
"""Return a list of the names of all xmlrpc methods."""
return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_')
class XMLRPCIntrospection(XMLRPC):
"""Implement the XML-RPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be XML-RPC types, not Python types.
"""
def __init__(self, parent):
"""Implement Introspection support for an XMLRPC server.
@param parent: the XMLRPC server to add Introspection support to.
"""
XMLRPC.__init__(self)
self._xmlrpc_parent = parent
def xmlrpc_listMethods(self):
"""Return a list of the method names implemented by this server."""
functions = []
todo = [(self._xmlrpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([ prefix + name for name in obj._listFunctions() ])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
xmlrpc_listMethods.signature = [['array']]
def xmlrpc_methodHelp(self, method):
"""Return a documentation string describing the use of the given method.
"""
method = self._xmlrpc_parent._getFunction(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
xmlrpc_methodHelp.signature = [['string', 'string']]
def xmlrpc_methodSignature(self, method):
"""Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._xmlrpc_parent._getFunction(method)
return getattr(method, 'signature', None) or ''
xmlrpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(xmlrpc):
"""Add Introspection support to an XMLRPC server.
@param xmlrpc: The xmlrpc server to add Introspection support to.
"""
xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc))
class QueryProtocol(http.HTTPClient):
def connectionMade(self):
self.sendCommand('POST', self.factory.url)
if self.factory.user != None:
authString = self.factory.user + ':'
if self.factory.password != None:
authString += self.factory.password
auth = base64.encodestring(urllib.unquote(authString))
auth = string.join(string.split(auth), "") # get rid of whitespace
self.sendHeader('Authorization', 'Basic ' + auth)
self.sendHeader('User-Agent', 'Twisted/XMLRPClib')
self.sendHeader('Host', self.factory.host)
self.sendHeader('Content-type', 'text/xml')
self.sendHeader('Content-length', str(len(self.factory.payload)))
self.endHeaders()
self.transport.write(self.factory.payload)
def handleStatus(self, version, status, message):
if status != '200':
self.factory.badStatus(status, message)
def handleResponse(self, contents):
self.factory.parseResponse(contents)
payloadTemplate = """<?xml version="1.0"?>
<methodCall>
<methodName>%s</methodName>
%s
</methodCall>
"""
class QueryFactory(protocol.ClientFactory):
deferred = None
protocol = QueryProtocol
def __init__(self, url, host, user, password, method, *args):
self.url, self.host, self.user, self.password = url, host, user, password
self.payload = payloadTemplate % (method, xmlrpclib.dumps(args))
self.deferred = defer.Deferred()
def parseResponse(self, contents):
if not self.deferred:
return
try:
response = xmlrpclib.loads(contents)
except xmlrpclib.Fault, error:
self.deferred.errback(error)
self.deferred = None
else:
self.deferred.callback(response[0][0])
self.deferred = None
def clientConnectionLost(self, _, reason):
if self.deferred is not None:
self.deferred.errback(reason)
self.deferred = None
clientConnectionFailed = clientConnectionLost
def badStatus(self, status, message):
self.deferred.errback(ValueError(status, message))
self.deferred = None
class Proxy:
"""A Proxy for making remote XML-RPC calls.
Pass the URL of the remote XML-RPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
"""
def __init__(self, url):
type, uri = urllib.splittype(url)
#if type not in ("http", "https"):
# raise IOError, "unsupported XML-RPC protocol"
self.host, self.url = urllib.splithost(uri)
if self.url == "":
self.url = "/"
self.user = self.password = None
self.user, self.host = urllib.splituser(self.host)
try:
self.user, self.password = urllib.splitpasswd(self.user)
except TypeError:
pass
self.host, self.port = urllib.splitport(self.host)
self.port = int(self.port)
self.secure = type == 'https'
def callRemote(self, method, *args):
factory = QueryFactory(self.url, self.host, self.user, self.password,
method, *args)
if self.secure:
from twisted.internet import ssl
reactor.connectSSL(self.host, self.port or 443,
factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(self.host, self.port or 80, factory)
return factory.deferred
__all__ = ["XMLRPC", "Handler", "NoSuchFunction", "Fault", "Proxy"]
"""
LICENSE:
Copyright (c) 2004
Allen Short
Andrew Bennetts
Benjamin Bruheim
Bob Ippolito
Christopher Armstrong
Donovan Preston
Itamar Shtull-Trauring
James Knight
Jason A. Mobarak
Jonathan Lange
Jonathan D. Simms
Jp Calderone
Jürgen Hermann
Kevin Turner
Mary Gardiner
Matthew Lefkowitz
Massachusetts Institute of Technology
Moshe Zadka
Paul Swartz
Pavel Pergamenshchik
Sean Riley
Travis B. Hartwell
except as noted at the end of this file.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Copyright Exceptions:
"""
| bsd-3-clause | 3,452,568,610,559,122,000 | -2,232,775,662,483,190,800 | 31.799523 | 102 | 0.655315 | false |
nirmeshk/oh-mainline | vendor/packages/webob/webob/etag.py | 80 | 4531 | """
Does parsing of ETag-related headers: If-None-Matches, If-Matches
Also If-Range parsing
"""
from webob.datetime_utils import (
parse_date,
serialize_date,
)
from webob.descriptors import _rx_etag
from webob.util import (
header_docstring,
warn_deprecation,
)
__all__ = ['AnyETag', 'NoETag', 'ETagMatcher', 'IfRange', 'etag_property']
def etag_property(key, default, rfc_section, strong=True):
doc = header_docstring(key, rfc_section)
doc += " Converts it as a Etag."
def fget(req):
value = req.environ.get(key)
if not value:
return default
else:
return ETagMatcher.parse(value, strong=strong)
def fset(req, val):
if val is None:
req.environ[key] = None
else:
req.environ[key] = str(val)
def fdel(req):
del req.environ[key]
return property(fget, fset, fdel, doc=doc)
def _warn_weak_match_deprecated():
warn_deprecation("weak_match is deprecated", '1.2', 3)
def _warn_if_range_match_deprecated(*args, **kw): # pragma: no cover
raise DeprecationWarning("IfRange.match[_response] API is deprecated")
class _AnyETag(object):
"""
Represents an ETag of *, or a missing ETag when matching is 'safe'
"""
def __repr__(self):
return '<ETag *>'
def __nonzero__(self):
return False
__bool__ = __nonzero__ # python 3
def __contains__(self, other):
return True
def weak_match(self, other):
_warn_weak_match_deprecated()
def __str__(self):
return '*'
AnyETag = _AnyETag()
class _NoETag(object):
"""
Represents a missing ETag when matching is unsafe
"""
def __repr__(self):
return '<No ETag>'
def __nonzero__(self):
return False
__bool__ = __nonzero__ # python 3
def __contains__(self, other):
return False
def weak_match(self, other): # pragma: no cover
_warn_weak_match_deprecated()
def __str__(self):
return ''
NoETag = _NoETag()
# TODO: convert into a simple tuple
class ETagMatcher(object):
def __init__(self, etags):
self.etags = etags
def __contains__(self, other):
return other in self.etags
def weak_match(self, other): # pragma: no cover
_warn_weak_match_deprecated()
def __repr__(self):
return '<ETag %s>' % (' or '.join(self.etags))
@classmethod
def parse(cls, value, strong=True):
"""
Parse this from a header value
"""
if value == '*':
return AnyETag
if not value:
return cls([])
matches = _rx_etag.findall(value)
if not matches:
return cls([value])
elif strong:
return cls([t for w,t in matches if not w])
else:
return cls([t for w,t in matches])
def __str__(self):
return ', '.join(map('"%s"'.__mod__, self.etags))
class IfRange(object):
def __init__(self, etag):
self.etag = etag
@classmethod
def parse(cls, value):
"""
Parse this from a header value.
"""
if not value:
return cls(AnyETag)
elif value.endswith(' GMT'):
# Must be a date
return IfRangeDate(parse_date(value))
else:
return cls(ETagMatcher.parse(value))
def __contains__(self, resp):
"""
Return True if the If-Range header matches the given etag or last_modified
"""
return resp.etag_strong in self.etag
def __nonzero__(self):
return bool(self.etag)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.etag
)
def __str__(self):
return str(self.etag) if self.etag else ''
match = match_response = _warn_if_range_match_deprecated
__bool__ = __nonzero__ # python 3
class IfRangeDate(object):
def __init__(self, date):
self.date = date
def __contains__(self, resp):
last_modified = resp.last_modified
#if isinstance(last_modified, str):
# last_modified = parse_date(last_modified)
return last_modified and (last_modified <= self.date)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.date
#serialize_date(self.date)
)
def __str__(self):
return serialize_date(self.date)
match = match_response = _warn_if_range_match_deprecated
| agpl-3.0 | -5,934,333,241,359,720,000 | 1,470,693,068,685,624,000 | 22.973545 | 82 | 0.557272 | false |
gochist/horizon | openstack_dashboard/openstack/common/context.py | 19 | 2677 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from openstack_dashboard.openstack.common import uuidutils
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
| apache-2.0 | -5,586,956,372,797,702,000 | -2,349,918,513,446,786,000 | 31.646341 | 79 | 0.647366 | false |
hilaskis/UAV_MissionPlanner | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decompostion
def qr(a, mode='full'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like
        Matrix to be factored, of shape (M, N).
    mode : {'full', 'r', 'economic'}, optional
        Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.

    Returns
    -------
    q : ndarray of float or complex, optional
        The orthonormal matrix, of shape (M, K). Only returned if
        ``mode='full'``.
    r : ndarray of float or complex, optional
        The upper-triangular matrix, of shape (K, N) with K = min(M, N).
        Only returned when ``mode='full'`` or ``mode='r'``.
    a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
        The diagonal and the upper triangle of `a2` contains `r`, while
        the rest of the matrix is undefined.

    Raises
    ------
    LinAlgError
        If factoring fails.

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, and zungqr.

    For more information on the qr factorization, see for example:
    http://en.wikipedia.org/wiki/QR_factorization

    Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
    all the return values will be matrices too.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))  # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> r3 = np.linalg.qr(a, mode='economic')
    >>> np.allclose(r, r2)  # mode='r' returns the same r as mode='full'
    True
    >>> # But only triu parts are guaranteed equal when mode='economic'
    >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
    True

    Example illustrating a common use of `qr`: solving of least squares
    problems

    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 0, m = 1.) The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::

      A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[y0], [m]])
      b = array([[1], [0], [2], [1]])

    If A = qr such that q is orthonormal (which is always possible via
    Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
    however, we simply use `lstsq`.)

    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
    >>> A
    array([[0, 1],
           [1, 1],
           [1, 1],
           [2, 1]])
    >>> b = np.array([1, 0, 2, 1])
    >>> q, r = LA.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(LA.inv(r), p)
    array([  1.1e-16,   1.0e+00])

    """
    a, wrap = _makearray(a)
    _assertRank2(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    # LAPACK expects fortran (column-major) storage; a fast
    # copy-and-transpose of the C-ordered input provides exactly that.
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)  # scalar factors of the elementary reflectors
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'
    # calculate optimal size of work data 'work'
    # (lwork=-1 is LAPACK's workspace-size query; the optimal size is
    # returned in work[0])
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # do qr decomposition
    lwork = int(abs(work[0]))  # abs(): work[0] is complex for the z* routine
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # economic mode. Isn't actually economic.
    if mode[0] == 'e':
        if t != result_t :
            a = a.astype(result_t)
        # a.T carries r in its upper triangle; below the diagonal is the
        # (undefined-to-the-caller) reflector data.
        return a.T
    # generate r
    r = _fastCopyAndTranspose(result_t, a[:,:mn])
    for i in range(mn):
        r[i,:i].fill(0.0)  # zero the sub-diagonal reflector data
    # 'r'-mode, that is, calculate only r
    if mode[0] == 'r':
        return r
    # from here on: build orthonormal matrix q from a
    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'
    # determine optimal lwork (workspace-size query again)
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # compute q
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # q was built in-place in the fortran-ordered `a`; transpose back.
    q = _fastCopyAndTranspose(result_t, a[:mn,:])
    return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : array_like, shape (M, M)
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : ndarray, shape (M,)
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dgeev and zgeev
    that sets those routines' flags to return only the eigenvalues of
    general real and complex arrays, respectively.

    Examples
    --------
    Illustration, using the fact that the eigenvalues of a diagonal matrix
    are its diagonal elements, that multiplying a matrix on the left
    by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
    of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
    if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
    ``A``:

    >>> from numpy import linalg as LA
    >>> x = np.random.random()
    >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
    >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
    (1.0, 1.0, 0.0)

    Now multiply a diagonal matrix by Q on one side and by Q.T on the other:

    >>> D = np.diag((-1,1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    >>> A = np.dot(Q, D)
    >>> A = np.dot(A, Q.T)
    >>> LA.eigvals(A)
    array([ 1., -1.])

    """
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    real_t = _linalgRealType(t)
    a = _fastCopyAndTranspose(t, a)  # fortran-ordered copy for LAPACK
    a = _to_native_byte_order(a)
    n = a.shape[0]
    dummy = zeros((1,), t)  # stand-in for the eigenvector arrays (job is _N)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeev
        w = zeros((n,), t)
        rwork = zeros((n,), real_t)
        # workspace-size query (lwork=-1), then the real computation
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _N, n, a, n, w,
                                 dummy, 1, dummy, 1, work, -1, rwork, 0)
        lwork = int(abs(work[0]))  # abs(): work[0] is complex here
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _N, n, a, n, w,
                                 dummy, 1, dummy, 1, work, lwork, rwork, 0)
    else:
        lapack_routine = lapack_lite.dgeev
        wr = zeros((n,), t)  # real parts of the eigenvalues
        wi = zeros((n,), t)  # imaginary parts of the eigenvalues
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _N, n, a, n, wr, wi,
                                 dummy, 1, dummy, 1, work, -1, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _N, n, a, n, wr, wi,
                                 dummy, 1, dummy, 1, work, lwork, 0)
        if all(wi == 0.):
            # purely real spectrum: keep a real-typed result
            w = wr
            result_t = _realType(result_t)
        else:
            w = wr+1j*wi
            result_t = _complexType(result_t)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : array_like, shape (M, M)
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').

    Returns
    -------
    w : ndarray, shape (M,)
        The eigenvalues, not necessarily ordered, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dsyevd and zheevd
    that sets those routines' flags to return only the eigenvalues of
    real symmetric and complex Hermitian arrays, respectively.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([ 0.17157288+0.j,  5.82842712+0.j])

    """
    UPLO = asbytes(UPLO)  # the lapack_lite wrappers expect a bytes flag
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertSquareness(a)
    t, result_t = _commonType(a)
    real_t = _linalgRealType(t)
    a = _fastCopyAndTranspose(t, a)  # fortran-ordered copy for LAPACK
    a = _to_native_byte_order(a)
    n = a.shape[0]
    liwork = 5*n+3  # integer workspace size used for the *evd drivers
    iwork = zeros((liwork,), fortran_int)
    if isComplexType(t):
        lapack_routine = lapack_lite.zheevd
        w = zeros((n,), real_t)  # eigenvalues of a Hermitian matrix are real
        # Workspace-size query (lwork/lrwork = -1): optimal sizes come back
        # in work[0] and rwork[0], then the actual computation runs.
        lwork = 1
        work = zeros((lwork,), t)
        lrwork = 1
        rwork = zeros((lrwork,), real_t)
        results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
                                 rwork, -1, iwork, liwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,), t)
        lrwork = int(rwork[0])
        rwork = zeros((lrwork,), real_t)
        results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
                                 rwork, lrwork, iwork, liwork, 0)
    else:
        lapack_routine = lapack_lite.dsyevd
        w = zeros((n,), t)
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
                                 iwork, liwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
                                 iwork, liwork, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    return w.astype(result_t)
def _convertarray(a):
    """Cast `a` to a LAPACK-compatible scalar type and fortran-order it.

    Returns the converted array together with the computation type and
    the type the final result should be cast back to.
    """
    calc_t, result_t = _commonType(a)
    converted = _fastCT(a.astype(calc_t))
    return converted, calc_t, result_t
# Eigenvectors
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : array_like, shape (M, M)
        A square array of real or complex elements.

    Returns
    -------
    w : ndarray, shape (M,)
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered, nor are they
        necessarily real for real arrays (though for real arrays
        complex-valued eigenvalues should occur in conjugate pairs).
    v : ndarray, shape (M, M)
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
        array.
    eigvals : eigenvalues of a non-symmetric array.

    Notes
    -----
    This is a simple interface to the LAPACK routines dgeev and zgeev
    which compute the eigenvalues and eigenvectors of, respectively,
    general real- and complex-valued square arrays.

    The number `w` is an eigenvalue of `a` if there exists a vector
    `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``
    for :math:`i \\in \\{0,...,M-1\\}`.

    The array `v` of eigenvectors may not be of maximum rank, that is, some
    of the columns may be linearly dependent, although round-off error may
    obscure that fact. If the eigenvalues are all different, then theoretically
    the eigenvectors are linearly independent. Likewise, the (complex-valued)
    matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
    if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
    transpose of `a`.

    Finally, it is emphasized that `v` consists of the *right* (as in
    right-hand side) eigenvectors of `a`. A vector `y` satisfying
    ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
    eigenvector of `a`, and, in general, the left and right eigenvectors
    of a matrix are not necessarily the (perhaps conjugate) transposes
    of each other.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.

    Examples
    --------
    >>> from numpy import linalg as LA

    (Almost) trivial example with real e-values and e-vectors.

    >>> w, v = LA.eig(np.diag((1, 2, 3)))
    >>> w; v
    array([ 1.,  2.,  3.])
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])

    Real matrix possessing complex e-values and e-vectors; note that the
    e-values are complex conjugates of each other.

    >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
    >>> w; v
    array([ 1. + 1.j,  1. - 1.j])
    array([[ 0.70710678+0.j        ,  0.70710678+0.j        ],
           [ 0.00000000-0.70710678j,  0.00000000+0.70710678j]])

    Complex-valued matrix with real e-values (but complex-valued e-vectors);
    note that a.conj().T = a, i.e., a is Hermitian.

    >>> a = np.array([[1, 1j], [-1j, 1]])
    >>> w, v = LA.eig(a)
    >>> w; v
    array([ 2.00000000e+00+0.j,  5.98651912e-36+0.j]) # i.e., {2, 0}
    array([[ 0.00000000+0.70710678j,  0.70710678+0.j        ],
           [ 0.70710678+0.j        ,  0.00000000+0.70710678j]])

    Be careful about round-off error!

    >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
    >>> # Theor. e-values are 1 +/- 1e-9
    >>> w, v = LA.eig(a)
    >>> w; v
    array([ 1.,  1.])
    array([[ 1.,  0.],
           [ 0.,  1.]])

    """
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertSquareness(a)
    _assertFinite(a)
    a, t, result_t = _convertarray(a) # convert to double or cdouble type
    a = _to_native_byte_order(a)
    real_t = _linalgRealType(t)
    n = a.shape[0]
    dummy = zeros((1,), t)  # placeholder for the left eigenvectors (job is _N)
    if isComplexType(t):
        # Complex routines take different arguments
        lapack_routine = lapack_lite.zgeev
        w = zeros((n,), t)
        v = zeros((n, n), t)
        lwork = 1
        work = zeros((lwork,), t)
        rwork = zeros((2*n,), real_t)
        # workspace-size query (lwork=-1), then the actual computation
        results = lapack_routine(_N, _V, n, a, n, w,
                                 dummy, 1, v, n, work, -1, rwork, 0)
        lwork = int(abs(work[0]))  # abs(): work[0] is complex here
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _V, n, a, n, w,
                                 dummy, 1, v, n, work, lwork, rwork, 0)
    else:
        lapack_routine = lapack_lite.dgeev
        wr = zeros((n,), t)    # real parts of the eigenvalues
        wi = zeros((n,), t)    # imaginary parts of the eigenvalues
        vr = zeros((n, n), t)  # right eigenvectors (conjugate pairs packed)
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _V, n, a, n, wr, wi,
                                 dummy, 1, vr, n, work, -1, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(_N, _V, n, a, n, wr, wi,
                                 dummy, 1, vr, n, work, lwork, 0)
        if all(wi == 0.0):
            # purely real spectrum: return real eigenvalues/vectors
            w = wr
            v = vr
            result_t = _realType(result_t)
        else:
            w = wr+1j*wi
            v = array(vr, w.dtype)
            ind = flatnonzero(wi != 0.0)        # indices of complex e-vals
            # dgeev stores each conjugate pair's eigenvectors as real and
            # imaginary parts in consecutive entries (rows here, because
            # `a`/`vr` are in transposed fortran order); recombine them.
            for i in range(len(ind)//2):
                v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
                v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
            result_t = _complexType(result_t)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    # `v` is fortran-ordered; transpose back so eigenvectors are columns.
    vt = v.transpose().astype(result_t)
    return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : array_like, shape (M, M)
        A complex Hermitian or real symmetric matrix.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').

    Returns
    -------
    w : ndarray, shape (M,)
        The eigenvalues, not necessarily ordered.
    v : ndarray, or matrix object if `a` is, shape (M, M)
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dsyevd and zheevd,
    which compute the eigenvalues and eigenvectors of real symmetric and
    complex Hermitian arrays, respectively.

    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real. [1]_ The array `v` of (column) eigenvectors is unitary
    and `a`, `w`, and `v` satisfy the equations
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> a
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(a)
    >>> w; v
    array([ 0.17157288,  5.82842712])
    array([[-0.92387953+0.j        , -0.38268343+0.j        ],
           [ 0.00000000+0.38268343j,  0.00000000-0.92387953j]])
    >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
    array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
    >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
    array([ 0.+0.j,  0.+0.j])
    >>> A = np.matrix(a) # what happens if input is a matrix object
    >>> A
    matrix([[ 1.+0.j,  0.-2.j],
            [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(A)
    >>> w; v
    array([ 0.17157288,  5.82842712])
    matrix([[-0.92387953+0.j        , -0.38268343+0.j        ],
            [ 0.00000000+0.38268343j,  0.00000000-0.92387953j]])

    """
    UPLO = asbytes(UPLO)  # the lapack_lite wrappers expect a bytes flag
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertSquareness(a)
    t, result_t = _commonType(a)
    real_t = _linalgRealType(t)
    a = _fastCopyAndTranspose(t, a)  # fortran-ordered copy for LAPACK
    a = _to_native_byte_order(a)
    n = a.shape[0]
    liwork = 5*n+3  # integer workspace size used for the *evd drivers
    iwork = zeros((liwork,), fortran_int)
    if isComplexType(t):
        lapack_routine = lapack_lite.zheevd
        w = zeros((n,), real_t)  # eigenvalues of a Hermitian matrix are real
        # Workspace-size query (lwork/lrwork = -1), then the actual run.
        lwork = 1
        work = zeros((lwork,), t)
        lrwork = 1
        rwork = zeros((lrwork,), real_t)
        results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
                                 rwork, -1, iwork, liwork, 0)
        lwork = int(abs(work[0]))
        work = zeros((lwork,), t)
        lrwork = int(rwork[0])
        rwork = zeros((lrwork,), real_t)
        results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
                                 rwork, lrwork, iwork, liwork, 0)
    else:
        lapack_routine = lapack_lite.dsyevd
        w = zeros((n,), t)
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
                                 iwork, liwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
                                 iwork, liwork, 0)
    if results['info'] > 0:
        raise LinAlgError, 'Eigenvalues did not converge'
    # Eigenvectors were written into the fortran-ordered `a`; transpose
    # back so they appear as columns of the returned array.
    at = a.transpose().astype(result_t)
    return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
    """
    Singular Value Decomposition.

    Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
    are unitary and `s` is a 1-d array of `a`'s singular values.

    Parameters
    ----------
    a : array_like
        A real or complex matrix of shape (`M`, `N`) .
    full_matrices : bool, optional
        If True (default), `u` and `v` have the shapes (`M`, `M`) and
        (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
        and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
    compute_uv : bool, optional
        Whether or not to compute `u` and `v` in addition to `s`. True
        by default.

    Returns
    -------
    u : ndarray
        Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
        depending on value of ``full_matrices``.
    s : ndarray
        The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
        a 1-d array of length min(`M`, `N`).
    v : ndarray
        Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
        ``full_matrices``.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    The SVD is commonly written as ``a = U S V.H``. The `v` returned
    by this function is ``V.H`` and ``u = U``.

    If ``U`` is a unitary matrix, it means that it
    satisfies ``U.H = inv(U)``.

    The rows of `v` are the eigenvectors of ``a.H a``. The columns
    of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
    `v` and column ``i`` in `u`, the corresponding eigenvalue is
    ``s[i]**2``.

    If `a` is a `matrix` object (as opposed to an `ndarray`), then so
    are all the return values.

    Examples
    --------
    >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)

    Reconstruction based on full SVD:

    >>> U, s, V = np.linalg.svd(a, full_matrices=True)
    >>> U.shape, V.shape, s.shape
    ((9, 6), (6, 6), (6,))
    >>> S = np.zeros((9, 6), dtype=complex)
    >>> S[:6, :6] = np.diag(s)
    >>> np.allclose(a, np.dot(U, np.dot(S, V)))
    True

    Reconstruction based on reduced SVD:

    >>> U, s, V = np.linalg.svd(a, full_matrices=False)
    >>> U.shape, V.shape, s.shape
    ((9, 6), (6, 6), (6,))
    >>> S = np.diag(s)
    >>> np.allclose(a, np.dot(U, np.dot(S, V)))
    True

    """
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertNonEmpty(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    real_t = _linalgRealType(t)
    a = _fastCopyAndTranspose(t, a)  # fortran-ordered copy for LAPACK
    a = _to_native_byte_order(a)
    s = zeros((min(n, m),), real_t)  # singular values are always real
    if compute_uv:
        if full_matrices:
            # full-size u (M, M) and vt (N, N)
            nu = m
            nvt = n
            option = _A
        else:
            # reduced u (M, K) and vt (K, N), K = min(M, N)
            nu = min(n, m)
            nvt = min(n, m)
            option = _S
        u = zeros((nu, m), t)
        vt = zeros((n, nvt), t)
    else:
        # singular values only; u/vt are unused 1x1 placeholders
        option = _N
        nu = 1
        nvt = 1
        u = empty((1, 1), t)
        vt = empty((1, 1), t)
    iwork = zeros((8*min(m, n),), fortran_int)  # integer workspace for *gesdd
    if isComplexType(t):
        lapack_routine = lapack_lite.zgesdd
        rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
        # workspace-size query (lwork=-1), then the actual computation
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
                                 work, -1, rwork, iwork, 0)
        lwork = int(abs(work[0]))  # abs(): work[0] is complex here
        work = zeros((lwork,), t)
        results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
                                 work, lwork, rwork, iwork, 0)
    else:
        lapack_routine = lapack_lite.dgesdd
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
                                 work, -1, iwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
                                 work, lwork, iwork, 0)
    if results['info'] > 0:
        raise LinAlgError, 'SVD did not converge'
    s = s.astype(_realType(result_t))
    if compute_uv:
        # u and vt come back fortran-ordered; transpose to C order.
        u = u.transpose().astype(result_t)
        vt = vt.transpose().astype(result_t)
        return wrap(u), s, wrap(vt)
    else:
        return s
def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    The condition number is the norm of `x` multiplied by the norm of
    its inverse; which norm is used is selected by `p`.

    Parameters
    ----------
    x : array_like, shape (M, N)
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm:

        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================

        inf means the numpy.inf object, and the Frobenius norm is
        the root-of-sum-of-squares norm.

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix. May be infinite.

    See Also
    --------
    numpy.linalg.linalg.norm

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
    >>> LA.cond(a)
    1.4142135623730951
    >>> LA.cond(a, 'fro')
    3.1622776601683795

    """
    # Demote matrix instances to plain ndarrays for the computation.
    x = asarray(x)
    if p is None:
        # Default 2-norm condition number: ratio of the largest singular
        # value to the smallest.
        sigma = svd(x, compute_uv=False)
        return sigma[0] / sigma[-1]
    # Any other norm order: ||x||_p * ||inv(x)||_p.
    return norm(x, p) * norm(inv(x), p)
def matrix_rank(M, tol=None):
    """
    Return matrix rank of array using SVD method.

    The rank is the number of singular values of `M` that exceed `tol`.

    Parameters
    ----------
    M : array_like
        Array of at most 2 dimensions.
    tol : {None, float}, optional
        Threshold below which singular values are considered zero.  When
        `tol` is None it defaults to ``S.max() * eps``, where ``S`` holds
        the singular values of `M` and ``eps`` is the machine epsilon of
        their dtype.

    Notes
    -----
    Golub and van Loan [1]_ define "numerical rank deficiency" using
    tol=eps*S[0] (S[0] being the maximum singular value, i.e. the 2-norm
    of the matrix), and that is the definition used here.  It is a
    reasonable choice when floating point roundoff is the main concern,
    but other tolerances may suit better — e.g. a tolerance near the
    measurement uncertainty when the data themselves are uncertain.

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
           Baltimore: Johns Hopkins University Press, 1996.

    Examples
    --------
    >>> matrix_rank(np.eye(4)) # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0

    """
    M = asarray(M)
    if M.ndim > 2:
        raise TypeError('array should have 2 or fewer dimensions')
    # 0-d / 1-d input: rank is 1 unless every element is zero.
    if M.ndim < 2:
        return int(not all(M == 0))
    sv = svd(M, compute_uv=False)
    if tol is None:
        tol = sv.max() * finfo(sv.dtype).eps
    return sum(sv > tol)
# Generalized inverse
def pinv(a, rcond=1e-15):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate the generalized inverse of a matrix using its
    singular-value decomposition (SVD) and including all
    *large* singular values.

    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be pseudo-inverted.
    rcond : float
        Cutoff for small singular values.
        Singular values smaller (in modulus) than
        `rcond` * largest_singular_value (again, in modulus)
        are set to zero.

    Returns
    -------
    B : ndarray, shape (N, M)
        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
        is `B`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    Notes
    -----
    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
    defined as: "the matrix that 'solves' [the least-squares problem]
    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.

    It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
    value decomposition of A, then
    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
    orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values, (followed, typically, by
    zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
    consisting of the reciprocals of A's singular values
    (again, followed by zeros). [1]_

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.

    Examples
    --------
    The following example checks that ``a * a+ * a == a`` and
    ``a+ * a * a+ == a+``:

    >>> a = np.random.randn(9, 6)
    >>> B = np.linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True

    """
    a, wrap = _makearray(a)
    _assertNonEmpty(a)
    # Conjugating `a` up front lets plain (non-conjugate) transposes be
    # used below when assembling pinv(a) = v.H * diag(1/s) * u.H.
    a = a.conjugate()
    u, s, vt = svd(a, 0)
    m = u.shape[0]
    n = vt.shape[1]
    # Invert the singular values above the cutoff; treat the rest as
    # exactly zero, which regularizes the pseudo-inverse.
    cutoff = rcond*maximum.reduce(s)
    for i in range(min(n, m)):
        if s[i] > cutoff:
            s[i] = 1./s[i]
        else:
            s[i] = 0.
    res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
    return wrap(res)
# Determinant
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    If an array has a very small or very large determinant, than a call to
    `det` may overflow or underflow. This routine is more robust against such
    issues, because it computes the logarithm of the determinant rather than
    the determinant itself.

    Parameters
    ----------
    a : array_like, shape (M, M)
        Input array.

    Returns
    -------
    sign : float or complex
        A number representing the sign of the determinant. For a real matrix,
        this is 1, 0, or -1. For a complex matrix, this is a complex number
        with absolute value 1 (i.e., it is on the unit circle), or else 0.
    logdet : float
        The natural log of the absolute value of the determinant.

    If the determinant is zero, then `sign` will be 0 and `logdet` will be
    -Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.

    Notes
    -----
    The determinant is computed via LU factorization using the LAPACK
    routine z/dgetrf.

    .. versionadded:: 2.0.0.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (-1, 0.69314718055994529)
    >>> sign * np.exp(logdet)
    -2.0

    This routine succeeds where ordinary `det` does not:

    >>> np.linalg.det(np.eye(500) * 0.1)
    0.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)
    (1, -1151.2925464970228)

    See Also
    --------
    det

    """
    a = asarray(a)
    _assertRank2(a)
    _assertSquareness(a)
    t, result_t = _commonType(a)
    a = _fastCopyAndTranspose(t, a)  # fortran-ordered copy for LAPACK
    a = _to_native_byte_order(a)
    n = a.shape[0]
    if isComplexType(t):
        lapack_routine = lapack_lite.zgetrf
    else:
        lapack_routine = lapack_lite.dgetrf
    pivots = zeros((n,), fortran_int)
    results = lapack_routine(n, n, a, n, pivots, 0)
    info = results['info']
    if (info < 0):
        raise TypeError, "Illegal input to Fortran routine"
    elif (info > 0):
        # info > 0: U has an exactly-zero diagonal entry, so det == 0.
        return (t(0.0), _realType(t)(-Inf))
    # Sign of the row permutation: every pivot that differs from its
    # (1-based) position corresponds to one row swap, so the parity of
    # the mismatch count determines the permutation's sign.
    sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
    d = diagonal(a)
    absd = absolute(d)
    # Fold in the sign (real case) or unit phase (complex case) of each
    # diagonal entry of the LU factorization.
    sign *= multiply.reduce(d / absd)
    log(absd, absd)  # in-place natural log of the magnitudes
    logdet = add.reduce(absd, axis=-1)
    return sign, logdet
def det(a):
    """
    Compute the determinant of an array.

    Parameters
    ----------
    a : array_like, shape (M, M)
        Input array.

    Returns
    -------
    det : ndarray
        Determinant of `a`.

    Notes
    -----
    The determinant is obtained from `slogdet`, which performs an LU
    factorization via the LAPACK routine z/dgetrf.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0

    See Also
    --------
    slogdet : Another way to representing the determinant, more suitable
        for large matrices where underflow/overflow may occur.

    """
    # Reconstruct the determinant from its sign and log-magnitude.
    s, logabsdet = slogdet(a)
    return s * exp(logabsdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
    """
    Matrix or vector norm.

    This function is able to return one of seven different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : array_like, shape (M,) or (M, N)
        Input array.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.

    Returns
    -------
    n : float
        Norm of the matrix or vector.

    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> b = a.reshape((3, 3))
    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4
    >>> LA.norm(b, np.inf)
    9

    """
    x = asarray(x)
    # Default: Frobenius norm for matrices / 2-norm for vectors, computed
    # without branching on dimensionality.
    if ord is None:
        return sqrt(add.reduce((x.conj() * x).ravel().real))
    nd = x.ndim
    if nd == 1:
        # Vector norms.
        if ord == Inf:
            return abs(x).max()
        elif ord == -Inf:
            return abs(x).min()
        elif ord == 0:
            return (x != 0).sum()  # Zero "norm": count of non-zero entries
        elif ord == 1:
            return abs(x).sum()  # special case for speedup
        elif ord == 2:
            return sqrt(((x.conj()*x).real).sum())  # special case for speedup
        else:
            # ord must behave like a number for the general p-norm formula.
            try:
                ord + 1
            except TypeError:
                # NOTE: raise E("msg") instead of the Python-2-only
                # "raise E, msg" form; valid on both Python 2 and 3.
                raise ValueError("Invalid norm order for vectors.")
            return ((abs(x)**ord).sum())**(1.0/ord)
    elif nd == 2:
        # Matrix norms.
        if ord == 2:
            return svd(x, compute_uv=0).max()  # largest singular value
        elif ord == -2:
            return svd(x, compute_uv=0).min()  # smallest singular value
        elif ord == 1:
            return abs(x).sum(axis=0).max()  # maximum column sum
        elif ord == Inf:
            return abs(x).sum(axis=1).max()  # maximum row sum
        elif ord == -1:
            return abs(x).sum(axis=0).min()  # minimum column sum
        elif ord == -Inf:
            return abs(x).sum(axis=1).min()  # minimum row sum
        elif ord in ['fro', 'f']:
            return sqrt(add.reduce((x.conj() * x).real.ravel()))
        else:
            raise ValueError("Invalid norm order for matrices.")
    else:
        raise ValueError("Improper number of dimensions to norm.")
| gpl-2.0 | -8,883,453,398,373,004,000 | -2,992,840,244,489,469,400 | 29.810893 | 79 | 0.546466 | false |
kubeflow/kfp-tekton-backend | samples/contrib/image-captioning-gcp/src/models.py | 2 | 3943 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the models used in the image captioning pipeline"""
import tensorflow as tf
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau-style) attention over encoder feature locations."""

    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        # Two projections feed the additive score; V collapses it to a scalar
        # per feature location.
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, features, hidden):
        # features (CNN encoder output): (batch_size, 64, embedding_dim)
        # hidden (decoder state): (batch_size, hidden_size)
        # Broadcast the query against every feature location.
        query = tf.expand_dims(hidden, 1)  # (batch_size, 1, hidden_size)
        # Additive score: (batch_size, 64, hidden_size).
        score = tf.nn.tanh(self.W1(features) + self.W2(query))
        # Softmax over the 64 locations yields (batch_size, 64, 1) weights.
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        # Weighted sum of features: (batch_size, hidden_size).
        context_vector = tf.reduce_sum(attention_weights * features, axis=1)
        return context_vector, attention_weights
# CNN Encoder model
# CNN Encoder model
class CNN_Encoder(tf.keras.Model):
    """Project pre-extracted image features through one dense layer.

    The image features were already extracted and pickled upstream, so this
    "encoder" is just a fully connected projection plus a ReLU.
    """

    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        # Output shape after fc: (batch_size, 64, embedding_dim).
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        # Dense projection followed by a ReLU non-linearity.
        return tf.nn.relu(self.fc(x))
# RNN Decoder model
# RNN Decoder model
class RNN_Decoder(tf.keras.Model):
    """GRU-based caption decoder with Bahdanau attention over image features."""

    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(self.units)

    def call(self, x, features, hidden):
        # Attend over the encoder features using the previous decoder state.
        context_vector, attention_weights = self.attention(features, hidden)
        # Embedding lookup: (batch_size, 1, embedding_dim).
        embedded = self.embedding(x)
        # Prepend the context vector to the embedded input token:
        # (batch_size, 1, embedding_dim + hidden_size).
        gru_input = tf.concat([tf.expand_dims(context_vector, 1), embedded],
                              axis=-1)
        output, state = self.gru(gru_input)
        # (batch_size, max_length, hidden_size).
        projected = self.fc1(output)
        # Flatten time into the batch axis before the vocabulary projection.
        flattened = tf.reshape(projected, (-1, projected.shape[2]))
        # Logits over the vocabulary: (batch_size * max_length, vocab).
        logits = self.fc2(flattened)
        return logits, state, attention_weights

    def reset_state(self, batch_size):
        """Return an all-zero initial hidden state for a new batch."""
        return tf.zeros((batch_size, self.units))
| apache-2.0 | 1,592,721,680,591,071,500 | 2,165,532,262,322,535,000 | 37.656863 | 85 | 0.636064 | false |
apollo13/ansible | test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py | 47 | 21870 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: ios_config
author: Peter Sprygada (@privateip)
short_description: Manage Cisco IOS configuration sections
description:
- Cisco IOS configurations use a simple block indent file syntax for segmenting configuration
into sections. This module provides an implementation for working with IOS configuration
sections in a deterministic way.
extends_documentation_fragment:
- cisco.ios.ios
notes:
- Tested against IOS 15.6
- Abbreviated commands are NOT idempotent, see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
options:
lines:
description:
- The ordered set of commands that should be configured in the section. The commands
must be the exact same commands as found in the device running-config. Be sure
to note the configuration command syntax as some commands are automatically
modified by the device config parser.
aliases:
- commands
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy the
commands should be checked against. If the parents argument is omitted, the
commands are checked against the set of top level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration or configuration
template to load. The path to the source file can either be the full path on
the Ansible control host or a relative path from the playbook or role root directory. This
argument is mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if a change needs
to be made. This allows the playbook designer the opportunity to perform configuration
commands prior to pushing any changes without affecting how the set of commands
are matched against the system.
after:
description:
- The ordered set of commands to append to the end of the command stack if a change
needs to be made. Just like with I(before) this allows the playbook designer
to append a set of commands to be executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of the set of commands
against the current device config. If match is set to I(line), commands are
matched line by line. If match is set to I(strict), command lines are matched
with respect to position. If match is set to I(exact), command lines must be
an equal match. Finally, if match is set to I(none), the module will not attempt
to compare the source configuration with the running configuration on the remote
device.
choices:
- line
- strict
- exact
- none
default: line
replace:
description:
- Instructs the module on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are pushed
to the device in configuration mode. If the replace argument is set to I(block)
then the entire command block is pushed to the device in configuration mode
if any line is not correct.
default: line
choices:
- line
- block
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration element to the
IOS device. It specifies the character to use as the delimiting character. This
only applies to the configuration action.
default: '@'
backup:
description:
- This argument will cause the module to create a full backup of the current C(running-config)
from the remote device before any changes are made. If the C(backup_options)
value is not given, the backup file is written to the C(backup) folder in the
playbook root directory or role root directory, if playbook is part of an ansible
role. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and retrieve the current
running-config to use as a base for comparing against the contents of source.
There are times when it is not desirable to have the task get the current running-config
for every task in a playbook. The I(running_config) argument allows the implementer
to pass in the configuration to use as the base config for comparison.
aliases:
- config
defaults:
description:
- This argument specifies whether or not to collect all defaults when getting
the remote device running config. When enabled, the module will get the current
config by issuing the command C(show running-config all).
type: bool
default: 'no'
save_when:
description:
- When changes are made to the device running-configuration, the changes are not
copied to non-volatile storage by default. Using this argument will change
that before. If the argument is set to I(always), then the running-config will
always be copied to the startup-config and the I(modified) flag will always
be set to True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since the last save
to startup-config. If the argument is set to I(never), the running-config will
never be copied to the startup-config. If the argument is set to I(changed),
then the running-config will only be copied to the startup-config if the task
has made a change. I(changed) was added in Ansible 2.5.
default: never
choices:
- always
- never
- modified
- changed
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument the module can
generate diffs against different sources.
- When this option is configure as I(startup), the module will return the diff
of the running-config against the startup-config.
- When this option is configured as I(intended), the module will return the diff
of the running-config against the configuration provided in the C(intended_config)
argument.
- When this option is configured as I(running), the module will return the before
and after diff of the running-config with respect to any changes made to the
device configuration.
choices:
- running
- startup
- intended
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be ignored during
the diff. This is used for lines in the configuration that are automatically
updated by the system. This argument takes a list of regular expressions or
exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that the node should
conform to and is used to check the final running-config against. This argument
will not modify any settings on the remote device and is strictly used to check
the compliance of the current device's configuration against. When specifying
this argument, the task should also modify the C(diff_against) value and set
it to I(intended).
backup_options:
description:
- This is a dict object containing configurable options related to backup file
path. The value of this option is read only when C(backup) is set to I(yes),
if C(backup) is set to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and
date in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will
be first created and the filename is either the value of C(filename) or
default filename as described in C(filename) options description. If the
path value is not given in that case a I(backup) directory will be created
in the current working directory and backup configuration will be copied
in C(filename) within I(backup) directory.
type: path
type: dict
"""
EXAMPLES = """
- name: configure top level configuration
ios_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
ios_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface Ethernet1
- name: configure ip helpers on multiple interfaces
ios_config:
lines:
- ip helper-address 172.26.1.10
- ip helper-address 172.26.3.8
parents: "{{ item }}"
with_items:
- interface Ethernet1
- interface Ethernet2
- interface GigabitEthernet1
- name: configure policer in Scavenger class
ios_config:
lines:
- conform-action transmit
- exceed-action drop
parents:
- policy-map Foo
- class Scavenger
- police cir 64000
- name: load new acl into device
ios_config:
lines:
- 10 permit ip host 192.0.2.1 any log
- 20 permit ip host 192.0.2.2 any log
- 30 permit ip host 192.0.2.3 any log
- 40 permit ip host 192.0.2.4 any log
- 50 permit ip host 192.0.2.5 any log
parents: ip access-list extended test
before: no ip access-list extended test
match: exact
- name: check the running-config against master config
ios_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- name: check the startup-config against the running-config
ios_config:
diff_against: startup
diff_ignore_lines:
- ntp clock .*
- name: save running to startup when modified
ios_config:
save_when: modified
- name: for idempotency, use full-form commands
ios_config:
lines:
# - shut
- shutdown
# parents: int gig1/0/11
parents: interface GigabitEthernet1/0/11
# Set boot image based on comparison to a group_var (version) and the version
# that is returned from the `ios_facts` module
- name: SETTING BOOT IMAGE
ios_config:
lines:
- no boot system
- boot system flash bootflash:{{new_image}}
host: "{{ inventory_hostname }}"
when: ansible_net_version != version
- name: render a Jinja2 template onto an IOS device
ios_config:
backup: yes
src: ios_template.j2
- name: configurable backup path
ios_config:
src: ios_template.j2
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
filename:
description: The name of the backup file
returned: when backup is yes and filename is not specified in backup options
type: str
sample: ios_config.2016-07-16@22:28:34
shortname:
description: The full path to the backup file excluding the timestamp
returned: when backup is yes and filename is not specified in backup options
type: str
sample: /playbooks/ansible/backup/ios_config
date:
description: The date extracted from the backup file name
returned: when backup is yes
type: str
sample: "2016-07-16"
time:
description: The time extracted from the backup file name
returned: when backup is yes
type: str
sample: "22:28:34"
"""
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
run_commands,
get_config,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
get_defaults_flag,
get_connection,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
dumps,
)
def check_args(module, warnings):
    """Validate module parameters; fail when multiline_delimiter is not a
    single character."""
    delimiter = module.params["multiline_delimiter"]
    if delimiter and len(delimiter) != 1:
        module.fail_json(
            msg="multiline_delimiter value can only be a "
            "single character"
        )
def edit_config_or_macro(connection, commands):
    """Push *commands* to the device, using the macro editor for macro blocks.

    Only the positive ``macro name`` form needs the dedicated macro editor;
    everything else (including the negated ``no macro ...`` variant) goes
    through normal configuration mode.
    """
    editor = (
        connection.edit_macro
        if commands[0].startswith("macro name")
        else connection.edit_config
    )
    editor(candidate=commands)
def get_candidate_config(module):
    """Build the candidate configuration from either ``src`` or ``lines``.

    ``src`` (a pre-rendered config/template) wins over ``lines``; with
    neither supplied an empty string is returned.
    """
    params = module.params
    if params["src"]:
        return params["src"]
    if params["lines"]:
        config = NetworkConfig(indent=1)
        config.add(params["lines"], parents=params["parents"] or [])
        return dumps(config, "raw")
    return ""
def get_running_config(module, current_config=None, flags=None):
    """Return the base running-config for diffing.

    Preference order: the explicit ``running_config`` module parameter, then
    the already-fetched *current_config*, and finally a fresh fetch from the
    device (always re-fetched when ``defaults`` is requested).
    """
    explicit = module.params["running_config"]
    if explicit:
        return explicit
    if module.params["defaults"] or not current_config:
        return get_config(module, flags=flags)
    return current_config
def save_config(module, result):
    """Copy the running-config to startup-config, honouring check mode.

    Always marks the result as changed; in check mode the copy is skipped
    and a warning is recorded instead.
    """
    result["changed"] = True
    if module.check_mode:
        module.warn(
            "Skipping command `copy running-config startup-config` "
            "due to check_mode. Configuration not copied to "
            "non-volatile storage"
        )
    else:
        run_commands(module, "copy running-config startup-config\r")
def main():
    """Main entry point for module execution.

    Builds the argument spec, computes the diff between the candidate and
    running configuration, pushes any required changes to the device,
    optionally saves the running-config per ``save_when``, and reports
    diffs before exiting.
    """
    # Sub-spec for the optional backup file name/location.
    backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
    argument_spec = dict(
        src=dict(type="path"),
        lines=dict(aliases=["commands"], type="list"),
        parents=dict(type="list"),
        before=dict(type="list"),
        after=dict(type="list"),
        match=dict(
            default="line", choices=["line", "strict", "exact", "none"]
        ),
        replace=dict(default="line", choices=["line", "block"]),
        multiline_delimiter=dict(default="@"),
        running_config=dict(aliases=["config"]),
        intended_config=dict(),
        defaults=dict(type="bool", default=False),
        backup=dict(type="bool", default=False),
        backup_options=dict(type="dict", options=backup_spec),
        save_when=dict(
            choices=["always", "never", "modified", "changed"], default="never"
        ),
        diff_against=dict(choices=["startup", "intended", "running"]),
        diff_ignore_lines=dict(type="list"),
    )
    argument_spec.update(ios_argument_spec)
    # src is a full config source, so it cannot be combined with lines/parents;
    # strict/exact matching and block replace only make sense with lines.
    mutually_exclusive = [("lines", "src"), ("parents", "src")]
    required_if = [
        ("match", "strict", ["lines"]),
        ("match", "exact", ["lines"]),
        ("replace", "block", ["lines"]),
        ("diff_against", "intended", ["intended_config"]),
    ]
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        required_if=required_if,
        supports_check_mode=True,
    )
    result = {"changed": False}
    warnings = list()
    check_args(module, warnings)
    result["warnings"] = warnings
    diff_ignore_lines = module.params["diff_ignore_lines"]
    config = None
    contents = None
    flags = get_defaults_flag(module) if module.params["defaults"] else []
    connection = get_connection(module)
    # Fetch the current device config up front when it is needed for a backup
    # or for a later running-config diff.
    if module.params["backup"] or (
        module._diff and module.params["diff_against"] == "running"
    ):
        contents = get_config(module, flags=flags)
        config = NetworkConfig(indent=1, contents=contents)
        if module.params["backup"]:
            result["__backup__"] = contents
    # Only compute and push a diff when a candidate config was supplied.
    if any((module.params["lines"], module.params["src"])):
        match = module.params["match"]
        replace = module.params["replace"]
        path = module.params["parents"]
        candidate = get_candidate_config(module)
        running = get_running_config(module, contents, flags=flags)
        try:
            # The diff is computed on the device side by the connection plugin.
            response = connection.get_diff(
                candidate=candidate,
                running=running,
                diff_match=match,
                diff_ignore_lines=diff_ignore_lines,
                path=path,
                diff_replace=replace,
            )
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
        config_diff = response["config_diff"]
        banner_diff = response["banner_diff"]
        if config_diff or banner_diff:
            commands = config_diff.split("\n")
            # before/after wrap the computed command set without taking part
            # in the diff itself.
            if module.params["before"]:
                commands[:0] = module.params["before"]
            if module.params["after"]:
                commands.extend(module.params["after"])
            result["commands"] = commands
            result["updates"] = commands
            result["banners"] = banner_diff
            # send the configuration commands to the device and merge
            # them with the current running config
            if not module.check_mode:
                if commands:
                    edit_config_or_macro(connection, commands)
                if banner_diff:
                    connection.edit_banner(
                        candidate=json.dumps(banner_diff),
                        multiline_delimiter=module.params[
                            "multiline_delimiter"
                        ],
                    )
            result["changed"] = True
    # Handle save_when: decide whether to persist running-config to
    # startup-config.
    running_config = module.params["running_config"]
    startup_config = None
    if module.params["save_when"] == "always":
        save_config(module, result)
    elif module.params["save_when"] == "modified":
        # Compare running vs startup on the device and save only on mismatch.
        output = run_commands(
            module, ["show running-config", "show startup-config"]
        )
        running_config = NetworkConfig(
            indent=1, contents=output[0], ignore_lines=diff_ignore_lines
        )
        startup_config = NetworkConfig(
            indent=1, contents=output[1], ignore_lines=diff_ignore_lines
        )
        if running_config.sha1 != startup_config.sha1:
            save_config(module, result)
    elif module.params["save_when"] == "changed" and result["changed"]:
        save_config(module, result)
    # Produce before/after diff output when ansible-playbook --diff is used.
    if module._diff:
        if not running_config:
            output = run_commands(module, "show running-config")
            contents = output[0]
        else:
            contents = running_config
        # recreate the object in order to process diff_ignore_lines
        running_config = NetworkConfig(
            indent=1, contents=contents, ignore_lines=diff_ignore_lines
        )
        if module.params["diff_against"] == "running":
            if module.check_mode:
                module.warn(
                    "unable to perform diff against running-config due to check mode"
                )
                contents = None
            else:
                contents = config.config_text
        elif module.params["diff_against"] == "startup":
            if not startup_config:
                output = run_commands(module, "show startup-config")
                contents = output[0]
            else:
                contents = startup_config.config_text
        elif module.params["diff_against"] == "intended":
            contents = module.params["intended_config"]
        if contents is not None:
            base_config = NetworkConfig(
                indent=1, contents=contents, ignore_lines=diff_ignore_lines
            )
            if running_config.sha1 != base_config.sha1:
                # NOTE(review): if --diff is in effect but diff_against was
                # never set, neither branch below binds before/after, which
                # looks like it would raise NameError -- confirm diff_against
                # is always set when module._diff is true.
                if module.params["diff_against"] == "intended":
                    before = running_config
                    after = base_config
                elif module.params["diff_against"] in ("startup", "running"):
                    before = base_config
                    after = running_config
                result.update(
                    {
                        "changed": True,
                        "diff": {"before": str(before), "after": str(after)},
                    }
                )
    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == "__main__":
    main()
| gpl-3.0 | -8,803,086,275,834,204,000 | 227,876,393,763,200,640 | 35.694631 | 172 | 0.655784 | false |
nozuono/calibre-webserver | src/calibre/library/database2.py | 4 | 164046 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid, hashlib, copy, types
from collections import defaultdict, namedtuple
import threading, random
from itertools import repeat
from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,
string_to_authors, get_title_sort_pat)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata, TagsIcons
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError
from calibre.library.prefs import DBPrefs
from calibre.ebooks.metadata.book.base import Metadata
from calibre.constants import preferred_encoding, iswindows, filesystem_encoding
from calibre.ptempfile import (PersistentTemporaryFile,
base_dir, SpooledTemporaryFile)
from calibre.customize.ui import (run_plugins_on_import,
run_plugins_on_postimport)
from calibre import isbytestring
from calibre.utils.filenames import (ascii_filename, samefile,
WindowsAtomicFolderMove, hardlink_file)
from calibre.utils.date import (utcnow, now as nowf, utcfromtimestamp,
parse_only_date, UNDEFINED_DATE, parse_date)
from calibre.utils.config import prefs, tweaks, from_json, to_json
from calibre.utils.icu import sort_key, strcmp, lower
from calibre.utils.search_query_parser import saved_searches, set_saved_searches
from calibre.ebooks import check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.db.categories import Tag, CATEGORY_SORTS
from calibre.utils.localization import (canonicalize_lang,
calibre_langcode_to_name)
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
SPOOL_SIZE = 30*1024*1024
ProxyMetadata = namedtuple('ProxyMetadata', 'book_size ondevice_col db_approx_formats')
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.conn.execute('pragma user_version=%d'%int(val))
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if self._library_id_ is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.conn.executescript('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES ("%s");
'''%self._library_id_)
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
def connect(self):
    '''
    Open (creating if necessary) metadata.db, enforcing Windows path-length
    limits first, and install per-connection SQL helpers.
    '''
    # 259 is the Windows MAX_PATH budget; reserve room for the deepest
    # book path (author/title components plus filename slack).
    if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
        raise ValueError(_(
            'Path to library too long. Must be less than'
            ' %d characters.')%(259-4*self.PATH_LIMIT-10))
    exists = os.path.exists(self.dbpath)
    if not exists:
        # Be more strict when creating new libraries as the old calculation
        # allowed for max path lengths of 265 chars.
        if (iswindows and len(self.library_path) >
                self.WINDOWS_LIBRARY_PATH_LIMIT):
            raise ValueError(_(
                'Path to library too long. Must be less than'
                ' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)

    self.conn = connect(self.dbpath, self.row_factory)
    if exists and self.user_version == 0:
        # A pre-existing file with user_version 0 is an empty/aborted
        # creation; throw it away and start over.
        self.conn.close()
        os.remove(self.dbpath)
        self.conn = connect(self.dbpath, self.row_factory)
    if self.user_version == 0:
        self.initialize_database()
    # remember to add any filter to the connect method in sqlite.py as well
    # so that various code that connects directly will not complain about
    # missing functions
    self.books_list_filter = self.conn.create_dynamic_filter('books_list_filter')
    # Store temporary tables in memory
    self.conn.execute('pragma temp_store=2')
    self.conn.commit()
@classmethod
def exists_at(cls, path):
    '''Return a truthy value iff *path* contains a calibre metadata.db.'''
    if not path:
        # Preserve falsy inputs ('' / None) as-is, like `path and ...`.
        return path
    return os.path.exists(os.path.join(path, 'metadata.db'))
def __init__(self, library_path, row_factory=False, default_prefs=None,
        read_only=False, is_second_db=False, progress_callback=None,
        restore_all_prefs=False):
    '''
    Open (or create) the library at *library_path*.

    :param row_factory: passed through to the sqlite connect wrapper
    :param default_prefs: if given and the database is newly created, prefs
        and custom columns are copied from this mapping before dynamic init
    :param read_only: operate on a throwaway copy of metadata.db
    :param is_second_db: suppress global singletons (saved searches,
        template functions) for secondary library instances
    :param progress_callback: optional (msg, count) callback used while
        restoring default_prefs
    :param restore_all_prefs: also restore prefs normally skipped
        (e.g. news_to_be_synced)
    '''
    self.is_second_db = is_second_db
    self.get_data_as_dict = types.MethodType(get_data_as_dict, self, LibraryDatabase2)
    try:
        if isbytestring(library_path):
            library_path = library_path.decode(filesystem_encoding)
    except:
        traceback.print_exc()

    self.field_metadata = FieldMetadata()
    self.format_filename_cache = defaultdict(dict)
    self._library_id_ = None
    # Create the lock to be used to guard access to the metadata writer
    # queues. This must be an RLock, not a Lock
    self.dirtied_lock = threading.RLock()
    if not os.path.exists(library_path):
        os.makedirs(library_path)
    self.listeners = set([])
    self.library_path = os.path.abspath(library_path)
    self.row_factory = row_factory
    self.dbpath = os.path.join(library_path, 'metadata.db')
    # Escape hatch used by tests/tools to point at an alternate db file.
    self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
            self.dbpath)

    if read_only and os.path.exists(self.dbpath):
        # Work on only a copy of metadata.db to ensure that
        # metadata.db is not changed
        pt = PersistentTemporaryFile('_metadata_ro.db')
        pt.close()
        shutil.copyfile(self.dbpath, pt.name)
        self.dbpath = pt.name

    # Must be computed before connect() creates the file.
    apply_default_prefs = not os.path.exists(self.dbpath)
    self.connect()

    # Probe case sensitivity of the filesystem by looking for a
    # differently-cased variant of a file we know exists.
    self.is_case_sensitive = (not iswindows and
        not os.path.exists(self.dbpath.replace('metadata.db',
            'MeTAdAtA.dB')))
    SchemaUpgrade.__init__(self)
    # Guarantee that the library_id is set
    self.library_id

    # if we are to copy the prefs and structure from some other DB, then
    # we need to do it before we call initialize_dynamic
    if apply_default_prefs and default_prefs is not None:
        if progress_callback is None:
            progress_callback = lambda x, y: True
        dbprefs = DBPrefs(self)
        progress_callback(None, len(default_prefs))
        for i, key in enumerate(default_prefs):
            # be sure that prefs not to be copied are listed below
            if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
                continue
            dbprefs[key] = default_prefs[key]
            progress_callback(_('restored preference ') + key, i+1)
        if 'field_metadata' in default_prefs:
            fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
            progress_callback(None, len(fmvals))
            for i, f in enumerate(fmvals):
                progress_callback(_('creating custom column ') + f['label'], i)
                self.create_custom_column(f['label'], f['name'], f['datatype'],
                        f['is_multiple'] is not None and len(f['is_multiple']) > 0,
                        f['is_editable'], f['display'])
    self.initialize_template_cache()
    self.initialize_dynamic()
def initialize_template_cache(self):
    '''Reset the cache of compiled formatter templates.'''
    self.formatter_template_cache = dict()
def get_property(self, idx, index_is_id=False, loc=-1):
    '''Return column *loc* of the cached data row for *idx* (an id when
    index_is_id is True, else a view index), or None if there is no row.'''
    if index_is_id:
        row = self.data._data[idx]
    else:
        row = self.data[idx]
    if row is None:
        return None
    return row[loc]
def initialize_dynamic(self):
    '''
    (Re)build all per-library runtime state: preference defaults and
    migrations, SQL triggers/views (including the meta2 view used by the
    in-memory cache), the FIELD_MAP, the ResultCache, and the convenience
    accessors bound onto self. Called from __init__ and after schema or
    custom-column changes.
    '''
    self.field_metadata = FieldMetadata()  # Ensure we start with a clean copy
    self.prefs = DBPrefs(self)
    defs = self.prefs.defaults
    defs['gui_restriction'] = defs['cs_restriction'] = ''
    defs['categories_using_hierarchy'] = []
    defs['column_color_rules'] = []
    defs['column_icon_rules'] = []
    defs['grouped_search_make_user_categories'] = []
    defs['similar_authors_search_key'] = 'authors'
    defs['similar_authors_match_kind'] = 'match_any'
    defs['similar_publisher_search_key'] = 'publisher'
    defs['similar_publisher_match_kind'] = 'match_any'
    defs['similar_tags_search_key'] = 'tags'
    defs['similar_tags_match_kind'] = 'match_all'
    defs['similar_series_search_key'] = 'series'
    defs['similar_series_match_kind'] = 'match_any'
    defs['book_display_fields'] = [
        ('title', False), ('authors', True), ('formats', True),
        ('series', True), ('identifiers', True), ('tags', True),
        ('path', True), ('publisher', False), ('rating', False),
        ('author_sort', False), ('sort', False), ('timestamp', False),
        ('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
        ('last_modified', False), ('size', False), ('languages', False),
        ]
    defs['virtual_libraries'] = {}
    defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
    defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()

    # Migrate the bool tristate tweak
    defs['bools_are_tristate'] = \
            tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
    if self.prefs.get('bools_are_tristate') is None:
        self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])

    # Migrate column coloring rules from the old numbered-pref scheme
    # (column_color_name_1..5) to the column_color_rules list.
    if self.prefs.get('column_color_name_1', None) is not None:
        from calibre.library.coloring import migrate_old_rule
        old_rules = []
        for i in range(1, 6):
            col = self.prefs.get('column_color_name_'+str(i), None)
            templ = self.prefs.get('column_color_template_'+str(i), None)
            if col and templ:
                try:
                    del self.prefs['column_color_name_'+str(i)]
                    rules = migrate_old_rule(self.field_metadata, templ)
                    for templ in rules:
                        old_rules.append((col, templ))
                except:
                    pass
        if old_rules:
            self.prefs['column_color_rules'] += old_rules

    # Migrate saved search and user categories to db preference scheme
    def migrate_preference(key, default):
        # Move a value from the global config (prefs) into this library's
        # DBPrefs, resetting the global to its default.
        oldval = prefs[key]
        if oldval != default:
            self.prefs[key] = oldval
            prefs[key] = default
        if key not in self.prefs:
            self.prefs[key] = default

    migrate_preference('user_categories', {})
    migrate_preference('saved_searches', {})
    if not self.is_second_db:
        set_saved_searches(self, 'saved_searches')

    # migrate grouped_search_terms
    if self.prefs.get('grouped_search_terms', None) is None:
        try:
            ogst = tweaks.get('grouped_search_terms', {})
            ngst = {}
            for t in ogst:
                ngst[icu_lower(t)] = ogst[t]
            self.prefs.set('grouped_search_terms', ngst)
        except:
            pass

    # migrate the gui_restriction preference to a virtual library
    gr_pref = self.prefs.get('gui_restriction', None)
    if gr_pref:
        virt_libs = self.prefs.get('virtual_libraries', {})
        virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
        self.prefs['virtual_libraries'] = virt_libs
        self.prefs['gui_restriction'] = ''
        self.prefs['virtual_lib_on_startup'] = gr_pref

    # migrate the cs_restriction preference to a virtual library
    gr_pref = self.prefs.get('cs_restriction', None)
    if gr_pref:
        virt_libs = self.prefs.get('virtual_libraries', {})
        virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
        self.prefs['virtual_libraries'] = virt_libs
        self.prefs['cs_restriction'] = ''
        self.prefs['cs_virtual_lib_on_startup'] = gr_pref

    # Rename any user categories with names that differ only in case
    user_cats = self.prefs.get('user_categories', [])
    catmap = {}
    for uc in user_cats:
        ucl = icu_lower(uc)
        if ucl not in catmap:
            catmap[ucl] = []
        catmap[ucl].append(uc)
    cats_changed = False
    for uc in catmap:
        if len(catmap[uc]) > 1:
            prints('found user category case overlap', catmap[uc])
            cat = catmap[uc][0]
            suffix = 1
            while icu_lower((cat + unicode(suffix))) in catmap:
                suffix += 1
            prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
            user_cats[cat + unicode(suffix)] = user_cats[cat]
            del user_cats[cat]
            cats_changed = True
    if cats_changed:
        self.prefs.set('user_categories', user_cats)

    if not self.is_second_db:
        load_user_template_functions(self.library_id,
                self.prefs.get('user_template_functions', []))

    # Load the format filename cache
    self.refresh_format_cache()

    # Keep authors.sort in step with authors.name via temp triggers.
    self.conn.executescript('''
        DROP TRIGGER IF EXISTS author_insert_trg;
        CREATE TEMP TRIGGER author_insert_trg
            AFTER INSERT ON authors
            BEGIN
            UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
        END;
        DROP TRIGGER IF EXISTS author_update_trg;
        CREATE TEMP TRIGGER author_update_trg
            BEFORE UPDATE ON authors
            BEGIN
            UPDATE authors SET sort=author_to_author_sort(NEW.name)
            WHERE id=NEW.id AND name <> NEW.name;
        END;
        ''')
    self.conn.execute(
        'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')

    # Views over tags that belong to books tagged as News, with and
    # without the dynamic books_list_filter applied.
    self.conn.executescript(u'''
        CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
            id,
            name,
            (SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id) count,
            (0) as avg_rating,
            name as sort
        FROM tags as x WHERE name!="{0}" AND id IN
            (SELECT DISTINCT tag FROM books_tags_link WHERE book IN
                (SELECT DISTINCT book FROM books_tags_link WHERE tag IN
                    (SELECT id FROM tags WHERE name="{0}")));
        '''.format(_('News')))

    self.conn.executescript(u'''
        CREATE TEMP VIEW IF NOT EXISTS tag_browser_filtered_news AS SELECT DISTINCT
            id,
            name,
            (SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id and books_list_filter(book)) count,
            (0) as avg_rating,
            name as sort
        FROM tags as x WHERE name!="{0}" AND id IN
            (SELECT DISTINCT tag FROM books_tags_link WHERE book IN
                (SELECT DISTINCT book FROM books_tags_link WHERE tag IN
                    (SELECT id FROM tags WHERE name="{0}")));
        '''.format(_('News')))
    self.conn.commit()

    CustomColumns.__init__(self)
    # SELECT-fragment template for many-to-many columns in the meta2 view.
    template = '''\
            (SELECT {query} FROM books_{table}_link AS link INNER JOIN
                {table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
            {col}
            '''
    # Plain strings are used verbatim as columns; tuples are expanded
    # through the template above. Order here defines FIELD_MAP indices.
    columns = ['id', 'title',
        # col         table     link_col          query
        ('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
        'timestamp',
        '(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
        ('rating', 'ratings', 'rating', 'ratings.rating'),
        ('tags', 'tags', 'tag', 'group_concat(name)'),
        '(SELECT text FROM comments WHERE book=books.id) comments',
        ('series', 'series', 'series', 'name'),
        ('publisher', 'publishers', 'publisher', 'name'),
        'series_index',
        'sort',
        'author_sort',
        '(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
        'path',
        'pubdate',
        'uuid',
        'has_cover',
        ('au_map', 'authors', 'author',
            'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
        'last_modified',
        '(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
        ('languages', 'languages', 'lang_code',
            'sortconcat(link.id, languages.lang_code)'),
        ]
    lines = []
    for col in columns:
        line = col
        if isinstance(col, tuple):
            line = template.format(col=col[0], table=col[1],
                    link_col=col[2], query=col[3])
        lines.append(line)

    custom_map = self.custom_columns_in_meta()
    # custom col labels are numbers (the id in the custom_columns table)
    custom_cols = list(sorted(custom_map.keys()))
    lines.extend([custom_map[x] for x in custom_cols])

    # Field name -> column index into rows of the meta2 view / ResultCache.
    self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
        'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
        'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
        'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
        'au_map':18, 'last_modified':19, 'identifiers':20, 'languages':21}

    for k,v in self.FIELD_MAP.iteritems():
        self.field_metadata.set_field_record_index(k, v, prefer_custom=False)

    base = max(self.FIELD_MAP.values())
    for col in custom_cols:
        self.FIELD_MAP[col] = base = base+1
        self.field_metadata.set_field_record_index(
                self.custom_column_num_map[col]['label'],
                base,
                prefer_custom=True)
        if self.custom_column_num_map[col]['datatype'] == 'series':
            # account for the series index column. Field_metadata knows that
            # the series index is one larger than the series. If you change
            # it here, be sure to change it there as well.
            self.FIELD_MAP[str(col)+'_index'] = base = base+1
            self.field_metadata.set_field_record_index(
                    self.custom_column_num_map[col]['label']+'_index',
                    base,
                    prefer_custom=True)

    # Virtual (cache-only) columns appended after all DB-backed columns.
    self.FIELD_MAP['ondevice'] = base = base+1
    self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
    self.FIELD_MAP['marked'] = base = base+1
    self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
    self.FIELD_MAP['series_sort'] = base = base+1
    self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)

    script = '''
        DROP VIEW IF EXISTS meta2;
        CREATE TEMP VIEW meta2 AS
        SELECT
        {0}
        FROM books;
        '''.format(', \n'.join(lines))
    self.conn.executescript(script)
    self.conn.commit()

    # Reconstruct the user categories, putting them into field_metadata
    # Assumption is that someone else will fix them if they change.
    self.field_metadata.remove_dynamic_categories()
    for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
        cat_name = '@' + user_cat  # add the '@' to avoid name collision
        self.field_metadata.add_user_category(label=cat_name, name=user_cat)

    # add grouped search term user categories
    muc = self.prefs.get('grouped_search_make_user_categories', [])
    for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
        if cat in muc:
            # There is a chance that these can be duplicates of an existing
            # user category. Print the exception and continue.
            try:
                self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
            except:
                traceback.print_exc()

    if len(saved_searches().names()):
        self.field_metadata.add_search_category(label='search', name=_('Searches'))

    self.field_metadata.add_grouped_search_terms(
            self.prefs.get('grouped_search_terms', {}))

    self.book_on_device_func = None
    # The in-memory row cache; most accessors below are bound straight
    # through to it.
    self.data = ResultCache(self.FIELD_MAP, self.field_metadata, db_prefs=self.prefs)
    self.search = self.data.search
    self.search_getting_ids = self.data.search_getting_ids
    self.refresh = functools.partial(self.data.refresh, self)
    self.sort = self.data.sort
    self.multisort = self.data.multisort
    self.index = self.data.index
    self.refresh_ids = functools.partial(self.data.refresh_ids, self)
    self.row = self.data.row
    self.has_id = self.data.has_id
    self.count = self.data.count
    self.set_marked_ids = self.data.set_marked_ids

    # Generate one accessor per standard field (e.g. self.title(idx)),
    # all routed through get_property with the right column index.
    for prop in (
            'author_sort', 'authors', 'comment', 'comments',
            'publisher', 'rating', 'series', 'series_index', 'tags',
            'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
            'metadata_last_modified', 'languages',
            ):
        fm = {'comment':'comments', 'metadata_last_modified':
                'last_modified'}.get(prop, prop)
        setattr(self, prop, functools.partial(self.get_property,
                loc=self.FIELD_MAP[fm]))
    setattr(self, 'title_sort', functools.partial(self.get_property,
            loc=self.FIELD_MAP['sort']))

    # Seed the dirtied-books cache from the persistent table.
    d = self.conn.get('SELECT book FROM metadata_dirtied', all=True)
    with self.dirtied_lock:
        self.dirtied_sequence = 0
        self.dirtied_cache = {}
        for x in d:
            self.dirtied_cache[x[0]] = self.dirtied_sequence
            self.dirtied_sequence += 1

    self.refresh_ondevice = functools.partial(self.data.refresh_ondevice, self)
    self.refresh()
    self.last_update_check = self.last_modified()
def break_cycles(self):
    '''Drop the reference cycles between this object and its caches so the
    whole graph can be garbage collected once the database is closed.'''
    self.data.break_cycles()
    self.data = None
    self.field_metadata = None
    self.prefs = None
    self.listeners = None
    self.refresh_ondevice = None
def initialize_database(self):
    '''Create a fresh schema from the bundled metadata_sqlite.sql and stamp
    the database with user_version 1.'''
    raw = P('metadata_sqlite.sql', data=True, allow_user_override=False)
    self.conn.executescript(raw.decode('utf-8'))
    self.conn.commit()
    if self.user_version == 0:
        self.user_version = 1
def saved_search_names(self):
    '''Return the names of all saved searches.'''
    searches = saved_searches()
    return searches.names()
def saved_search_rename(self, old_name, new_name):
    '''Rename the saved search *old_name* to *new_name*.'''
    searches = saved_searches()
    searches.rename(old_name, new_name)
def saved_search_lookup(self, name):
    '''Return the search expression stored under *name*.'''
    searches = saved_searches()
    return searches.lookup(name)
def saved_search_add(self, name, val):
    '''Store the search expression *val* under *name*.'''
    searches = saved_searches()
    searches.add(name, val)
def saved_search_delete(self, name):
    '''Delete the saved search called *name*.'''
    searches = saved_searches()
    searches.delete(name)
def saved_search_set_all(self, smap):
    '''Replace all saved searches with the name -> expression mapping *smap*.'''
    searches = saved_searches()
    searches.set_all(smap)
def last_modified(self):
    ''' Return last modified time as a UTC datetime object'''
    mtime = os.stat(self.dbpath).st_mtime
    return utcfromtimestamp(mtime)
def refresh_format_cache(self):
    '''Rebuild the book_id -> {FORMAT: filename} cache from the data table
    and reset the per-format metadata cache.'''
    cache = defaultdict(dict)
    for book_id, fmt, name in self.conn.get(
            'SELECT book,format,name FROM data'):
        key = fmt.upper() if fmt else ''
        cache[book_id][key] = name
    self.format_filename_cache = cache
    self.format_metadata_cache = defaultdict(dict)
def check_if_modified(self):
    '''Reload the row and format caches if metadata.db was modified on disk
    after the last check, then record the time of this check.'''
    stale = self.last_modified() > self.last_update_check
    if stale:
        self.refresh()
        self.refresh_format_cache()
    self.last_update_check = utcnow()
def path(self, index, index_is_id=False):
    'Return the relative path to the directory containing this books files as a unicode string.'
    if index_is_id:
        row = self.data._data[index]
    else:
        row = self.data[index]
    stored = row[self.FIELD_MAP['path']]
    return stored.replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
    'Return the absolute path to the directory containing this books files as a unicode string.'
    rel = self.path(index, index_is_id=index_is_id)
    ans = os.path.join(self.library_path, rel)
    if create_dirs and not os.path.exists(ans):
        os.makedirs(ans)
    return ans
def construct_path_name(self, id):
    '''
    Construct the directory name for this book based on its metadata.

    Returns ``author/title (id)`` with both components sanitized to ASCII
    and truncated to PATH_LIMIT characters.
    '''
    authors = self.authors(id, index_is_id=True)
    if not authors:
        authors = _('Unknown')
    author = ascii_filename(authors.split(',')[0].replace('|', ',')
                )[:self.PATH_LIMIT].decode('ascii', 'replace')
    title  = ascii_filename(self.title(id, index_is_id=True)
                )[:self.PATH_LIMIT].decode('ascii', 'replace')
    # Strip trailing spaces/periods, which are not allowed at the end of
    # directory names on Windows. The `author and` guard is the bug fix:
    # without it an author that sanitizes to only dots/spaces (e.g. '...')
    # empties the string and author[-1] raises IndexError before the
    # fallback below is ever reached.
    while author and author[-1] in (' ', '.'):
        author = author[:-1]
    if not author:
        author = ascii_filename(_('Unknown')).decode(
                'ascii', 'replace')
    path = author + '/' + title + ' (%d)'%id
    return path
def construct_file_name(self, id):
    '''
    Construct the file name for this book based on its metadata.
    '''
    authors = self.authors(id, index_is_id=True)
    if not authors:
        authors = _('Unknown')
    first_author = authors.split(',')[0].replace('|', ',')
    author = ascii_filename(first_author)[:self.PATH_LIMIT].decode('ascii', 'replace')
    title = ascii_filename(self.title(id, index_is_id=True)
            )[:self.PATH_LIMIT].decode('ascii', 'replace')
    name = title + ' - ' + author
    # Trailing periods are not allowed in file names on Windows.
    return name.rstrip('.')
def rmtree(self, path, permanent=False):
    '''Delete the directory *path*, refusing to act if it is the library
    root or an ancestor of it (which would take the library with it).'''
    lib = self.normpath(self.library_path)
    target = self.normpath(path)
    if not lib.startswith(target):
        delete_tree(path, permanent=permanent)
def normpath(self, path):
    '''Canonicalize *path* for comparisons: resolve symlinks, make absolute,
    and lower-case it on case-insensitive filesystems.'''
    ans = os.path.realpath(path)
    ans = os.path.abspath(ans)
    if not self.is_case_sensitive:
        ans = os.path.normcase(ans).lower()
    return ans
def set_path(self, index, index_is_id=False):
    '''
    Set the path to the directory containing this books files based on its
    current title and author. If there was a previous directory, its contents
    are copied and it is deleted.
    '''
    id = index if index_is_id else self.id(index)
    path = self.construct_path_name(id)
    current_path = self.path(id, index_is_id=True).replace(os.sep, '/')
    formats = self.formats(id, index_is_id=True)
    formats = formats.split(',') if formats else []
    # Check if the metadata used to construct paths has changed
    fname = self.construct_file_name(id)
    changed = False
    for format in formats:
        name = self.format_filename_cache[id].get(format.upper(), None)
        if name and name != fname:
            changed = True
            break
    if path == current_path and not changed:
        # Neither directory nor file names need to move; nothing to do.
        return

    spath = os.path.join(self.library_path, *current_path.split('/'))
    tpath = os.path.join(self.library_path, *path.split('/'))

    source_ok = current_path and os.path.exists(spath)
    # On Windows, moves within the source folder are batched atomically so
    # a failure part-way does not leave the book split across directories.
    wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
    try:
        if not os.path.exists(tpath):
            os.makedirs(tpath)

        if source_ok:  # Migrate existing files
            self.copy_cover_to(id, os.path.join(tpath, 'cover.jpg'),
                    index_is_id=True, windows_atomic_move=wam,
                    use_hardlink=True)
            for format in formats:
                copy_function = functools.partial(self.copy_format_to, id,
                        format, index_is_id=True, windows_atomic_move=wam,
                        use_hardlink=True)
                try:
                    self.add_format(id, format, None, index_is_id=True,
                        path=tpath, notify=False, copy_function=copy_function)
                except NoSuchFormat:
                    continue
        # Persist the new path and mark the book dirty so its OPF backup
        # gets rewritten.
        self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
        self.dirtied([id], commit=False)
        self.conn.commit()
        self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
        # Delete not needed directories
        if source_ok:
            if not samefile(spath, tpath):
                if wam is not None:
                    wam.delete_originals()
                self.rmtree(spath, permanent=True)
                parent = os.path.dirname(spath)
                if len(os.listdir(parent)) == 0:
                    # Author directory is now empty; remove it too.
                    self.rmtree(parent, permanent=True)
    finally:
        if wam is not None:
            wam.close_handles()

    curpath = self.library_path
    c1, c2 = current_path.split('/'), path.split('/')
    if not self.is_case_sensitive and len(c1) == len(c2):
        # On case-insensitive systems, title and author renames that only
        # change case don't cause any changes to the directories in the file
        # system. This can lead to having the directory names not match the
        # title/author, which leads to trouble when libraries are copied to
        # a case-sensitive system. The following code attempts to fix this
        # by checking each segment. If they are different because of case,
        # then rename the segment to some temp file name, then rename it
        # back to the correct name. Note that the code above correctly
        # handles files in the directories, so no need to do them here.
        for oldseg, newseg in zip(c1, c2):
            if oldseg.lower() == newseg.lower() and oldseg != newseg:
                try:
                    os.rename(os.path.join(curpath, oldseg),
                            os.path.join(curpath, newseg))
                except:
                    break  # Fail silently since nothing catastrophic has happened
            curpath = os.path.join(curpath, newseg)
def add_listener(self, listener):
    '''
    Add a listener. Will be called on change events with two arguments.
    Event name and list of affected ids.
    '''
    # Stored in a set, so registering the same callable twice is a no-op.
    self.listeners.add(listener)
def notify(self, event, ids=[]):
    'Notify all listeners'
    # A failing listener must not prevent the others from being called.
    for listener in self.listeners:
        try:
            listener(event, ids)
        except:
            traceback.print_exc()
def cover(self, index, index_is_id=False, as_file=False, as_image=False,
        as_path=False):
    '''
    Return the cover image as a bytestring (in JPEG format) or None.

    WARNING: Using as_path will copy the cover to a temp file and return
    the path to the temp file. You should delete the temp file when you are
    done with it.

    :param as_file: If True return the image as an open file object (a SpooledTemporaryFile)
    :param as_image: If True return the image as a QImage object
    '''
    id = index if index_is_id else self.id(index)
    path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
    if os.access(path, os.R_OK):
        try:
            f = lopen(path, 'rb')
        except (IOError, OSError):
            # The file may be transiently locked (e.g. by a writer or a
            # virus scanner); wait briefly and retry once.
            time.sleep(0.2)
            f = lopen(path, 'rb')
        with f:
            if as_path:
                pt = PersistentTemporaryFile('_dbcover.jpg')
                with pt:
                    shutil.copyfileobj(f, pt)
                return pt.name
            if as_file:
                ret = SpooledTemporaryFile(SPOOL_SIZE)
                shutil.copyfileobj(f, ret)
                ret.seek(0)
            else:
                ret = f.read()
                if as_image:
                    from PyQt4.Qt import QImage
                    i = QImage()
                    i.loadFromData(ret)
                    ret = i
        return ret
    # Implicit None when no readable cover.jpg exists.
def cover_last_modified(self, index, index_is_id=False):
    '''Return the mtime of the book's cover.jpg as a UTC datetime, falling
    back to the database's last-modified time when no cover exists.'''
    book_id = index if index_is_id else self.id(index)
    cpath = os.path.join(self.library_path,
            self.path(book_id, index_is_id=True), 'cover.jpg')
    try:
        return utcfromtimestamp(os.stat(cpath).st_mtime)
    except:
        # Cover doesn't exist
        return self.last_modified()
### The field-style interface. These use field keys.
def get_field(self, idx, key, default=None, index_is_id=False):
    '''Return the value of field *key* for the book at *idx* by building a
    Metadata object; the cover is only fetched when key == 'cover'.'''
    need_cover = (key == 'cover')
    mi = self.get_metadata(idx, index_is_id=index_is_id,
            get_cover=need_cover)
    return mi.get(key, default)
def standard_field_keys(self):
    '''Keys of all standard (built-in) fields.'''
    fm = self.field_metadata
    return fm.standard_field_keys()
def custom_field_keys(self, include_composites=True):
    '''Keys of all custom fields, optionally excluding composite columns.'''
    fm = self.field_metadata
    return fm.custom_field_keys(include_composites)
def all_field_keys(self):
    '''Keys of every field known to the field metadata.'''
    fm = self.field_metadata
    return fm.all_field_keys()
def sortable_field_keys(self):
    '''Keys of fields the book list can be sorted on.'''
    fm = self.field_metadata
    return fm.sortable_field_keys()
def searchable_fields(self):
    '''Fields usable in search expressions.'''
    fm = self.field_metadata
    return fm.searchable_fields()
def search_term_to_field_key(self, term):
    '''Map a search term (possibly an alias) to its canonical field key.'''
    fm = self.field_metadata
    return fm.search_term_to_field_key(term)
def custom_field_metadata(self, include_composites=True):
    '''Metadata dicts for all custom fields.'''
    fm = self.field_metadata
    return fm.custom_field_metadata(include_composites)
def all_metadata(self):
    '''Metadata dicts for every field.'''
    fm = self.field_metadata
    return fm.all_metadata()
def metadata_for_field(self, key):
    '''Metadata dict for the single field *key*.'''
    fm = self.field_metadata
    return fm[key]
def clear_dirtied(self, book_id, sequence):
    '''
    Clear the dirtied indicator for the books. This is used when fetching
    metadata, creating an OPF, and writing a file are separated into steps.
    The last step is clearing the indicator
    '''
    with self.dirtied_lock:
        dc_sequence = self.dirtied_cache.get(book_id, None)
        # print 'clear_dirty: check book', book_id, dc_sequence
        # Only clear if the book was not re-dirtied since the caller took
        # its sequence number (dc_sequence == sequence), or if either side
        # has no sequence to compare.
        if dc_sequence is None or sequence is None or dc_sequence == sequence:
            # print 'needs to be cleaned'
            self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                    (book_id,))
            self.conn.commit()
            try:
                del self.dirtied_cache[book_id]
            except:
                # Already removed; harmless.
                pass
        elif dc_sequence is not None:
            # print 'book needs to be done again'
            pass
def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
        commit=True, callback=None):
    '''
    Write metadata for each record to an individual OPF file. If callback
    is not None, it is called once at the start with the number of book_ids
    being processed. And once for every book_id, with arguments (book_id,
    mi, ok).
    '''
    if book_ids is None:
        # Default to every book currently marked dirty.
        book_ids = [x[0] for x in self.conn.get(
            'SELECT book FROM metadata_dirtied', all=True)]

    if callback is not None:
        book_ids = tuple(book_ids)
        callback(len(book_ids), True, False)

    for book_id in book_ids:
        if not self.data.has_id(book_id):
            # Book was deleted after being queued.
            if callback is not None:
                callback(book_id, None, False)
            continue
        path, mi, sequence = self.get_metadata_for_dump(book_id)
        if path is None:
            # Book has no directory yet (still being created).
            if callback is not None:
                callback(book_id, mi, False)
            continue
        try:
            raw = metadata_to_opf(mi)
            with lopen(path, 'wb') as f:
                f.write(raw)
            if remove_from_dirtied:
                self.clear_dirtied(book_id, sequence)
        except:
            # Best-effort: a failed OPF write leaves the book dirty so it
            # will be retried later.
            pass
        if callback is not None:
            callback(book_id, mi, True)
    if commit:
        self.conn.commit()
def update_last_modified(self, book_ids, commit=False, now=None):
    '''Set the last_modified timestamp for *book_ids* in both the database
    and the in-memory row cache. *now* defaults to the current UTC time.'''
    if now is None:
        now = nowf()
    if book_ids:
        rows = [(now, book_id) for book_id in book_ids]
        self.conn.executemany(
            'UPDATE books SET last_modified=? WHERE id=?', rows)
        loc = self.FIELD_MAP['last_modified']
        for book_id in book_ids:
            self.data.set(book_id, loc, now, row_is_id=True)
        if commit:
            self.conn.commit()
def dirtied(self, book_ids, commit=True):
    '''Mark *book_ids* as needing an OPF backup, updating last_modified,
    the persistent metadata_dirtied table and the in-memory cache.'''
    self.update_last_modified(book_ids)
    for book in book_ids:
        # Lock is taken per book so the metadata-backup thread can make
        # progress between iterations.
        with self.dirtied_lock:
            # print 'dirtied: check id', book
            if book in self.dirtied_cache:
                # Already dirty: just bump its sequence number so an
                # in-flight backup of stale data gets redone.
                self.dirtied_cache[book] = self.dirtied_sequence
                self.dirtied_sequence += 1
                continue
            # print 'book not already dirty'
            self.conn.execute(
                'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
                (book,))
            self.dirtied_cache[book] = self.dirtied_sequence
            self.dirtied_sequence += 1

    # If the commit doesn't happen, then the DB table will be wrong. This
    # could lead to a problem because on restart, we won't put the book back
    # into the dirtied_cache. We deal with this by writing the dirtied_cache
    # back to the table on GUI exit. Not perfect, but probably OK
    if book_ids and commit:
        self.conn.commit()
def get_a_dirtied_book(self):
    '''Return a (book_id, sequence) pair for some dirtied book, or
    (None, None) when nothing is dirty.'''
    with self.dirtied_lock:
        count = len(self.dirtied_cache)
        if count > 0:
            # Pick a random entry so a single book whose metadata cannot
            # be written does not block progress forever.
            keys = self.dirtied_cache.keys()
            book_id = keys[random.randint(0, count - 1)]
            return (book_id, self.dirtied_cache[book_id])
        return (None, None)
def dirty_queue_length(self):
    # Number of books currently awaiting an OPF metadata backup.
    return len(self.dirtied_cache)
def commit_dirty_cache(self):
    '''
    Set the dirty indication for every book in the cache. The vast majority
    of the time, the indication will already be set. However, sometimes
    exceptions may have prevented a commit, which may remove some dirty
    indications from the DB. This call will put them back. Note that there
    is no problem with setting a dirty indication for a book that isn't in
    fact dirty. Just wastes a few cycles.
    '''
    # dirtied() re-takes the (reentrant) lock, which is fine.
    with self.dirtied_lock:
        ids = list(self.dirtied_cache)
        self.dirtied_cache = {}
        self.dirtied(ids)
def get_metadata_for_dump(self, idx):
    '''Return (opf_path, Metadata, sequence) for the book id *idx*, for use
    by the OPF backup machinery. path/mi are None when unavailable.'''
    path, mi = (None, None)
    # get the current sequence number for this book to pass back to the
    # backup thread. This will avoid double calls in the case where the
    # thread has not done the work between the put and the get_metadata
    with self.dirtied_lock:
        sequence = self.dirtied_cache.get(idx, None)
    # print 'get_md_for_dump', idx, sequence
    try:
        # While a book is being created, the path is empty. Don't bother to
        # try to write the opf, because it will go to the wrong folder.
        if self.path(idx, index_is_id=True):
            path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
            mi = self.get_metadata(idx, index_is_id=True)
            # Always set cover to cover.jpg. Even if cover doesn't exist,
            # no harm done. This way no need to call dirtied when
            # cover is set/removed
            mi.cover = 'cover.jpg'
    except:
        # This almost certainly means that the book has been deleted while
        # the backup operation sat in the queue.
        pass
    return (path, mi, sequence)
def get_metadata(self, idx, index_is_id=False, get_cover=False,
        get_user_categories=True, cover_as_data=False):
    '''
    Convenience method to return metadata as a :class:`Metadata` object.
    Note that the list of formats is not verified.
    '''
    idx = idx if index_is_id else self.id(idx)
    try:
        row = self.data._data[idx]
    except:
        row = None

    if row is None:
        raise ValueError('No book with id: %d'%idx)

    fm = self.FIELD_MAP
    mi = Metadata(None, template_cache=self.formatter_template_cache)

    # au_map packs (author:::sort:::link) triples joined by :#: — decode it.
    aut_list = row[fm['au_map']]
    if aut_list:
        aut_list = [p.split(':::') for p in aut_list.split(':#:') if p]
    else:
        aut_list = []
    aum = []
    aus = {}
    aul = {}
    try:
        for (author, author_sort, link) in aut_list:
            aut = author.replace('|', ',')
            aum.append(aut)
            aus[aut] = author_sort.replace('|', ',')
            aul[aut] = link
    except ValueError:
        # Author has either ::: or :#: in it
        for x in row[fm['authors']].split(','):
            aum.append(x.replace('|', ','))
            aul[aum[-1]] = ''
            aus[aum[-1]] = aum[-1]
    mi.title = row[fm['title']]
    mi.authors = aum
    mi.author_sort = row[fm['author_sort']]
    mi.author_sort_map = aus
    mi.author_link_map = aul
    mi.comments = row[fm['comments']]
    mi.publisher = row[fm['publisher']]
    mi.timestamp = row[fm['timestamp']]
    mi.pubdate = row[fm['pubdate']]
    mi.uuid = row[fm['uuid']]
    mi.title_sort = row[fm['sort']]
    mi.last_modified = row[fm['last_modified']]
    formats = row[fm['formats']]
    mi.format_metadata = {}
    if not formats:
        good_formats = None
    else:
        formats = sorted(formats.split(','))
        mi.format_metadata = FormatMetadata(self, idx, formats)
        good_formats = FormatsList(formats, mi.format_metadata)
    mi.formats = good_formats
    mi.db_approx_formats = formats
    # Cheap precomputed fields, exposed without another DB round-trip.
    mi._proxy_metadata = p = ProxyMetadata(row[fm['size']], row[fm['ondevice']], formats)
    mi.book_size = p.book_size
    mi.ondevice_col = p.ondevice_col
    tags = row[fm['tags']]
    if tags:
        mi.tags = [i.strip() for i in tags.split(',')]
    languages = row[fm['languages']]
    if languages:
        mi.languages = [i.strip() for i in languages.split(',')]
    mi.series = row[fm['series']]
    if mi.series:
        mi.series_index = row[fm['series_index']]
    mi.rating = row[fm['rating']]
    mi.set_identifiers(self.get_identifiers(idx, index_is_id=True))
    mi.application_id = idx
    mi.id = idx

    # Fill in custom-column values; composites come straight from the row.
    mi.set_all_user_metadata(self.field_metadata.custom_field_metadata())
    for key, meta in self.field_metadata.custom_iteritems():
        if meta['datatype'] == 'composite':
            mi.set(key, val=row[meta['rec_index']])
        else:
            val, extra = self.get_custom_and_extra(idx, label=meta['label'],
                    index_is_id=True)
            mi.set(key, val=val, extra=extra)

    # Compute which user-category entries this book belongs to.
    user_cats = self.prefs['user_categories']
    user_cat_vals = {}
    if get_user_categories:
        for ucat in user_cats:
            res = []
            for name, cat, ign in user_cats[ucat]:
                v = mi.get(cat, None)
                if isinstance(v, list):
                    if name in v:
                        res.append([name, cat])
                elif name == v:
                    res.append([name, cat])
            user_cat_vals[ucat] = res
    mi.user_categories = user_cat_vals

    if get_cover:
        if cover_as_data:
            cdata = self.cover(idx, index_is_id=True)
            if cdata:
                mi.cover_data = ('jpeg', cdata)
        else:
            mi.cover = self.cover(idx, index_is_id=True, as_path=True)
    mi.has_cover = _('Yes') if self.has_cover(idx) else ''
    return mi
def has_book(self, mi):
    '''Return True iff a book whose title equals ``mi.title`` exists in the db.'''
    title = mi.title
    if not title:
        return False
    if not isinstance(title, unicode):
        title = title.decode(preferred_encoding, 'replace')
    row = self.conn.get('SELECT id FROM books where title=?', (title,), all=False)
    return bool(row)
def has_id(self, id_):
    '''Return True iff a (non-deleted) record for book ``id_`` is in the cache.'''
    record = self.data._data[id_]
    return record is not None
def books_with_same_title(self, mi, all_matches=True):
    '''Return a set of book ids whose titles match ``mi.title``
    case-insensitively. If ``all_matches`` is False, stop at the first hit.'''
    matches = set()
    wanted = mi.title
    if not wanted:
        return matches
    wanted = lower(force_unicode(wanted))
    for book_id in self.all_ids():
        candidate = self.title(book_id, index_is_id=True)
        if lower(candidate) == wanted:
            matches.add(book_id)
            if not all_matches:
                break
    return matches
def find_identical_books(self, mi):
    '''
    Return the set of book ids whose fuzzily-normalized title matches
    ``mi.title`` and whose authors include all of ``mi.authors``
    (case insensitive). Returns an empty set when ``mi`` has no authors.
    '''
    # Normalization patterns applied in order: strip punctuation, strip the
    # leading article (get_title_sort_pat), turn separators into spaces,
    # collapse runs of whitespace.
    fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
        isinstance(pat, basestring) else pat, repl) for pat, repl in
            [
                (r'[\[\](){}<>\'";,:#]', ''),
                (get_title_sort_pat(), ''),
                (r'[-._]', ' '),
                (r'\s+', ' ')
            ]
    ]

    def fuzzy_title(title):
        # Normalize a title for comparison using the patterns above.
        title = title.strip().lower()
        for pat, repl in fuzzy_title_patterns:
            title = pat.sub(repl, title)
        return title

    identical_book_ids = set([])
    if mi.authors:
        try:
            quathors = mi.authors[:10]  # Too many authors causes parsing of
                                        # the search expression to fail
            query = u' and '.join([u'author:"=%s"'%(a.replace('"', '')) for a in
                quathors])
            qauthors = mi.authors[10:]
        except ValueError:
            return identical_book_ids

        try:
            book_ids = self.data.parse(query)
        except:
            traceback.print_exc()
            return identical_book_ids
        # Authors beyond the first ten could not go into the search query;
        # filter the candidate set by them manually.
        if qauthors and book_ids:
            matches = set()
            qauthors = {lower(x) for x in qauthors}
            for book_id in book_ids:
                aut = self.authors(book_id, index_is_id=True)
                if aut:
                    aut = {lower(x.replace('|', ',')) for x in
                            aut.split(',')}
                    if aut.issuperset(qauthors):
                        matches.add(book_id)
            book_ids = matches

        # Finally keep only the candidates whose normalized title matches.
        for book_id in book_ids:
            fbook_title = self.title(book_id, index_is_id=True)
            fbook_title = fuzzy_title(fbook_title)
            mbook_title = fuzzy_title(mi.title)
            if fbook_title == mbook_title:
                identical_book_ids.add(book_id)

    return identical_book_ids
def remove_cover(self, id, notify=True, commit=True):
    '''Delete the cover file of book ``id`` and clear its has_cover flag.'''
    cover_path = os.path.join(self.library_path,
            self.path(id, index_is_id=True), 'cover.jpg')
    if os.path.exists(cover_path):
        try:
            os.remove(cover_path)
        except (IOError, OSError):
            # A transient lock (e.g. a file scanner on windows) can block
            # the delete; wait briefly and retry once.
            time.sleep(0.2)
            os.remove(cover_path)
    self.conn.execute('UPDATE books SET has_cover=0 WHERE id=?', (id,))
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['cover'], False, row_is_id=True)
    if notify:
        self.notify('cover', [id])
def set_cover(self, id, data, notify=True, commit=True):
    '''
    Set the cover for this book.

    `data`: Can be either a QImage, QPixmap, file object or bytestring
    '''
    base_path = os.path.join(self.library_path, self.path(id,
        index_is_id=True))
    if not os.path.exists(base_path):
        # Book directory is missing: recreate the path entry, then
        # recompute where the cover should live.
        self.set_path(id, index_is_id=True)
        base_path = os.path.join(self.library_path, self.path(id,
            index_is_id=True))
        self.dirtied([id])
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    path = os.path.join(base_path, 'cover.jpg')

    if callable(getattr(data, 'save', None)):
        # QImage/QPixmap-like object: let it serialize itself to disk.
        data.save(path)
    else:
        # File-like object: read its bytes first.
        if callable(getattr(data, 'read', None)):
            data = data.read()
        try:
            save_cover_data_to(data, path)
        except (IOError, OSError):
            # Retry once after a short sleep in case the file was
            # transiently locked.
            time.sleep(0.2)
            save_cover_data_to(data, path)
    now = nowf()
    self.conn.execute(
        'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
            (now, id))
    if commit:
        self.conn.commit()
    # Keep the in-memory cache in sync with the database.
    self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
    self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
    if notify:
        self.notify('cover', [id])
def has_cover(self, id):
    '''Return the cached cover flag for book ``id``.'''
    cover_field = self.FIELD_MAP['cover']
    return self.data.get(id, cover_field, row_is_id=True)
def set_has_cover(self, id, val):
    '''Set the has_cover flag for book ``id`` in the db and the data cache.'''
    flag = 1 if val else 0
    now = nowf()
    self.conn.execute(
            'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
        (flag, now, id))
    self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
    self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
    '''Delegate to the registered device-matching callback, if there is one.'''
    func = self.book_on_device_func
    if callable(func):
        return func(id)
    return None
def book_on_device_string(self, id):
    '''Return a human-readable list of device locations holding book ``id``.'''
    locations = []
    count = 0
    on = self.book_on_device(id)
    if on is not None:
        main, card_a, card_b, count = on[:4]
        if main is not None:
            locations.append(_('Main'))
        if card_a is not None:
            locations.append(_('Card A'))
        if card_b is not None:
            locations.append(_('Card B'))
    suffix = (_(' (%s books)')%count) if count > 1 else ''
    return ', '.join(locations) + suffix
def set_book_on_device_func(self, func):
    '''Register the callback used by :meth:`book_on_device`.'''
    self.book_on_device_func = func
def all_formats(self):
    '''Return the set of all formats present anywhere in the library.'''
    rows = self.conn.get('SELECT DISTINCT format from data')
    if not rows:
        return set([])
    return {row[0] for row in rows}
def format_files(self, index, index_is_id=False):
    '''Return a list of ``(filename, format)`` pairs for the book's files.'''
    book_id = index if index_is_id else self.id(index)
    fmap = self.format_filename_cache[book_id]
    return [(fname, fmt) for fmt, fname in fmap.iteritems()]
def formats(self, index, index_is_id=False, verify_formats=True):
    ''' Return available formats as a comma separated list or None if there are no available formats '''
    id_ = index if index_is_id else self.id(index)
    raw = self.data.get(id_, self.FIELD_MAP['formats'], row_is_id=True)
    if not raw:
        return None
    if not verify_formats:
        return raw
    # Keep only the formats whose files actually exist on disk.
    verified = [fmt for fmt in raw.split(',')
            if self.format_abspath(id_, fmt, index_is_id=True) is not None]
    if not verified:
        return None
    return ','.join(verified)
def has_format(self, index, format, index_is_id=False):
    '''Return True iff the book has a file for ``format`` on disk.'''
    path = self.format_abspath(index, format, index_is_id)
    return path is not None
def format_last_modified(self, id_, fmt):
    '''Return the mtime of the format file, or None if it has no metadata.'''
    metadata = self.format_metadata(id_, fmt)
    if metadata:
        return metadata['mtime']
def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
        commit=False):
    '''
    Return a dict with the keys ``path``, ``size`` and ``mtime`` for the
    format file of book ``id_``, or an empty dict if the file is missing.

    :param allow_cache: If True a previously computed result may be
        returned without touching the filesystem.
    :param update_db: If True also write the on-disk size into the data
        table (committed only when ``commit`` is True).
    '''
    if not fmt:
        return {}
    fmt = fmt.upper()
    if allow_cache:
        x = self.format_metadata_cache[id_].get(fmt, None)
        if x is not None:
            return x
    path = self.format_abspath(id_, fmt, index_is_id=True)
    ans = {}
    if path is not None:
        stat = os.stat(path)
        ans['path'] = path
        ans['size'] = stat.st_size
        ans['mtime'] = utcfromtimestamp(stat.st_mtime)
        # Cache the result so later calls can skip the stat().
        self.format_metadata_cache[id_][fmt] = ans
        if update_db:
            self.conn.execute(
                'UPDATE data SET uncompressed_size=? WHERE format=? AND'
                ' book=?', (stat.st_size, fmt, id_))
            if commit:
                self.conn.commit()
    return ans
def format_hash(self, id_, fmt):
    '''Return the SHA-256 hex digest of the format file for book ``id_``.'''
    path = self.format_abspath(id_, fmt, index_is_id=True)
    if path is None:
        raise NoSuchFormat('Record %d has no fmt: %s'%(id_, fmt))
    digest = hashlib.sha256()
    with lopen(path, 'rb') as stream:
        while True:
            chunk = stream.read(SPOOL_SIZE)
            digest.update(chunk)
            # A short read means we have reached end of file.
            if len(chunk) < SPOOL_SIZE:
                break
    return digest.hexdigest()
def format_path(self, index, fmt, index_is_id=False):
    '''
    This method is intended to be used only in those rare situations, like
    Drag'n Drop, when you absolutely need the path to the original file.
    Otherwise, use format(..., as_path=True).

    Note that a networked backend will always return None.
    '''
    path = self.format_abspath(index, fmt, index_is_id=index_is_id)
    if path is not None:
        return path
    id_ = index if index_is_id else self.id(index)
    raise NoSuchFormat('Record %d has no format: %s'%(id_, fmt))
def format_abspath(self, index, format, index_is_id=False):
    '''
    Return absolute path to the ebook file of format `format`

    WARNING: This method will return a dummy path for a network backend DB,
    so do not rely on it, use format(..., as_path=True) instead.

    Currently used only in calibredb list, the viewer and the catalogs (via
    get_data_as_dict()).

    Apart from the viewer, I don't believe any of the others do any file
    I/O with the results of this call.
    '''
    id = index if index_is_id else self.id(index)
    try:
        name = self.format_filename_cache[id][format.upper()]
    except:
        # Unknown book id or the book has no file in this format.
        return None
    if name:
        path = os.path.join(self.library_path, self.path(id, index_is_id=True))
        format = ('.' + format.lower()) if format else ''
        fmt_path = os.path.join(path, name+format)
        if os.path.exists(fmt_path):
            return fmt_path
        # The expected file name is missing: look for any file with the
        # right extension (the name on disk may have a different case).
        try:
            candidates = glob.glob(os.path.join(path, '*'+format))
        except:  # If path contains strange characters this throws an exc
            candidates = []
        if format and candidates and os.path.exists(candidates[0]):
            try:
                shutil.copyfile(candidates[0], fmt_path)
            except:
                # This can happen if candidates[0] or fmt_path is too long,
                # which can happen if the user copied the library from a
                # non windows machine to a windows machine.
                return None
            return fmt_path
def copy_format_to(self, index, fmt, dest, index_is_id=False,
        windows_atomic_move=None, use_hardlink=False):
    '''
    Copy the format ``fmt`` to the file like object ``dest``. If the
    specified format does not exist, raises :class:`NoSuchFormat` error.
    dest can also be a path, in which case the format is copied to it, iff
    the path is different from the current path (taking case sensitivity
    into account).

    If use_hardlink is True, a hard link will be created instead of the
    file being copied. Use with care, because a hard link means that
    modifying any one file will cause both files to be modified.

    windows_atomic_move is an internally used parameter. You should not use
    it in any code outside this module.
    '''
    path = self.format_abspath(index, fmt, index_is_id=index_is_id)
    if path is None:
        id_ = index if index_is_id else self.id(index)
        raise NoSuchFormat('Record %d has no %s file'%(id_, fmt))
    if windows_atomic_move is not None:
        # Windows-only code path: dest must be a filesystem path.
        if not isinstance(dest, basestring):
            raise Exception("Error, you must pass the dest as a path when"
                    " using windows_atomic_move")
        if dest:
            if samefile(path, dest):
                # Ensure that the file has the same case as dest
                try:
                    if path != dest:
                        os.rename(path, dest)
                except:
                    pass  # Nothing too catastrophic happened, the cases mismatch, that's all
            else:
                windows_atomic_move.copy_path_to(path, dest)
    else:
        if hasattr(dest, 'write'):
            # dest is a file-like object: stream the bytes into it.
            with lopen(path, 'rb') as f:
                shutil.copyfileobj(f, dest)
            if hasattr(dest, 'flush'):
                dest.flush()
        elif dest:
            # dest is a path.
            if samefile(dest, path):
                if not self.is_case_sensitive and path != dest:
                    # Ensure that the file has the same case as dest
                    try:
                        os.rename(path, dest)
                    except:
                        pass  # Nothing too catastrophic happened, the cases mismatch, that's all
            else:
                if use_hardlink:
                    try:
                        hardlink_file(path, dest)
                        return
                    except:
                        # Hard link failed (e.g. cross-device): fall back
                        # to a plain copy below.
                        pass
                with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
                    shutil.copyfileobj(f, d)
def copy_cover_to(self, index, dest, index_is_id=False,
        windows_atomic_move=None, use_hardlink=False):
    '''
    Copy the cover to the file like object ``dest``. Returns False
    if no cover exists or dest is the same file as the current cover.
    dest can also be a path in which case the cover is
    copied to it iff the path is different from the current path (taking
    case sensitivity into account).

    If use_hardlink is True, a hard link will be created instead of the
    file being copied. Use with care, because a hard link means that
    modifying any one file will cause both files to be modified.

    windows_atomic_move is an internally used parameter. You should not use
    it in any code outside this module.
    '''
    id = index if index_is_id else self.id(index)
    path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
    if windows_atomic_move is not None:
        # Windows-only code path: dest must be a filesystem path.
        if not isinstance(dest, basestring):
            raise Exception("Error, you must pass the dest as a path when"
                    " using windows_atomic_move")
        if os.access(path, os.R_OK) and dest and not samefile(dest, path):
            windows_atomic_move.copy_path_to(path, dest)
            return True
    else:
        if os.access(path, os.R_OK):
            try:
                f = lopen(path, 'rb')
            except (IOError, OSError):
                # Retry once after a short sleep in case the file was
                # transiently locked.
                time.sleep(0.2)
                f = lopen(path, 'rb')
            with f:
                if hasattr(dest, 'write'):
                    # dest is a file-like object: stream the bytes into it.
                    shutil.copyfileobj(f, dest)
                    if hasattr(dest, 'flush'):
                        dest.flush()
                    return True
                elif dest and not samefile(dest, path):
                    # dest is a different path: hardlink if asked, else copy.
                    if use_hardlink:
                        try:
                            hardlink_file(path, dest)
                            return True
                        except:
                            pass
                    with lopen(dest, 'wb') as d:
                        shutil.copyfileobj(f, d)
                    return True
    return False
def format(self, index, format, index_is_id=False, as_file=False,
        mode='r+b', as_path=False, preserve_filename=False):
    '''
    Return the ebook format as a bytestring or `None` if the format doesn't exist,
    or we don't have permission to write to the ebook file.

    :param as_file: If True the ebook format is returned as a file object. Note
                    that the file object is a SpooledTemporaryFile, so if what you want to
                    do is copy the format to another file, use :method:`copy_format_to`
                    instead for performance.
    :param as_path: Copies the format file to a temp file and returns the
                    path to the temp file
    :param preserve_filename: If True and returning a path the filename is
                              the same as that used in the library. Note that using
                              this means that repeated calls yield the same
                              temp file (which is re-created each time)
    :param mode: This is ignored (present for legacy compatibility)
    '''
    path = self.format_abspath(index, format, index_is_id=index_is_id)
    if path is not None:
        with lopen(path, mode) as f:
            if as_path:
                if preserve_filename:
                    # Copy to a fixed per-filename location so repeated
                    # calls reuse the same temp path.
                    bd = base_dir()
                    d = os.path.join(bd, 'format_abspath')
                    try:
                        os.makedirs(d)
                    except:
                        pass
                    fname = os.path.basename(path)
                    ret = os.path.join(d, fname)
                    with lopen(ret, 'wb') as f2:
                        shutil.copyfileobj(f, f2)
                else:
                    with PersistentTemporaryFile('.'+format.lower()) as pt:
                        shutil.copyfileobj(f, pt)
                        ret = pt.name
            elif as_file:
                ret = SpooledTemporaryFile(SPOOL_SIZE)
                shutil.copyfileobj(f, ret)
                ret.seek(0)
                # Various bits of code try to use the name as the default
                # title when reading metadata, so set it
                ret.name = f.name
            else:
                ret = f.read()
        return ret
def add_format_with_hooks(self, index, format, fpath, index_is_id=False,
        path=None, notify=True, replace=True):
    '''Add the file ``fpath`` as a format, running the file-type import
    plugins on it first and the post-import plugins afterwards.'''
    npath = self.run_import_plugins(fpath, format)
    # Plugins may have converted the file: recompute the format from the
    # resulting extension, then sniff the actual stream contents.
    fmt = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
    stream = lopen(npath, 'rb')
    fmt = check_ebook_format(stream, fmt)
    book_id = index if index_is_id else self.id(index)
    retval = self.add_format(book_id, fmt, stream, replace=replace,
            index_is_id=True, path=path, notify=notify)
    run_plugins_on_postimport(self, book_id, fmt)
    return retval
def add_format(self, index, format, stream, index_is_id=False, path=None,
        notify=True, replace=True, copy_function=None):
    '''
    Add the data in ``stream`` as the ``format`` file of the book.
    Returns False (without writing anything) when the format already
    exists and ``replace`` is False, True otherwise.
    '''
    id = index if index_is_id else self.id(index)
    if not format:
        format = ''
    # Any cached metadata for this format is now stale.
    self.format_metadata_cache[id].pop(format.upper(), None)
    name = self.format_filename_cache[id].get(format.upper(), None)
    if path is None:
        path = os.path.join(self.library_path, self.path(id, index_is_id=True))
    if name and not replace:
        return False
    name = self.construct_file_name(id)
    ext = ('.' + format.lower()) if format else ''
    dest = os.path.join(path, name+ext)
    pdir = os.path.dirname(dest)
    if not os.path.exists(pdir):
        os.makedirs(pdir)
    size = 0
    if copy_function is not None:
        # Caller-supplied copier (used e.g. for atomic moves).
        copy_function(dest)
        size = os.path.getsize(dest)
    else:
        # Only copy if the stream is not already the destination file.
        if (not getattr(stream, 'name', False) or not samefile(dest,
                stream.name)):
            with lopen(dest, 'wb') as f:
                shutil.copyfileobj(stream, f)
                size = f.tell()
        elif os.path.exists(dest):
            size = os.path.getsize(dest)
    self.conn.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
                      (id, format.upper(), size, name))
    self.update_last_modified([id], commit=False)
    self.conn.commit()
    self.format_filename_cache[id][format.upper()] = name
    self.refresh_ids([id])
    if notify:
        self.notify('metadata', [id])
    return True
def save_original_format(self, book_id, fmt, notify=True):
    '''Save a copy of ``fmt`` as ORIGINAL_<fmt>. Returns False when the
    book has no file in ``fmt``; raises ValueError for ORIGINAL_* input.'''
    fmt = fmt.upper()
    if 'ORIGINAL' in fmt:
        raise ValueError('Cannot save original of an original fmt')
    src_path = self.format_abspath(book_id, fmt, index_is_id=True)
    if src_path is None:
        return False
    backup_fmt = 'ORIGINAL_'+fmt
    with lopen(src_path, 'rb') as stream:
        return self.add_format(book_id, backup_fmt, stream,
                index_is_id=True, notify=notify)
def original_fmt(self, book_id, fmt):
    '''Return ``ORIGINAL_<fmt>`` if the book has a saved original for
    ``fmt``, otherwise return ``fmt`` unchanged.'''
    # Removed the dead no-op statement ``fmt = fmt`` that was here.
    nfmt = ('ORIGINAL_%s'%fmt).upper()
    opath = self.format_abspath(book_id, nfmt, index_is_id=True)
    return fmt if opath is None else nfmt
def restore_original_format(self, book_id, original_fmt, notify=True):
    '''Replace the current format with the saved ORIGINAL_* copy and
    delete the backup. Returns True on success, False if no backup.'''
    opath = self.format_abspath(book_id, original_fmt, index_is_id=True)
    if opath is None:
        return False
    fmt = original_fmt.partition('_')[2]
    with lopen(opath, 'rb') as stream:
        self.add_format(book_id, fmt, stream, index_is_id=True, notify=False)
    self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
    return True
def delete_book(self, id, notify=True, commit=True, permanent=False,
        do_clean=True):
    '''
    Removes book from the result cache and the underlying database.
    If you set commit to False, you must call clean() manually afterwards
    '''
    try:
        path = os.path.join(self.library_path, self.path(id, index_is_id=True))
    except:
        # No filesystem path for this book; just remove the db row below.
        path = None
    if path and os.path.exists(path):
        self.rmtree(path, permanent=permanent)
        # Remove the author directory too if this was its last book.
        parent = os.path.dirname(path)
        if len(os.listdir(parent)) == 0:
            self.rmtree(parent, permanent=permanent)
    self.conn.execute('DELETE FROM books WHERE id=?', (id,))
    if commit:
        self.conn.commit()
        if do_clean:
            self.clean()
    self.data.books_deleted([id])
    if notify:
        self.notify('delete', [id])
def remove_format(self, index, format, index_is_id=False, notify=True,
        commit=True, db_only=False):
    '''
    Remove the ``format`` file of a book from the database and (unless
    ``db_only`` is True) from disk. Does nothing if the book has no file
    in that format.
    '''
    id = index if index_is_id else self.id(index)
    if not format:
        format = ''
    # Any cached metadata for this format is now stale.
    self.format_metadata_cache[id].pop(format.upper(), None)
    name = self.format_filename_cache[id].get(format.upper(), None)
    if name:
        if not db_only:
            try:
                path = self.format_abspath(id, format, index_is_id=True)
                if path:
                    delete_file(path)
            except:
                # Failure to delete the file should not prevent removal of
                # the database entry.
                traceback.print_exc()
        self.format_filename_cache[id].pop(format.upper(), None)
        self.conn.execute('DELETE FROM data WHERE book=? AND format=?', (id, format.upper()))
        if commit:
            self.conn.commit()
        self.refresh_ids([id])
        if notify:
            self.notify('metadata', [id])
def clean_standard_field(self, field, commit=False):
    '''Remove orphaned link rows and unused items for one standard field.'''
    # Don't bother with validity checking. Let the exception fly out so
    # we can see what happened
    fm = self.field_metadata[field]
    table, ltable_col = fm['table'], fm['link_column']
    # Drop link rows pointing at books that no longer exist.
    st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
            'FROM books WHERE id=book) < 1;')%table
    self.conn.execute(st)
    # Drop items that no longer have any link rows.
    st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
            'FROM books_%(table)s_link WHERE '
            '%(ltable_col)s=%(table)s.id) < 1;') % dict(
                    table=table, ltable_col=ltable_col)
    self.conn.execute(st)
    if commit:
        self.conn.commit()
def clean(self):
    '''
    Remove orphaned entries.
    '''
    def doit(ltable, table, ltable_col):
        # Drop link rows whose book no longer exists, then items that no
        # longer have any link rows.
        st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
                'FROM books WHERE id=book) < 1;')%ltable
        self.conn.execute(st)
        st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
                'FROM books_%(ltable)s_link WHERE '
                '%(ltable_col)s=%(table)s.id) < 1;') % dict(
                        ltable=ltable, table=table, ltable_col=ltable_col)
        self.conn.execute(st)

    for ltable, table, ltable_col in [
            ('authors', 'authors', 'author'),
            ('publishers', 'publishers', 'publisher'),
            ('tags', 'tags', 'tag'),
            ('series', 'series', 'series'),
            ('languages', 'languages', 'lang_code'),
            ]:
        doit(ltable, table, ltable_col)

    # Also remove tags whose names are empty/whitespace only.
    for id_, tag in self.conn.get('SELECT id, name FROM tags', all=True):
        if not tag.strip():
            self.conn.execute('DELETE FROM books_tags_link WHERE tag=?',
                    (id_,))
            self.conn.execute('DELETE FROM tags WHERE id=?', (id_,))
    self.clean_custom()
    self.conn.commit()
def get_books_for_category(self, category, id_):
    '''Return the set of book ids that have the item ``id_`` in ``category``.
    For composite columns ``id_`` is the item value itself.'''
    result = set()
    if category not in self.field_metadata:
        return result
    field = self.field_metadata[category]
    if field['datatype'] == 'composite':
        # Composite columns are not backed by a table: scan the cache.
        dex = field['rec_index']
        multiple = field['is_multiple']
        for book in self.data.iterall():
            if multiple:
                sep = multiple['cache_to_list']
                vals = [v.strip() for v in book[dex].split(sep) if v.strip()]
                if id_ in vals:
                    result.add(book[0])
            elif book[dex] == id_:
                result.add(book[0])
        return result
    rows = self.conn.get(
        'SELECT book FROM books_{tn}_link WHERE {col}=?'.format(
            tn=field['table'], col=field['link_column']), (id_,))
    return set(row[0] for row in rows)
########## data structures for get_categories
# CATEGORY_SORTS: legal values for the `sort` argument of get_categories()
# (re-exports the module-level constant as a class attribute).
# MATCH_TYPE: supported matching semantics for user categories.
CATEGORY_SORTS = CATEGORY_SORTS
MATCH_TYPE = ('any', 'all')
class TCat_Tag(object):
    '''Lightweight accumulator used while building categories.
    Tracks a tag's name/sort key (n/s), its book count (c), the set of
    book ids (id_set), rating total/count (rt/rc) and the item id.'''

    def __init__(self, name, sort):
        self.n = name
        self.s = sort
        self.c = 0
        self.id_set = set()
        self.rt = 0
        self.rc = 0
        self.id = None

    def set_all(self, c, rt, rc, id):
        # Bulk-initialize the counters (used for the news category).
        self.c, self.rt, self.rc, self.id = c, rt, rc, id

    def __str__(self):
        return unicode(self)

    def __unicode__(self):
        return 'n=%s s=%s c=%d rt=%d rc=%d id=%s'%\
                    (self.n, self.s, self.c, self.rt, self.rc, self.id)
def clean_user_categories(self):
    '''Normalize user category names: strip empty '.'-separated components
    and give categories whose name collapses to nothing a numeric name.
    Persists the cleaned map if it changed, and returns it.'''
    user_cats = self.prefs.get('user_categories', {})
    cleaned = {}
    for name in user_cats:
        parts = [p.strip() for p in name.split('.') if p.strip()]
        if not parts:
            # The name was all dots/whitespace: pick the first unused
            # numeric name instead.
            n = 1
            while unicode(n) in user_cats:
                n += 1
            cleaned[unicode(n)] = user_cats[name]
        else:
            cleaned['.'.join(parts)] = user_cats[name]
    try:
        if cleaned != user_cats:
            self.prefs.set('user_categories', cleaned)
    except:
        pass
    return cleaned
def get_categories(self, sort='name', ids=None, icon_map=None):
    '''
    Build the Tag Browser category data.

    Returns a dict mapping category name -> list of Tag objects, sorted by
    ``sort`` (must be one of CATEGORY_SORTS: by name, popularity, or
    average rating). If ``ids`` is not None, only books in that set are
    counted. ``icon_map``, when given, must be a TagsIcons dict and is
    used to attach icons to the generated tags.
    '''
    #start = last = time.clock()
    if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError('icon_map passed to get_categories must be of type TagIcons')
    if sort not in self.CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    self.books_list_filter.change([] if not ids else ids)
    id_filter = None if ids is None else frozenset(ids)

    tb_cats = self.field_metadata
    tcategories = {}
    tids = {}
    md = []

    # First, build the maps. We need a category->items map and an
    # item -> (item_id, sort_val) map to use in the books loop
    for category in tb_cats.iterkeys():
        cat = tb_cats[category]
        if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
                or category in ['news', 'formats'] or cat.get('is_csp',
                                                              False):
            continue
        # Get the ids for the item values
        if not cat['is_custom']:
            funcs = {
                    'authors': self.get_authors_with_ids,
                    'series': self.get_series_with_ids,
                    'publisher': self.get_publishers_with_ids,
                    'tags': self.get_tags_with_ids,
                    'languages': self.get_languages_with_ids,
                    'rating': self.get_ratings_with_ids,
                }
            func = funcs.get(category, None)
            if func:
                list = func()
            else:
                raise ValueError(category + ' has no get with ids function')
        else:
            list = self.get_custom_items_with_ids(label=cat['label'])
        tids[category] = {}
        # The sort value depends on the kind of data in the category.
        if category == 'authors':
            for l in list:
                (id, val, sort_val) = (l[0], l[1], l[2])
                tids[category][val] = (id, sort_val)
        elif category == 'languages':
            for l in list:
                id, val = l[0], calibre_langcode_to_name(l[1])
                tids[category][l[1]] = (id, val)
        elif cat['datatype'] == 'series':
            for l in list:
                (id, val) = (l[0], l[1])
                tids[category][val] = (id, title_sort(val))
        elif cat['datatype'] == 'rating':
            for l in list:
                (id, val) = (l[0], l[1])
                tids[category][val] = (id, '{0:05.2f}'.format(val))
        elif cat['datatype'] == 'text' and cat['is_multiple'] and \
                        cat['display'].get('is_names', False):
            for l in list:
                (id, val) = (l[0], l[1])
                tids[category][val] = (id, author_to_author_sort(val))
        else:
            for l in list:
                (id, val) = (l[0], l[1])
                tids[category][val] = (id, val)
        # add an empty category to the category map
        tcategories[category] = {}
        # create a list of category/field_index for the books scan to use.
        # This saves iterating through field_metadata for each book
        md.append((category, cat['rec_index'],
                   cat['is_multiple'].get('cache_to_list', None), False))

    # Composite columns marked make_category also become categories; their
    # item values are only known after scanning the books.
    for category in tb_cats.iterkeys():
        cat = tb_cats[category]
        if cat['datatype'] == 'composite' and \
                cat['display'].get('make_category', False):
            tids[category] = {}
            tcategories[category] = {}
            md.append((category, cat['rec_index'],
                       cat['is_multiple'].get('cache_to_list', None),
                       cat['datatype'] == 'composite'))
    #print 'end phase "collection":', time.clock() - last, 'seconds'
    #last = time.clock()

    # Now scan every book looking for category items.
    # Code below is duplicated because it shaves off 10% of the loop time
    id_dex = self.FIELD_MAP['id']
    rating_dex = self.FIELD_MAP['rating']
    tag_class = LibraryDatabase2.TCat_Tag
    for book in self.data.iterall():
        if id_filter is not None and book[id_dex] not in id_filter:
            continue
        rating = book[rating_dex]
        # We kept track of all possible category field_map positions above
        for (cat, dex, mult, is_comp) in md:
            if not book[dex]:
                continue
            tid_cat = tids[cat]
            tcats_cat = tcategories[cat]
            if not mult:
                # Single-valued field.
                val = book[dex]
                if is_comp:
                    item = tcats_cat.get(val, None)
                    if not item:
                        item = tag_class(val, val)
                        tcats_cat[val] = item
                    item.c += 1
                    item.id = val
                    if rating > 0:
                        item.rt += rating
                        item.rc += 1
                    continue
                try:
                    (item_id, sort_val) = tid_cat[val]  # let exceptions fly
                    item = tcats_cat.get(val, None)
                    if not item:
                        item = tag_class(val, sort_val)
                        tcats_cat[val] = item
                    item.c += 1
                    item.id_set.add(book[0])
                    item.id = item_id
                    if rating > 0:
                        item.rt += rating
                        item.rc += 1
                except:
                    prints('get_categories: item', val, 'is not in', cat, 'list!')
            else:
                # Multi-valued field: split the cached string on `mult`.
                vals = book[dex].split(mult)
                if is_comp:
                    vals = [v.strip() for v in vals if v.strip()]
                    for val in vals:
                        if val not in tid_cat:
                            tid_cat[val] = (val, val)
                for val in vals:
                    try:
                        (item_id, sort_val) = tid_cat[val]  # let exceptions fly
                        item = tcats_cat.get(val, None)
                        if not item:
                            item = tag_class(val, sort_val)
                            tcats_cat[val] = item
                        item.c += 1
                        item.id_set.add(book[0])
                        item.id = item_id
                        if rating > 0:
                            item.rt += rating
                            item.rc += 1
                    except:
                        prints('get_categories: item', val, 'is not in', cat, 'list!')
    #print 'end phase "books":', time.clock() - last, 'seconds'
    #last = time.clock()

    # Now do news
    tcategories['news'] = {}
    cat = tb_cats['news']
    tn = cat['table']
    cn = cat['column']
    if ids is None:
        query = '''SELECT id, {0}, count, avg_rating, sort
                   FROM tag_browser_{1}'''.format(cn, tn)
    else:
        query = '''SELECT id, {0}, count, avg_rating, sort
                   FROM tag_browser_filtered_{1}'''.format(cn, tn)
    # results will be sorted later
    data = self.conn.get(query)
    for r in data:
        item = LibraryDatabase2.TCat_Tag(r[1], r[1])
        item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
        tcategories['news'][r[1]] = item
    #print 'end phase "news":', time.clock() - last, 'seconds'
    #last = time.clock()

    # Build the real category list by iterating over the temporary copy
    # and building the Tag instances.
    categories = {}
    tag_class = Tag
    for category in tb_cats.iterkeys():
        if category not in tcategories:
            continue
        cat = tb_cats[category]

        # prepare the place where we will put the array of Tags
        categories[category] = []

        # icon_map is not None if get_categories is to store an icon and
        # possibly a tooltip in the tag structure.
        icon = None
        label = tb_cats.key_to_label(category)
        if icon_map:
            if not tb_cats.is_custom_field(category):
                if category in icon_map:
                    icon = icon_map[label]
            else:
                icon = icon_map['custom:']
                icon_map[category] = icon

        datatype = cat['datatype']
        avgr = lambda x: 0.0 if x.rc == 0 else x.rt/x.rc
        # Duplicate the build of items below to avoid using a lambda func
        # in the main Tag loop. Saves a few %
        if datatype == 'rating':
            formatter = (lambda x:u'\u2605'*int(x/2))
            avgr = lambda x: x.n
            # eliminate the zero ratings line as well as count == 0
            items = [v for v in tcategories[category].values() if v.c > 0 and v.n != 0]
        elif category == 'authors':
            # Clean up the authors strings to human-readable form
            formatter = (lambda x: x.replace('|', ','))
            items = [v for v in tcategories[category].values() if v.c > 0]
        elif category == 'languages':
            # Use a human readable language string
            formatter = calibre_langcode_to_name
            items = [v for v in tcategories[category].values() if v.c > 0]
        else:
            formatter = (lambda x:unicode(x))
            items = [v for v in tcategories[category].values() if v.c > 0]

        # sort the list
        if sort == 'name':
            kf = lambda x:sort_key(x.s)
            reverse=False
        elif sort == 'popularity':
            kf = lambda x: x.c
            reverse=True
        else:
            kf = avgr
            reverse=True
        items.sort(key=kf, reverse=reverse)

        if tweaks['categories_use_field_for_author_name'] == 'author_sort' and\
                (category == 'authors' or
                 (cat['display'].get('is_names', False) and
                  cat['is_custom'] and cat['is_multiple'] and
                  cat['datatype'] == 'text')):
            use_sort_as_name = True
        else:
            use_sort_as_name = False
        is_editable = (category not in ['news', 'rating', 'languages'] and
                       datatype != "composite")
        categories[category] = [tag_class(formatter(r.n), count=r.c, id=r.id,
                                          avg=avgr(r), sort=r.s, icon=icon,
                                          category=category,
                                          id_set=r.id_set, is_editable=is_editable,
                                          use_sort_as_name=use_sort_as_name)
                                for r in items]
    #print 'end phase "tags list":', time.clock() - last, 'seconds'
    #last = time.clock()

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        r.id_set = None
        for x in categories['rating']:
            if r.name == x.name and r.id != x.id:
                r.count = r.count + x.count
                categories['rating'].remove(x)
                break

    # We delayed computing the standard formats category because it does not
    # use a view, but is computed dynamically
    categories['formats'] = []
    icon = None
    if icon_map and 'formats' in icon_map:
        icon = icon_map['formats']
    for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
        fmt = fmt[0]
        if ids is not None:
            count = self.conn.get('''SELECT COUNT(id)
                                   FROM data
                                   WHERE format=? AND
                                   books_list_filter(book)''', (fmt,),
                                   all=False)
        else:
            count = self.conn.get('''SELECT COUNT(id)
                                   FROM data
                                   WHERE format=?''', (fmt,),
                                   all=False)
        if count > 0:
            categories['formats'].append(Tag(fmt, count=count, icon=icon,
                category='formats', is_editable=False))

    if sort == 'popularity':
        categories['formats'].sort(key=lambda x: x.count, reverse=True)
    else:  # no ratings exist to sort on
        # No need for ICU here.
        categories['formats'].sort(key=lambda x:x.name)

    # Now do identifiers. This works like formats
    categories['identifiers'] = []
    icon = None
    if icon_map and 'identifiers' in icon_map:
        icon = icon_map['identifiers']
    for ident in self.conn.get('SELECT DISTINCT type FROM identifiers'):
        ident = ident[0]
        if ids is not None:
            count = self.conn.get('''SELECT COUNT(book)
                                   FROM identifiers
                                   WHERE type=? AND
                                   books_list_filter(book)''', (ident,),
                                   all=False)
        else:
            count = self.conn.get('''SELECT COUNT(id)
                                   FROM identifiers
                                   WHERE type=?''', (ident,),
                                   all=False)
        if count > 0:
            categories['identifiers'].append(Tag(ident, count=count, icon=icon,
                category='identifiers',
                is_editable=False))

    if sort == 'popularity':
        categories['identifiers'].sort(key=lambda x: x.count, reverse=True)
    else:  # no ratings exist to sort on
        # No need for ICU here.
        categories['identifiers'].sort(key=lambda x:x.name)

    #### Now do the user-defined categories. ####
    user_categories = dict.copy(self.clean_user_categories())

    # We want to use same node in the user category as in the source
    # category. To do that, we need to find the original Tag node. There is
    # a time/space tradeoff here. By converting the tags into a map, we can
    # do the verification in the category loop much faster, at the cost of
    # temporarily duplicating the categories lists.
    taglist = {}
    for c in categories.keys():
        taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), categories[c]))

    # Add the grouped search terms that should appear as user categories.
    muc = self.prefs.get('grouped_search_make_user_categories', [])
    gst = self.prefs.get('grouped_search_terms', {})
    for c in gst:
        if c not in muc:
            continue
        user_categories[c] = []
        for sc in gst[c]:
            if sc in categories.keys():
                for t in categories[sc]:
                    user_categories[c].append([t.name, sc, 0])

    gst_icon = icon_map['gst'] if icon_map else None
    for user_cat in sorted(user_categories.keys(), key=sort_key):
        items = []
        names_seen = {}
        for (name,label,ign) in user_categories[user_cat]:
            n = icu_lower(name)
            if label in taglist and n in taglist[label]:
                if user_cat in gst:
                    # for gst items, make copy and consolidate the tags by name.
                    if n in names_seen:
                        t = names_seen[n]
                        t.id_set |= taglist[label][n].id_set
                        t.count += taglist[label][n].count
                        t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
                    else:
                        t = copy.copy(taglist[label][n])
                        t.icon = gst_icon
                        names_seen[t.name] = t
                        items.append(t)
                else:
                    items.append(taglist[label][n])
            # else: do nothing, to not include nodes w zero counts
        cat_name = '@' + user_cat  # add the '@' to avoid name collision
        # Not a problem if we accumulate entries in the icon map
        if icon_map is not None:
            icon_map[cat_name] = icon_map['user:']
        if sort == 'popularity':
            categories[cat_name] = \
                sorted(items, key=lambda x: x.count, reverse=True)
        elif sort == 'name':
            categories[cat_name] = \
                sorted(items, key=lambda x: sort_key(x.sort))
        else:
            categories[cat_name] = \
                sorted(items, key=lambda x:x.avg_rating, reverse=True)

    #### Finally, the saved searches category ####
    items = []
    icon = None
    if icon_map and 'search' in icon_map:
        icon = icon_map['search']
    for srch in saved_searches().names():
        items.append(Tag(srch, tooltip=saved_searches().lookup(srch),
                         sort=srch, icon=icon, category='search',
                         is_editable=False))
    if len(items):
        if icon_map is not None:
            icon_map['search'] = icon_map['search']
        categories['search'] = items

    #print 'last phase ran in:', time.clock() - last, 'seconds'
    #print 'get_categories ran in:', time.clock() - start, 'seconds'

    return categories
############# End get_categories
def tags_older_than(self, tag, delta, must_have_tag=None,
                    must_have_authors=None):
    '''
    Yield the ids of all books carrying ``tag`` that are older than the
    given age. Tag comparison is case insensitive.

    :param delta: A timedelta object or None. If None, every id with the
        tag is yielded regardless of age.
    :param must_have_tag: If not None, restrict matches to books that also
        have this tag.
    :param must_have_authors: A list of authors. If not None, restrict
        matches to books whose full author string equals this list
        (case insensitive).
    '''
    wanted_tag = tag.lower().strip()
    required_tag = must_have_tag.lower().strip() if must_have_tag else None
    now = nowf()
    tindex = self.FIELD_MAP['timestamp']
    gindex = self.FIELD_MAP['tags']
    iindex = self.FIELD_MAP['id']
    aindex = self.FIELD_MAP['authors']
    required_authors = None
    if must_have_authors is not None:
        # authors are stored with ',' encoded as '|' and joined by ','
        required_authors = ','.join(
            a.replace(',', '|').lower() for a in must_have_authors)
    for rec in self.data._data:
        if rec is None:
            continue
        if delta is not None and (now - rec[tindex]) <= delta:
            continue
        if required_authors:
            if (rec[aindex] or '').lower() != required_authors:
                continue
        raw_tags = rec[gindex]
        if not raw_tags:
            continue
        book_tags = [x.strip() for x in raw_tags.lower().split(',')]
        if wanted_tag in book_tags and (required_tag is None or
                                        required_tag in book_tags):
            yield rec[iindex]
def get_next_series_num_for(self, series):
    '''Return the series_index to use for the next book added to ``series``.'''
    sid = None
    if series:
        sid = self.conn.get('SELECT id from series WHERE name=?',
                            (series,), all=False)
    if sid is None:
        # Unknown series: fall back to the configured starting index
        auto = tweaks['series_index_auto_increment']
        return float(auto) if isinstance(auto, (int, float)) else 1.0
    indices = self.conn.get(
        ('SELECT series_index FROM books WHERE id IN '
         '(SELECT book FROM books_series_link where series=?) '
         'ORDER BY series_index'),
        (sid,))
    return self._get_next_series_num_for_list(indices)
def _get_next_series_num_for_list(self, series_indices):
    # Thin wrapper over the module-level helper that implements the
    # series_index_auto_increment policy for a list of existing indices.
    return _get_next_series_num_for_list(series_indices)
def set(self, row, column, val, allow_case_change=False):
    '''
    Convenience method for setting the title, authors, publisher, tags or
    rating

    :param row: A row index into the current view (not a book id).
    :param column: One of 'authors', 'title', 'publisher', 'rating', 'tags'.
    :param val: The new value; for authors and tags, a comma separated
        string.
    :return: The set of book ids that the caller should refresh.
    '''
    id = self.data[row][0]
    col = self.FIELD_MAP[column]

    books_to_refresh = {id}
    # Captured before val may be converted below: the in-memory cache
    # stores the raw display string, not the parsed value
    set_args = (row, col, val)
    if column == 'authors':
        val = string_to_authors(val)
        books_to_refresh |= self.set_authors(id, val, notify=False,
                                             allow_case_change=allow_case_change)
    elif column == 'title':
        self.set_title(id, val, notify=False)
    elif column == 'publisher':
        books_to_refresh |= self.set_publisher(id, val, notify=False,
                                               allow_case_change=allow_case_change)
    elif column == 'rating':
        self.set_rating(id, val, notify=False)
    elif column == 'tags':
        books_to_refresh |= \
            self.set_tags(id, [x.strip() for x in val.split(',') if x.strip()],
                          append=False, notify=False,
                          allow_case_change=allow_case_change)
    self.data.set(*set_args)
    self.data.refresh_ids(self, [id])
    # Title/author changes can move the book's files on disk
    self.set_path(id, True)
    self.notify('metadata', [id])
    return books_to_refresh
def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
                 set_authors=True, commit=True, force_changes=False,
                 notify=True):
    '''
    Set metadata for the book `id` from the `Metadata` object `mi`

    Setting force_changes=True will force set_metadata to update fields even
    if mi contains empty values. In this case, 'None' is distinguished from
    'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
    The tags, identifiers, and cover attributes are special cases. Tags and
    identifiers cannot be set to None so then will always be replaced if
    force_changes is true. You must ensure that mi contains the values you
    want the book to have. Covers are always changed if a new cover is
    provided, but are never deleted. Also note that force_changes has no
    effect on setting title or authors.

    :param ignore_errors: If True, exceptions raised by individual field
        setters are printed and swallowed instead of propagating.
    '''
    if callable(getattr(mi, 'to_book_metadata', None)):
        # Handle code passing in a OPF object instead of a Metadata object
        mi = mi.to_book_metadata()

    def doit(func, *args, **kwargs):
        # Run one field setter, honoring ignore_errors
        try:
            func(*args, **kwargs)
        except:
            if ignore_errors:
                traceback.print_exc()
            else:
                raise

    def should_replace_field(attr):
        # Replace when forcing and mi carries a value, or when mi's value
        # is non-null
        return (force_changes and (mi.get(attr, None) is not None)) or \
                not mi.is_null(attr)

    path_changed = False
    if set_title and mi.title:
        self._set_title(id, mi.title)
        path_changed = True
    if set_authors:
        if not mi.authors:
            mi.authors = [_('Unknown')]
        authors = []
        for a in mi.authors:
            authors += string_to_authors(a)
        self._set_authors(id, authors)
        path_changed = True
    if path_changed:
        # Title/authors determine the on-disk location of the book's files
        self.set_path(id, index_is_id=True)

    if should_replace_field('title_sort'):
        self.set_title_sort(id, mi.title_sort, notify=False, commit=False)
    if should_replace_field('author_sort'):
        doit(self.set_author_sort, id, mi.author_sort, notify=False,
             commit=False)
    if should_replace_field('publisher'):
        doit(self.set_publisher, id, mi.publisher, notify=False,
             commit=False)

    # Setting rating to zero is acceptable.
    if mi.rating is not None:
        doit(self.set_rating, id, mi.rating, notify=False, commit=False)
    if should_replace_field('series'):
        doit(self.set_series, id, mi.series, notify=False, commit=False)

    # force_changes has no effect on cover manipulation
    if mi.cover_data[1] is not None:
        doit(self.set_cover, id, mi.cover_data[1], commit=False)
    elif isinstance(mi.cover, basestring) and mi.cover:
        if os.access(mi.cover, os.R_OK):
            with lopen(mi.cover, 'rb') as f:
                raw = f.read()
            if raw:
                doit(self.set_cover, id, raw, commit=False)

    # if force_changes is true, tags are always replaced because the
    # attribute cannot be set to None.
    if should_replace_field('tags'):
        doit(self.set_tags, id, mi.tags, notify=False, commit=False)

    if should_replace_field('comments'):
        doit(self.set_comment, id, mi.comments, notify=False, commit=False)

    if should_replace_field('languages'):
        doit(self.set_languages, id, mi.languages, notify=False, commit=False)

    # Setting series_index to zero is acceptable
    if mi.series_index is not None:
        doit(self.set_series_index, id, mi.series_index, notify=False,
             commit=False)
    if should_replace_field('pubdate'):
        doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
    if getattr(mi, 'timestamp', None) is not None:
        doit(self.set_timestamp, id, mi.timestamp, notify=False,
             commit=False)

    # identifiers will always be replaced if force_changes is True
    mi_idents = mi.get_identifiers()
    if force_changes:
        self.set_identifiers(id, mi_idents, notify=False, commit=False)
    elif mi_idents:
        # Merge: update/add identifiers from mi, keep any others already
        # stored for the book
        identifiers = self.get_identifiers(id, index_is_id=True)
        for key, val in mi_idents.iteritems():
            if val and val.strip():  # Don't delete an existing identifier
                identifiers[icu_lower(key)] = val
        self.set_identifiers(id, identifiers, notify=False, commit=False)

    user_mi = mi.get_all_user_metadata(make_copy=False)
    for key in user_mi.iterkeys():
        # Only copy custom columns whose datatype (and multiplicity, for
        # text columns) matches this library's definition
        if key in self.field_metadata and \
                user_mi[key]['datatype'] == self.field_metadata[key]['datatype'] and \
                (user_mi[key]['datatype'] != 'text' or
                 user_mi[key]['is_multiple'] == self.field_metadata[key]['is_multiple']):
            val = mi.get(key, None)
            if force_changes or val is not None:
                doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
                     label=user_mi[key]['label'], commit=False, notify=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
def authors_sort_strings(self, id, index_is_id=False):
    '''
    Given a book, return the list of author sort strings
    for the book's authors
    '''
    book_id = id if index_is_id else self.id(id)
    rows = self.conn.get('''
            SELECT sort
            FROM authors, books_authors_link as bl
            WHERE bl.book=? and authors.id=bl.author
            ORDER BY bl.id''', (book_id,))
    return [sort for (sort,) in rows]
# Given a book, return the map of author sort strings for the book's authors
def authors_with_sort_strings(self, id, index_is_id=False):
    book_id = id if index_is_id else self.id(id)
    rows = self.conn.get('''
            SELECT authors.id, authors.name, authors.sort, authors.link
            FROM authors, books_authors_link as bl
            WHERE bl.book=? and authors.id=bl.author
            ORDER BY bl.id''', (book_id,))
    # Author names are stored with ',' encoded as '|'; decode on the way out
    return [(aid, name.replace('|', ','), sort, link)
            for (aid, name, sort, link) in rows]
# Given a book, return the author_sort string for authors of the book
def author_sort_from_book(self, id, index_is_id=False):
    sorts = self.authors_sort_strings(id, index_is_id)
    return ' & '.join(sorts).replace('|', ',')
# Given an author, return a list of books with that author
def books_for_author(self, id_, index_is_id=False):
    author_id = id_ if index_is_id else self.id(id_)
    rows = self.conn.get('''
            SELECT bl.book
            FROM books_authors_link as bl
            WHERE bl.author=?''', (author_id,))
    return [row[0] for row in rows]
# Given a list of authors, return the author_sort string for the authors,
# preferring the author sort associated with the author over the computed
# string
def author_sort_from_authors(self, authors):
    parts = []
    for aut in authors:
        stored = self.conn.get('SELECT sort FROM authors WHERE name=?',
                               (aut.replace(',', '|'),), all=False)
        parts.append(author_to_author_sort(aut) if stored is None else stored)
    return ' & '.join(parts).replace('|', ',')
def _update_author_in_cache(self, id_, ss, final_authors):
    # Persist the author_sort for book ``id_`` and refresh the in-memory
    # cache rows that derive from the authors (authors, author_sort and
    # the author map).
    self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (ss, id_))
    self.data.set(id_, self.FIELD_MAP['authors'],
                  ','.join([a.replace(',', '|') for a in final_authors]),
                  row_is_id=True)
    self.data.set(id_, self.FIELD_MAP['author_sort'], ss, row_is_id=True)

    aum = self.authors_with_sort_strings(id_, index_is_id=True)
    # au_map packs (author, sort, link) triples as 'au:::aus:::aul' joined
    # with ':#:' — keep this format in sync with consumers of au_map
    self.data.set(id_, self.FIELD_MAP['au_map'],
                  ':#:'.join([':::'.join((au.replace(',', '|'), aus, aul))
                              for (_, au, aus, aul) in aum]),
                  row_is_id=True)
def _set_authors(self, id, authors, allow_case_change=False):
    # Replace the author links of book ``id`` with ``authors`` (list of
    # name strings). Returns the set of book ids whose cached author data
    # must be refreshed — more than just {id} when a case-only rename
    # touches other books sharing an author.
    if not authors:
        authors = [_('Unknown')]
    self.conn.execute('DELETE FROM books_authors_link WHERE book=?',(id,))
    books_to_refresh = {id}
    final_authors = []
    for a in authors:
        case_change = False
        if not a:
            continue
        # Commas inside an author name are stored as '|'
        a = a.strip().replace(',', '|')
        if not isinstance(a, unicode):
            a = a.decode(preferred_encoding, 'replace')
        aus = self.conn.get('SELECT id, name, sort FROM authors WHERE name=?', (a,))
        if aus:
            aid, name, sort = aus[0]
            # Handle change of case
            if name != a:
                if allow_case_change:
                    ns = author_to_author_sort(a.replace('|', ','))
                    # Only auto-update the sort when it still equals the
                    # computed default for the old name
                    if strcmp(sort, ns) == 0:
                        sort = ns
                    self.conn.execute('''UPDATE authors SET name=?, sort=?
                                         WHERE id=?''', (a, sort, aid))
                    case_change = True
                else:
                    a = name
        else:
            aid = self.conn.execute('''INSERT INTO authors(name)
                                       VALUES (?)''', (a,)).lastrowid
        final_authors.append(a.replace('|', ','))
        try:
            self.conn.execute('''INSERT INTO books_authors_link(book, author)
                                 VALUES (?,?)''', (id, aid))
        except IntegrityError:  # Sometimes books specify the same author twice in their metadata
            pass
        if case_change:
            bks = self.conn.get('''SELECT book FROM books_authors_link
                                   WHERE author=?''', (aid,))
            books_to_refresh |= set([bk[0] for bk in bks])
            for bk in books_to_refresh:
                ss = self.author_sort_from_book(id, index_is_id=True)
                aus = self.author_sort(bk, index_is_id=True)
                # Only refresh books whose author_sort was the default
                if strcmp(aus, ss) == 0:
                    self._update_author_in_cache(bk, ss, final_authors)
    # This can repeat what was done above in rare cases. Let it.
    ss = self.author_sort_from_book(id, index_is_id=True)
    self._update_author_in_cache(id, ss, final_authors)
    self.clean_standard_field('authors', commit=True)
    return books_to_refresh
def windows_check_if_files_in_use(self, book_id):
    '''
    Raises an EACCES IOError if any of the files in the folder of book_id
    are opened in another program on windows.
    '''
    if iswindows:
        path = self.path(book_id, index_is_id=True)
        if path:
            spath = os.path.join(self.library_path, *path.split('/'))
            wam = None
            if os.path.exists(spath):
                try:
                    # Constructed only for its side effect: it opens handles
                    # to the folder's files and fails when they are in use.
                    # The result is discarded; handles are always released.
                    wam = WindowsAtomicFolderMove(spath)
                finally:
                    if wam is not None:
                        wam.close_handles()
def set_authors(self, id, authors, notify=True, commit=True,
                allow_case_change=False):
    '''
    Note that even if commit is False, the db will still be committed to
    because this causes the location of files to change

    :param authors: A list of authors.
    :return: The set of book ids needing a refresh (see _set_authors).
    '''
    # Fail early on Windows if the book's files are open elsewhere, since
    # changing authors moves the files on disk
    self.windows_check_if_files_in_use(id)
    books_to_refresh = self._set_authors(id, authors,
                                         allow_case_change=allow_case_change)
    self.dirtied(set([id])|books_to_refresh, commit=False)
    if commit:
        self.conn.commit()
    self.set_path(id, index_is_id=True)
    if notify:
        self.notify('metadata', [id])
    return books_to_refresh
def set_title_sort(self, id, title_sort_, notify=True, commit=True):
    # Set the title sort string for book ``id``. Returns False (and does
    # nothing) for empty input, True on success.
    if not title_sort_:
        return False
    if isbytestring(title_sort_):
        title_sort_ = title_sort_.decode(preferred_encoding, 'replace')
    self.conn.execute('UPDATE books SET sort=? WHERE id=?', (title_sort_, id))
    self.data.set(id, self.FIELD_MAP['sort'], title_sort_, row_is_id=True)
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
    return True
def _set_title(self, id, title):
    # Set the title (and keep sort consistent on case-only changes) for
    # book ``id``. Returns False for empty input, True otherwise.
    if not title:
        return False
    if isbytestring(title):
        title = title.decode(preferred_encoding, 'replace')
    old_title = self.title(id, index_is_id=True)
    # We cannot check if old_title == title as previous code might have
    # already updated the cache
    only_case_change = icu_lower(old_title) == icu_lower(title)
    self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
    self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
    if only_case_change:
        # SQLite update trigger will not update sort on a case change
        self.conn.execute('UPDATE books SET sort=? WHERE id=?',
                          (title_sort(title), id))
    # Re-read sort so the cache matches whatever the trigger (or the
    # explicit update above) produced
    ts = self.conn.get('SELECT sort FROM books WHERE id=?', (id,),
                       all=False)
    if ts:
        self.data.set(id, self.FIELD_MAP['sort'], ts, row_is_id=True)
    return True
def set_title(self, id, title, notify=True, commit=True):
    '''
    Note that even if commit is False, the db will still be committed to
    because this causes the location of files to change
    '''
    # Fail early on Windows if the book's files are open elsewhere
    self.windows_check_if_files_in_use(id)
    if not self._set_title(id, title):
        return
    # Title changes move the book's files on disk
    self.set_path(id, index_is_id=True)
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
def set_languages(self, book_id, languages, notify=True, commit=True):
    # Replace the language links for ``book_id`` with the canonicalized,
    # de-duplicated ``languages``. Returns the set of book ids to refresh.
    self.conn.execute(
        'DELETE FROM books_languages_link WHERE book=?', (book_id,))
    # Prune language codes no longer referenced by any book
    self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
                            FROM books_languages_link WHERE
                            books_languages_link.lang_code=languages.id) < 1''')

    books_to_refresh = set([book_id])
    final_languages = []
    for l in languages:
        lc = canonicalize_lang(l)
        # Skip unrecognized codes, duplicates, and the special
        # "undetermined / no linguistic content" codes
        if not lc or lc in final_languages or lc in ('und', 'zxx', 'mis',
                                                     'mul'):
            continue
        final_languages.append(lc)
        lc_id = self.conn.get('SELECT id FROM languages WHERE lang_code=?',
                              (lc,), all=False)
        if lc_id is None:
            lc_id = self.conn.execute('''INSERT INTO languages(lang_code)
                                         VALUES (?)''', (lc,)).lastrowid
        self.conn.execute('''INSERT INTO books_languages_link(book, lang_code)
                             VALUES (?,?)''', (book_id, lc_id))

    self.dirtied(books_to_refresh, commit=False)
    if commit:
        self.conn.commit()
    self.data.set(book_id, self.FIELD_MAP['languages'],
                  u','.join(final_languages), row_is_id=True)
    if notify:
        self.notify('metadata', [book_id])
    return books_to_refresh
def set_timestamp(self, id, dt, notify=True, commit=True):
    '''Set the date-added timestamp of book ``id``. A falsy ``dt`` is a no-op.'''
    if not dt:
        return
    if isinstance(dt, (unicode, bytes)):
        dt = parse_date(dt, as_utc=True, assume_utc=False)
    self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
    self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
def set_pubdate(self, id, dt, notify=True, commit=True):
    # Set the publication date; a falsy dt resets it to UNDEFINED_DATE.
    if not dt:
        dt = UNDEFINED_DATE
    if isinstance(dt, basestring):
        dt = parse_only_date(dt)
    self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
    self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
def set_publisher(self, id, publisher, notify=True, commit=True,
                  allow_case_change=False):
    # Replace the publisher link for book ``id``. Returns the set of book
    # ids needing a refresh — grows when a case-only rename affects other
    # books sharing the publisher.
    self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
    books_to_refresh = {id}
    if publisher:
        case_change = False
        if not isinstance(publisher, unicode):
            publisher = publisher.decode(preferred_encoding, 'replace')
        pubx = self.conn.get('''SELECT id,name from publishers
                                WHERE name=?''', (publisher,))
        if pubx:
            aid, cur_name = pubx[0]
            if publisher != cur_name:
                # Same name, different case
                if allow_case_change:
                    self.conn.execute('''UPDATE publishers SET name=?
                                         WHERE id=?''', (publisher, aid))
                    case_change = True
                else:
                    publisher = cur_name
                    books_to_refresh = set()
        else:
            aid = self.conn.execute('''INSERT INTO publishers(name)
                                       VALUES (?)''', (publisher,)).lastrowid
        self.conn.execute('''INSERT INTO books_publishers_link(book, publisher)
                             VALUES (?,?)''', (id, aid))
        if case_change:
            bks = self.conn.get('''SELECT book FROM books_publishers_link
                                   WHERE publisher=?''', (aid,))
            books_to_refresh |= set([bk[0] for bk in bks])
    # Prune publishers that no longer have any books
    self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
                         FROM books_publishers_link
                         WHERE publisher=publishers.id) < 1''')
    self.dirtied(set([id])|books_to_refresh, commit=False)
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
    if notify:
        self.notify('metadata', [id])
    return books_to_refresh
def set_uuid(self, id, uuid, notify=True, commit=True):
    '''Set the uuid of book ``id``. A falsy ``uuid`` is a no-op.'''
    if not uuid:
        return
    self.conn.execute('UPDATE books SET uuid=? WHERE id=?', (uuid, id))
    self.data.set(id, self.FIELD_MAP['uuid'], uuid, row_is_id=True)
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id])
def get_id_from_uuid(self, uuid):
    '''Return the id of the book with the given uuid, or None.'''
    if not uuid:
        return None
    cached = self.data._uuid_map.get(uuid, None)
    if cached:
        return cached
    return self.conn.get('SELECT id FROM books WHERE uuid=?', (uuid,),
                         all=False)
# Convenience methods for tags_list_editor
# Note: we generally do not need to refresh_ids because library_view will
# refresh everything.

def get_ratings_with_ids(self):
    '''Return [(id, rating), ...] for all ratings; [] when there are none.'''
    return self.conn.get('SELECT id,rating FROM ratings') or []
def dirty_books_referencing(self, field, id, commit=True):
    '''Mark as dirty every book that references item ``id`` of ``field``.'''
    table = self.field_metadata[field]['table']
    link = self.field_metadata[field]['link_column']
    rows = self.conn.get(
        'SELECT book from books_{0}_link WHERE {1}=?'.format(table, link),
        (id,))
    self.dirtied([book_id for (book_id,) in rows], commit=commit)
def get_tags_with_ids(self):
    '''Return [(id, name), ...] for all tags; [] when there are none.'''
    return self.conn.get('SELECT id,name FROM tags') or []
def get_languages_with_ids(self):
    '''Return [(id, lang_code), ...] for all languages; [] when none.'''
    return self.conn.get('SELECT id,lang_code FROM languages') or []
def rename_tag(self, old_id, new_name):
    # Rename tag ``old_id`` to ``new_name``, merging with an existing tag
    # of the same name when necessary.

    # It is possible that new_name is in fact a set of names. Split it on
    # comma to find out. If it is, then rename the first one and append the
    # rest
    new_names = [t.strip() for t in new_name.strip().split(',') if t.strip()]
    new_name = new_names[0]
    new_names = new_names[1:]

    # get the list of books that reference the tag being changed
    books = self.conn.get('''SELECT book from books_tags_link
                             WHERE tag=?''', (old_id,))
    books = [b[0] for b in books]

    new_id = self.conn.get(
            '''SELECT id from tags
               WHERE name=?''', (new_name,), all=False)
    if new_id is None or old_id == new_id:
        # easy cases. Simply rename the tag. Do it even if equal, in case
        # there is a change of case
        self.conn.execute('''UPDATE tags SET name=?
                             WHERE id=?''', (new_name, old_id))
        new_id = old_id
    else:
        # It is possible that by renaming a tag, the tag will appear
        # twice on a book. This will throw an integrity error, aborting
        # all the changes. To get around this, we first delete any links
        # to the new_id from books referencing the old_id, so that
        # renaming old_id to new_id will be unique on the book
        for book_id in books:
            self.conn.execute('''DELETE FROM books_tags_link
                                 WHERE book=? and tag=?''', (book_id, new_id))

        # Change the link table to point at the new tag
        self.conn.execute('''UPDATE books_tags_link SET tag=?
                             WHERE tag=?''',(new_id, old_id,))
        # Get rid of the no-longer used tag
        self.conn.execute('DELETE FROM tags WHERE id=?', (old_id,))

    if new_names:
        # have some left-over names to process. Add them to the book.
        for book_id in books:
            self.set_tags(book_id, new_names, append=True, notify=False,
                          commit=False)
    self.dirtied(books, commit=False)
    self.clean_standard_field('tags', commit=False)
    self.conn.commit()
def delete_tag_using_id(self, id):
    # Delete tag ``id`` and all its links. Note: dirty_books_referencing
    # must run first, as it queries books_tags_link to find the affected
    # books before the links are removed.
    self.dirty_books_referencing('tags', id, commit=False)
    self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
    self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
    self.conn.commit()
def get_series_with_ids(self):
    '''Return [(id, name), ...] for all series; [] when there are none.'''
    return self.conn.get('SELECT id,name FROM series') or []
def rename_series(self, old_id, new_name, change_index=True):
    # Rename series ``old_id``; if a series named ``new_name`` already
    # exists, merge into it, optionally reassigning series_index values.
    new_name = new_name.strip()
    new_id = self.conn.get(
            '''SELECT id from series
               WHERE name=?''', (new_name,), all=False)
    if new_id is None or old_id == new_id:
        new_id = old_id
        self.conn.execute('UPDATE series SET name=? WHERE id=?',
                          (new_name, old_id))
    else:
        # New series exists. Must update the link, then assign a
        # new series index to each of the books.

        if change_index:
            # Get the list of books where we must update the series index
            # (``books`` is only needed, and only defined, in this case)
            books = self.conn.get('''SELECT books.id
                                     FROM books, books_series_link as lt
                                     WHERE books.id = lt.book AND lt.series=?
                                     ORDER BY books.series_index''', (old_id,))
        # Now update the link table
        self.conn.execute('''UPDATE books_series_link
                             SET series=?
                             WHERE series=?''',(new_id, old_id,))
        if change_index and tweaks['series_index_auto_increment'] != 'no_change':
            # Now set the indices
            for (book_id,) in books:
                # Get the next series index
                index = self.get_next_series_num_for(new_name)
                self.conn.execute('''UPDATE books
                                     SET series_index=?
                                     WHERE id=?''',(index, book_id,))
    self.dirty_books_referencing('series', new_id, commit=False)
    self.clean_standard_field('series', commit=False)
    self.conn.commit()
def delete_series_using_id(self, id):
    # Delete series ``id``; affected books get series_index reset to 1.0.
    # dirty_books_referencing must run before the links are deleted.
    self.dirty_books_referencing('series', id, commit=False)
    books = self.conn.get('SELECT book from books_series_link WHERE series=?', (id,))
    self.conn.execute('DELETE FROM books_series_link WHERE series=?', (id,))
    self.conn.execute('DELETE FROM series WHERE id=?', (id,))
    for (book_id,) in books:
        self.conn.execute('UPDATE books SET series_index=1.0 WHERE id=?', (book_id,))
    self.conn.commit()
def get_publishers_with_ids(self):
    '''Return [(id, name), ...] for all publishers; [] when there are none.'''
    return self.conn.get('SELECT id,name FROM publishers') or []
def rename_publisher(self, old_id, new_name):
    # Rename publisher ``old_id``; merge into an existing publisher of the
    # same name when one exists.
    new_name = new_name.strip()
    new_id = self.conn.get(
            '''SELECT id from publishers
               WHERE name=?''', (new_name,), all=False)
    if new_id is None or old_id == new_id:
        new_id = old_id
        # New name doesn't exist. Simply change the old name
        self.conn.execute('UPDATE publishers SET name=? WHERE id=?',
                          (new_name, old_id))
    else:
        # Change the link table to point at the new one
        self.conn.execute('''UPDATE books_publishers_link
                             SET publisher=?
                             WHERE publisher=?''',(new_id, old_id,))
        # Get rid of the no-longer used publisher
        self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
    self.dirty_books_referencing('publisher', new_id, commit=False)
    self.clean_standard_field('publisher', commit=False)
    self.conn.commit()
def delete_publisher_using_id(self, old_id):
    # Delete publisher ``old_id`` and its links; dirty_books_referencing
    # must run before the link rows are removed.
    self.dirty_books_referencing('publisher', old_id, commit=False)
    self.conn.execute('''DELETE FROM books_publishers_link
                         WHERE publisher=?''', (old_id,))
    self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
    self.conn.commit()
def get_authors_with_ids(self):
    '''Return [(id, name, sort, link), ...] for all authors; [] when none.'''
    return self.conn.get('SELECT id,name,sort,link FROM authors') or []
def get_author_id(self, author):
    '''Return the id of ``author`` (commas stored as |), or None.'''
    return self.conn.get('SELECT id FROM authors WHERE name=?',
                         (author.replace(',', '|'),), all=False)
def set_link_field_for_author(self, aid, link, commit=True, notify=False):
    '''Set the link URL stored for author ``aid`` (falsy link clears it).'''
    self.conn.execute('UPDATE authors SET link=? WHERE id=?',
                      ((link or '').strip(), aid))
    if commit:
        self.conn.commit()
def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
    # Update an author's sort string and propagate it into the
    # author_sort field of every book by that author.
    self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
                      (new_sort.strip(), old_id))
    if commit:
        self.conn.commit()
    # Now change all the author_sort fields in books by this author
    bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
    for (book_id,) in bks:
        ss = self.author_sort_from_book(book_id, index_is_id=True)
        self.set_author_sort(book_id, ss, notify=notify, commit=commit)
def rename_author(self, old_id, new_name):
    '''
    Rename author ``old_id`` to ``new_name``, merging with an existing
    author of that name when necessary.

    :return: The id of the resulting author (``old_id`` for an in-place
        rename, the existing author's id when merging).
    '''
    # Make sure that any commas in new_name are changed to '|'!
    new_name = new_name.replace(',', '|').strip()
    if not new_name:
        new_name = _('Unknown')

    # Get the list of books we must fix up, one way or the other
    # Save the list so we can use it twice
    bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
    books = [b[0] for b in bks]

    # check if the new author already exists
    new_id = self.conn.get('SELECT id from authors WHERE name=?',
                           (new_name,), all=False)
    if new_id is None or old_id == new_id:
        # No name clash (or only a change of case). Rename in place.
        # Bug fix: set new_id so that the function returns the resulting
        # author id (previously returned None in this branch), consistent
        # with rename_tag/rename_series/rename_publisher.
        self.conn.execute('UPDATE authors SET name=? WHERE id=?',
                          (new_name, old_id))
        new_id = old_id
    else:
        # NOTE: the old "degenerate case" re-check of old_id == new_id here
        # was unreachable (this branch already implies old_id != new_id)
        # and has been removed.
        #
        # Author exists. To fix this, we must replace all the authors
        # instead of replacing the one. Reason: db integrity checks can stop
        # the rename process, which would leave everything half-done. We
        # can't do it the same way as tags (delete and add) because author
        # order is important.
        for book_id in books:
            # Get the existing list of authors
            authors = self.conn.get('''
                SELECT author from books_authors_link
                WHERE book=?
                ORDER BY id''', (book_id,))

            # unpack the double-list structure, replacing the old author
            # with the new one while we are at it
            for i, aut in enumerate(authors):
                authors[i] = aut[0] if aut[0] != old_id else new_id

            # Delete the existing authors list
            self.conn.execute('''DELETE FROM books_authors_link
                                 WHERE book=?''', (book_id,))
            # Change the authors to the new list
            for aid in authors:
                try:
                    self.conn.execute('''
                        INSERT INTO books_authors_link(book, author)
                        VALUES (?,?)''', (book_id, aid))
                except IntegrityError:
                    # Sometimes books specify the same author twice in their
                    # metadata. Ignore it.
                    pass

        # Now delete the old author from the DB
        self.conn.execute('DELETE FROM authors WHERE id=?', (old_id,))
    self.dirtied(books, commit=False)
    self.conn.commit()

    # the authors are now changed, either by changing the author's name
    # or replacing the author in the list. Now must fix up the books.
    for book_id in books:
        # First, must refresh the cache to see the new authors
        self.data.refresh_ids(self, [book_id])
        # now fix the filesystem paths
        self.set_path(book_id, index_is_id=True)
        # Next fix the author sort. Reset it to the default
        ss = self.author_sort_from_book(book_id, index_is_id=True)
        self.set_author_sort(book_id, ss)
        # the caller will do a general refresh, so we don't need to
        # do one here
    return new_id
# end convenience methods
def get_tags(self, id):
    '''Return the set of tag names attached to book ``id``.'''
    rows = self.conn.get(
        'SELECT name FROM tags WHERE id IN (SELECT tag FROM books_tags_link WHERE book=?)',
        (id,), all=True)
    if not rows:
        return set([])
    return set(row[0] for row in rows)
@classmethod
def cleanup_tags(cls, tags):
    '''
    Normalize a list of tag strings: strip, replace commas with
    semicolons, decode byte strings, collapse internal whitespace, and
    drop case-insensitive duplicates (first occurrence wins).
    '''
    normalized = []
    for raw in tags:
        if not raw.strip():
            continue
        raw = raw.strip().replace(',', ';')
        if isbytestring(raw):
            raw = raw.decode(preferred_encoding, 'replace')
        normalized.append(u' '.join(raw.split()))
    ans, seen = [], set([])
    for tag in normalized:
        key = tag.lower()
        if key not in seen:
            seen.add(key)
            ans.append(tag)
    return ans
def remove_all_tags(self, ids, notify=False, commit=True):
    '''Delete every tag link for each book in ``ids`` and clear the cache.'''
    self.conn.executemany(
        'DELETE FROM books_tags_link WHERE book=?', [(x,) for x in ids])
    self.dirtied(ids, commit=False)
    if commit:
        self.conn.commit()

    for book_id in ids:
        self.data.set(book_id, self.FIELD_MAP['tags'], '', row_is_id=True)
    if notify:
        self.notify('metadata', ids)
def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
    # Add and/or remove tags on many books at once using temp tables so
    # the link-table updates are single SQL statements.
    # NOTE(review): the mutable default args ([]) are shared across calls
    # but are never mutated here, so this is safe as written.
    add = self.cleanup_tags(add)
    remove = self.cleanup_tags(remove)
    # A tag both added and removed is treated as added
    remove = set(remove) - set(add)
    if not ids or (not add and not remove):
        return

    # Add tags that do not already exist into the tag table
    all_tags = self.all_tags()
    lt = [t.lower() for t in all_tags]
    new_tags = [t for t in add if t.lower() not in lt]
    if new_tags:
        self.conn.executemany('INSERT INTO tags(name) VALUES (?)', [(x,) for x in
            new_tags])

    # Create the temporary tables to store the ids for books and tags
    # to be operated on
    tables = ('temp_bulk_tag_edit_books', 'temp_bulk_tag_edit_add',
              'temp_bulk_tag_edit_remove')
    drops = '\n'.join(['DROP TABLE IF EXISTS %s;'%t for t in tables])
    creates = '\n'.join(['CREATE TEMP TABLE %s(id INTEGER PRIMARY KEY);'%t
                         for t in tables])
    self.conn.executescript(drops + creates)

    # Populate the books temp table
    self.conn.executemany(
        'INSERT INTO temp_bulk_tag_edit_books VALUES (?)',
            [(x,) for x in ids])

    # Populate the add/remove tags temp tables
    for table, tags in enumerate([add, remove]):
        if not tags:
            continue
        table = tables[table+1]
        insert = ('INSERT INTO %s(id) SELECT tags.id FROM tags WHERE name=?'
                  ' COLLATE PYNOCASE LIMIT 1')
        self.conn.executemany(insert%table, [(x,) for x in tags])

    if remove:
        self.conn.execute(
          '''DELETE FROM books_tags_link WHERE
                book IN (SELECT id FROM %s) AND
                tag IN (SELECT id FROM %s)'''
          % (tables[0], tables[2]))

    if add:
        # Cross join: link every book in the temp table with every tag
        self.conn.execute(
        '''
        INSERT OR REPLACE INTO books_tags_link(book, tag) SELECT {0}.id, {1}.id FROM
        {0}, {1}
        '''.format(tables[0], tables[1])
        )
    self.conn.executescript(drops)
    self.dirtied(ids, commit=False)
    self.conn.commit()

    # Refresh the cached comma-joined tags string for each touched book
    for x in ids:
        tags = u','.join(self.get_tags(x))
        self.data.set(x, self.FIELD_MAP['tags'], tags, row_is_id=True)
    if notify:
        self.notify('metadata', ids)
def commit(self):
    # Flush any pending changes to the underlying database connection.
    self.conn.commit()
def set_tags(self, id, tags, append=False, notify=True, commit=True,
             allow_case_change=False):
    '''
    @param tags: list of strings
    @param append: If True existing tags are not removed
    @return: the set of book ids to refresh — grows beyond {id} when a
        case-only rename affects other books sharing a tag
    '''
    if not tags:
        tags = []
    if not append:
        self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
    otags = self.get_tags(id)
    tags = self.cleanup_tags(tags)
    books_to_refresh = {id}
    # Only process tags the book does not already have
    for tag in (set(tags)-otags):
        case_changed = False
        tag = tag.strip()
        if not tag:
            continue
        if not isinstance(tag, unicode):
            tag = tag.decode(preferred_encoding, 'replace')
        existing_tags = self.all_tags()
        lt = [t.lower() for t in existing_tags]
        try:
            idx = lt.index(tag.lower())
        except ValueError:
            idx = -1
        if idx > -1:
            # A tag with this name (possibly different case) exists
            etag = existing_tags[idx]
            tid = self.conn.get('SELECT id FROM tags WHERE name=?', (etag,), all=False)
            if allow_case_change and etag != tag:
                self.conn.execute('UPDATE tags SET name=? WHERE id=?', (tag, tid))
                case_changed = True
        else:
            tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid

        # Only link if the link does not already exist
        if not self.conn.get('''SELECT book FROM books_tags_link
                                WHERE book=? AND tag=?''', (id, tid), all=False):
            self.conn.execute('''INSERT INTO books_tags_link(book, tag)
                                 VALUES (?,?)''', (id, tid))
        if case_changed:
            bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
                                (tid,))
            books_to_refresh |= set([bk[0] for bk in bks])
    # Drop tags no longer used by any book
    self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
                         FROM books_tags_link WHERE tag=tags.id) < 1''')
    self.dirtied(set([id])|books_to_refresh, commit=False)
    if commit:
        self.conn.commit()
    # Refresh the cached comma-joined tags string
    tags = u','.join(self.get_tags(id))
    self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
    if notify:
        self.notify('metadata', [id])
    return books_to_refresh
def unapply_tags(self, book_id, tags, notify=True):
    '''
    Remove each tag in ``tags`` (matched by exact name) from ``book_id``.

    :param tags: iterable of tag names
    :param notify: if True, emit a metadata-changed notification
    '''
    for tag in tags:
        tag_id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,),
                               all=False)
        if tag_id:
            self.conn.execute('''DELETE FROM books_tags_link
                                 WHERE tag=? AND book=?''', (tag_id, book_id))
    self.conn.commit()
    self.data.refresh_ids(self, [book_id])
    if notify:
        # Bug fix: previously notified with the loop variable ``id`` (the
        # last tag id, or the id() builtin when tags was empty) instead of
        # the book id.
        self.notify('metadata', [book_id])
def is_tag_used(self, tag):
    '''Return True iff a tag with this name (case-insensitive) exists.'''
    return tag.lower() in (t.lower() for t in self.all_tags())
def delete_tag(self, tag):
    """Delete `tag` (matched case-insensitively) and all its book links."""
    existing_tags = self.all_tags()
    lowered = [t.lower() for t in existing_tags]
    needle = tag.lower()
    if needle not in lowered:
        return
    # Resolve the canonical (stored) spelling of the tag before querying.
    canonical = existing_tags[lowered.index(needle)]
    tag_id = self.conn.get('SELECT id FROM tags WHERE name=?', (canonical,), all=False)
    if tag_id:
        self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (tag_id,))
        self.conn.execute('DELETE FROM tags WHERE id=?', (tag_id,))
        self.conn.commit()
# Matches 'Series Name [3.5]' style strings: group 1 is the name, group 2
# the numeric series index.
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')

def _get_series_values(self, val):
    """Delegate to the module-level helper: split `val` into (series, index)."""
    return _get_series_values(val)
def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
    """Replace the series link of book `id` with `series`.

    `series` may embed an index as 'Name [2.0]'. An existing series whose
    name differs only in case is either renamed (allow_case_change=True)
    or reused as-is. Returns the set of book ids whose cached rows need
    refreshing (all books of a renamed series).
    """
    self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
    (series, idx) = self._get_series_values(series)
    books_to_refresh = {id}
    if series:
        case_change = False
        if not isinstance(series, unicode):
            series = series.decode(preferred_encoding, 'replace')
        # Normalize whitespace: strip ends and collapse internal runs.
        series = series.strip()
        series = u' '.join(series.split())
        sx = self.conn.get('SELECT id,name from series WHERE name=?', (series,))
        if sx:
            aid, cur_name = sx[0]
            if cur_name != series:
                if allow_case_change:
                    self.conn.execute('UPDATE series SET name=? WHERE id=?', (series, aid))
                    case_change = True
                else:
                    # Keep the stored spelling; nothing else changes.
                    series = cur_name
                    books_to_refresh = set()
        else:
            aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
        self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
        if idx:
            self.set_series_index(id, idx, notify=notify, commit=commit)
        if case_change:
            # A rename affects every book in the series, not just this one.
            bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
                                (aid,))
            books_to_refresh |= set([bk[0] for bk in bks])
    # Garbage-collect series rows that no longer link to any book.
    self.conn.execute('''DELETE FROM series
                         WHERE (SELECT COUNT(id) FROM books_series_link
                                WHERE series=series.id) < 1''')
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
    if notify:
        self.notify('metadata', [id])
    return books_to_refresh
def set_series_index(self, id, idx, notify=True, commit=True):
    """Set the series index of book `id` to `idx`, defaulting to 1.0.

    Any value that cannot be converted to float falls back to 1.0.
    """
    if idx is None:
        idx = 1.0
    try:
        idx = float(idx)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception preserves the intended "anything bogus
        # becomes 1.0" behavior without trapping exit signals.
        idx = 1.0
    self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
    if notify:
        self.notify('metadata', [id])
def set_rating(self, id, rating, notify=True, commit=True):
    """Replace the rating link of book `id`; falsy ratings become 0."""
    if not rating:
        rating = 0
    rating = int(rating)
    self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
    # Reuse an existing ratings row when one matches, else insert it.
    rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
    rat = rat if rat is not None else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
    self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
    if notify:
        self.notify('metadata', [id])

def set_comment(self, id, text, notify=True, commit=True):
    """Replace the comments row of book `id`; falsy text clears it."""
    self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
    if text:
        self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
    else:
        text = ''
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
    self.dirtied([id], commit=False)
    if notify:
        self.notify('metadata', [id])

def set_author_sort(self, id, sort, notify=True, commit=True):
    """Set the author_sort column of book `id`; falsy sort becomes ''."""
    if not sort:
        sort = ''
    self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
    self.dirtied([id], commit=False)
    if commit:
        self.conn.commit()
    self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
    if notify:
        self.notify('metadata', [id])
def isbn(self, idx, index_is_id=False):
    """Return the ISBN of the book at `idx`, or None when absent.

    Identifiers are cached as a comma-separated 'type:value' string.
    """
    row = self.data._data[idx] if index_is_id else self.data[idx]
    if row is not None:
        raw = row[self.FIELD_MAP['identifiers']]
        if raw:
            for x in raw.split(','):
                if x.startswith('isbn:'):
                    return x[5:].strip()

def get_identifiers(self, idx, index_is_id=False):
    """Return the book's identifiers as a {type: value} dict (possibly empty)."""
    ans = {}
    row = self.data._data[idx] if index_is_id else self.data[idx]
    if row is not None:
        raw = row[self.FIELD_MAP['identifiers']]
        if raw:
            for x in raw.split(','):
                # Entries without both a key and a value are skipped.
                key, _, val = x.partition(':')
                key, val = key.strip(), val.strip()
                if key and val:
                    ans[key] = val
    return ans

def get_all_identifier_types(self):
    """Return the list of distinct identifier types used in the library."""
    idents = self.conn.get('SELECT DISTINCT type FROM identifiers')
    return [ident[0] for ident in idents]

def _clean_identifier(self, typ, val):
    """Sanitize an identifier pair: ':' and ',' are reserved as separators."""
    typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
    val = val.strip().replace(',', '|').replace(':', '|')
    return typ, val
def set_identifier(self, id_, typ, val, notify=True, commit=True):
    'If val is empty, deletes identifier of type typ'
    typ, val = self._clean_identifier(typ, val)
    identifiers = self.get_identifiers(id_, index_is_id=True)
    if not typ:
        # Cleaning removed the entire type name; nothing to do.
        return
    changed = False
    if not val and typ in identifiers:
        identifiers.pop(typ)
        changed = True
        self.conn.execute(
            'DELETE from identifiers WHERE book=? AND type=?',
            (id_, typ))
    if val and identifiers.get(typ, None) != val:
        changed = True
        identifiers[typ] = val
        self.conn.execute(
            'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
            (id_, typ, val))
    if changed:
        # Rebuild the cached comma-separated 'type:value' representation.
        raw = ','.join(['%s:%s'%(k, v) for k, v in
                identifiers.iteritems()])
        self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
                row_is_id=True)
        if commit:
            self.conn.commit()
        if notify:
            self.notify('metadata', [id_])
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
    """Replace all identifiers of book `id_` with the given {type: value} map."""
    cleaned = {}
    if not identifiers:
        identifiers = {}
    for typ, val in identifiers.iteritems():
        typ, val = self._clean_identifier(typ, val)
        if val:
            cleaned[typ] = val
    # Full replacement: clear existing rows, then bulk-insert the new set.
    self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
    self.conn.executemany(
        'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
        [(id_, k, v) for k, v in cleaned.iteritems()])
    raw = ','.join(['%s:%s'%(k, v) for k, v in
            cleaned.iteritems()])
    self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
            row_is_id=True)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id_])

def set_isbn(self, id_, isbn, notify=True, commit=True):
    """Convenience wrapper: store `isbn` under the 'isbn' identifier type."""
    self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
    """Add (or replace) a generated catalog file as a book titled `title`.

    An existing book with the same title and the Catalog tag is reused;
    otherwise a new book entry is created. Returns the book id.
    """
    from calibre.ebooks.metadata.meta import get_metadata
    format = os.path.splitext(path)[1][1:].lower()
    with lopen(path, 'rb') as stream:
        matches = self.data.get_matches('title', '='+title)
        if matches:
            # Only reuse a book that is actually a catalog (has the tag).
            tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
            matches = matches.intersection(tag_matches)
        db_id = None
        if matches:
            db_id = list(matches)[0]
        if db_id is None:
            obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                                    (title, 'calibre'))
            db_id = obj.lastrowid
            self.data.books_added([db_id], self)
            self.set_path(db_id, index_is_id=True)
            self.conn.commit()
        try:
            mi = get_metadata(stream, format)
        except:
            # Unreadable metadata: fall back to a minimal record.
            mi = Metadata(title, ['calibre'])
        stream.seek(0)
        # Catalogs always carry fixed title/author/tags regardless of what
        # the file's own metadata says.
        mi.title, mi.authors = title, ['calibre']
        mi.tags = [_('Catalog')]
        mi.pubdate = mi.timestamp = utcnow()
        if format == 'mobi':
            mi.cover, mi.cover_data = None, (None, None)
        self.set_metadata(db_id, mi)
        self.add_format(db_id, format, stream, index_is_id=True)

    self.conn.commit()
    self.data.refresh_ids(self, [db_id])  # Needed to update format list and size
    return db_id
def add_news(self, path, arg):
    """Add a downloaded news recipe output as a book; return its id.

    `path` may be a filesystem path or a readable stream. `arg` is a dict
    with 'title', 'add_title_tag' and 'custom_tags' keys.
    """
    from calibre.ebooks.metadata.meta import get_metadata
    format = os.path.splitext(path)[1][1:].lower()
    stream = path if hasattr(path, 'read') else lopen(path, 'rb')
    stream.seek(0)
    mi = get_metadata(stream, format, use_libprs_metadata=False,
            force_read_metadata=True)
    # Force the author to calibre as the auto delete of old news checks for
    # both the author==calibre and the tag News
    mi.authors = ['calibre']
    stream.seek(0)
    if mi.series_index is None:
        mi.series_index = self.get_next_series_num_for(mi.series)
    mi.tags = [_('News')]
    if arg['add_title_tag']:
        mi.tags += [arg['title']]
    if arg['custom_tags']:
        mi.tags += arg['custom_tags']
    obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                            (mi.title, mi.authors[0]))
    id = obj.lastrowid
    self.data.books_added([id], self)
    self.set_path(id, index_is_id=True)
    self.conn.commit()
    if mi.pubdate is None:
        mi.pubdate = utcnow()
    if mi.timestamp is None:
        mi.timestamp = utcnow()
    self.set_metadata(id, mi)
    self.add_format(id, format, stream, index_is_id=True)
    if not hasattr(path, 'read'):
        # Only close streams we opened ourselves.
        stream.close()
    self.conn.commit()
    self.data.refresh_ids(self, [id])  # Needed to update format list and size
    return id
def run_import_plugins(self, path_or_stream, format):
    """Run file-type import plugins on the input; return the resulting path.

    Streams are first copied to a persistent temporary file because
    plugins operate on paths, not file objects.
    """
    format = format.lower()
    if hasattr(path_or_stream, 'seek'):
        path_or_stream.seek(0)
        pt = PersistentTemporaryFile('_import_plugin.'+format)
        # Copy in 1MB chunks to bound memory use on large files.
        shutil.copyfileobj(path_or_stream, pt, 1024**2)
        pt.close()
        path = pt.name
    else:
        path = path_or_stream
    return run_plugins_on_import(path, format)

def _add_newbook_tag(self, mi):
    """Append the user-configured 'new book' tags to the metadata object."""
    tags = prefs['new_book_tags']
    if tags:
        for tag in [t.strip() for t in tags]:
            if tag:
                if mi.tags is None:
                    mi.tags = [tag]
                else:
                    mi.tags.append(tag)
def create_book_entry(self, mi, cover=None, add_duplicates=True,
                      force_id=None):
    """Create a new books row from metadata `mi`; return the id or None.

    Returns None when `add_duplicates` is False and an equivalent book
    already exists. `force_id` inserts with an explicit book id.
    """
    if mi.tags:
        # Copy so later mutation does not alter the caller's list.
        mi.tags = list(mi.tags)
    self._add_newbook_tag(mi)
    if not add_duplicates and self.has_book(mi):
        return None
    series_index = self.get_next_series_num_for(mi.series) \
        if mi.series_index is None else mi.series_index
    aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
    title = mi.title
    if isbytestring(aus):
        aus = aus.decode(preferred_encoding, 'replace')
    if isbytestring(title):
        title = title.decode(preferred_encoding, 'replace')
    if force_id is None:
        obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                                (title, series_index, aus))
        id = obj.lastrowid
    else:
        id = force_id
        obj = self.conn.execute(
                'INSERT INTO books(id, title, series_index, '
                'author_sort) VALUES (?, ?, ?, ?)',
                (id, title, series_index, aus))
    self.data.books_added([id], self)
    if mi.timestamp is None:
        mi.timestamp = utcnow()
    if mi.pubdate is None:
        mi.pubdate = UNDEFINED_DATE
    self.set_metadata(id, mi, ignore_errors=True, commit=True)
    if cover is not None:
        try:
            self.set_cover(id, cover)
        except:
            # Cover failure must not abort book creation; just log it.
            traceback.print_exc()
    return id
def add_books(self, paths, formats, metadata, add_duplicates=True,
              return_ids=False):
    '''
    Add a book to the database. The result cache is not updated.

    :param:`paths` List of paths to book files or file-like objects

    `formats` and `metadata` are iterables parallel to `paths`. Returns a
    2-tuple: (duplicates-or-None, ids-or-count). Duplicates, when present,
    are returned as (paths, formats, metadata) lists suitable for a retry
    with add_duplicates=True.
    '''
    formats, metadata = iter(formats), iter(metadata)
    duplicates = []
    ids = []
    postimport = []
    for path in paths:
        mi = metadata.next()
        self._add_newbook_tag(mi)
        format = formats.next()
        if not add_duplicates and self.has_book(mi):
            duplicates.append((path, format, mi))
            continue
        series_index = self.get_next_series_num_for(mi.series) \
            if mi.series_index is None else mi.series_index
        aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
        title = mi.title
        if isinstance(aus, str):
            aus = aus.decode(preferred_encoding, 'replace')
        if isinstance(title, str):
            # Bug fix: decode with 'replace' so an undecodable title cannot
            # raise UnicodeDecodeError and abort the whole import. This
            # matches create_book_entry() and import_book().
            title = title.decode(preferred_encoding, 'replace')
        obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                                (title, series_index, aus))
        id = obj.lastrowid
        self.data.books_added([id], self)
        ids.append(id)
        if mi.timestamp is None:
            mi.timestamp = utcnow()
        if mi.pubdate is None:
            mi.pubdate = UNDEFINED_DATE
        self.set_metadata(id, mi, commit=True, ignore_errors=True)
        # Let import plugins transform the file; the plugin may change the
        # extension, so re-derive the format from the returned path.
        npath = self.run_import_plugins(path, format)
        format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
        stream = lopen(npath, 'rb')
        format = check_ebook_format(stream, format)
        self.add_format(id, format, stream, index_is_id=True)
        stream.close()
        postimport.append((id, format))
    self.conn.commit()
    self.data.refresh_ids(self, ids)  # Needed to update format list and size
    for book_id, fmt in postimport:
        run_plugins_on_postimport(self, book_id, fmt)
    if duplicates:
        paths = list(duplicate[0] for duplicate in duplicates)
        formats = list(duplicate[1] for duplicate in duplicates)
        metadata = list(duplicate[2] for duplicate in duplicates)
        return (paths, formats, metadata), (ids if return_ids else
                len(ids))
    return None, (ids if return_ids else len(ids))
def import_book(self, mi, formats, notify=True, import_hooks=True,
                apply_import_tags=True, preserve_uuid=False):
    """Create a book from metadata `mi` and attach the files in `formats`.

    'opf' files in `formats` are skipped. Returns the new book id.
    """
    series_index = self.get_next_series_num_for(mi.series) \
        if mi.series_index is None else mi.series_index
    if apply_import_tags:
        self._add_newbook_tag(mi)
    if not mi.title:
        mi.title = _('Unknown')
    if not mi.authors:
        mi.authors = [_('Unknown')]
    aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
    if isinstance(aus, str):
        aus = aus.decode(preferred_encoding, 'replace')
    title = mi.title if isinstance(mi.title, unicode) else \
            mi.title.decode(preferred_encoding, 'replace')
    obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                            (title, series_index, aus))
    id = obj.lastrowid
    self.data.books_added([id], self)
    if mi.timestamp is None:
        mi.timestamp = utcnow()
    if mi.pubdate is None:
        mi.pubdate = UNDEFINED_DATE
    self.set_metadata(id, mi, ignore_errors=True, commit=True)
    if preserve_uuid and mi.uuid:
        self.set_uuid(id, mi.uuid, commit=False)
    for path in formats:
        ext = os.path.splitext(path)[1][1:].lower()
        if ext == 'opf':
            continue
        if import_hooks:
            self.add_format_with_hooks(id, ext, path, index_is_id=True)
        else:
            with lopen(path, 'rb') as f:
                self.add_format(id, ext, f, index_is_id=True)
    # Mark the book dirty, It probably already has been done by
    # set_metadata, but probably isn't good enough
    self.dirtied([id], commit=False)
    self.conn.commit()
    self.data.refresh_ids(self, [id])  # Needed to update format list and size
    if notify:
        self.notify('add', [id])
    return id
def get_top_level_move_items(self):
    """Return (items, path_map) of top-level library entries to move.

    `items` are the entries of the library directory that belong to the
    library (book folders plus the metadata files); `path_map` maps the
    (possibly lowercased) key back to the on-disk name.
    """
    items = set(os.listdir(self.library_path))
    paths = set([])
    for x in self.data.universal_set():
        path = self.path(x, index_is_id=True)
        # Only the first path component (the author folder) matters here.
        path = path.split(os.sep)[0]
        paths.add(path)
    paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
    path_map = {}
    for x in paths:
        path_map[x] = x
    if not self.is_case_sensitive:
        # Compare case-insensitively on filesystems that do.
        for x in items:
            path_map[x.lower()] = x
        items = set(path_map)
        paths = set([x.lower() for x in paths])
    items = items.intersection(paths)
    return items, path_map
def move_library_to(self, newloc, progress=None):
    """Copy the library to `newloc`, reconnect there, then delete the old copy.

    `progress`, when given, is called with each top-level item name as it
    is copied.
    """
    if progress is None:
        progress = lambda x:x
    if not os.path.exists(newloc):
        os.makedirs(newloc)
    old_dirs = set([])
    items, path_map = self.get_top_level_move_items()
    for x in items:
        src = os.path.join(self.library_path, x)
        dest = os.path.join(newloc, path_map[x])
        if os.path.isdir(src):
            if os.path.exists(dest):
                shutil.rmtree(dest)
            shutil.copytree(src, dest)
            # Remember the source dir so it can be removed once the new
            # location is live.
            old_dirs.add(src)
        else:
            if os.path.exists(dest):
                os.remove(dest)
            shutil.copyfile(src, dest)
        x = path_map[x]
        if not isinstance(x, unicode):
            x = x.decode(filesystem_encoding, 'replace')
        progress(x)

    dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
    opath = self.dbpath
    self.conn.close()
    self.library_path, self.dbpath = newloc, dbpath
    self.connect()
    try:
        os.unlink(opath)
    except:
        # Best effort: leaving the old db behind is harmless.
        pass
    for dir in old_dirs:
        try:
            shutil.rmtree(dir)
        except:
            pass
def __iter__(self):
    """Iterate over every non-deleted record row in the data cache."""
    for record in self.data._data:
        if record is None:
            continue
        yield record

def all_ids(self):
    """Yield the database id of every book record."""
    id_col = self.FIELD_MAP['id']
    for record in self:
        yield record[id_col]
def migrate_old(self, db, progress):
    """Migrate an old-format calibre database `db` into this library.

    Copies the books table and all auxiliary tables, then copies every
    format file and cover, reporting progress via the Qt `progress`
    dialog. Returns the number of books migrated.
    """
    from PyQt4.QtCore import QCoreApplication
    header = _(u'<p>Migrating old database to ebook library in %s<br><center>')%self.library_path
    progress.setValue(0)
    progress.setLabelText(header)
    QCoreApplication.processEvents()
    db.conn.row_factory = lambda cursor, row: tuple(row)
    db.conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
    books = db.conn.get('SELECT id, title, sort, timestamp, series_index, author_sort, isbn FROM books ORDER BY id ASC')
    progress.setAutoReset(False)
    progress.setRange(0, len(books))

    for book in books:
        # Bug fix: the column list and the SELECT above both have seven
        # entries, but the original supplied eight '?' placeholders, which
        # makes sqlite raise an error on every row.
        self.conn.execute('INSERT INTO books(id, title, sort, timestamp, series_index, author_sort, isbn) VALUES(?, ?, ?, ?, ?, ?, ?);', book)

    tables = '''
    authors  ratings      tags    series books_tags_link
    comments               publishers
    books_authors_link     conversion_options
    books_publishers_link
    books_ratings_link
    books_series_link      feeds
    '''.split()
    for table in tables:
        rows = db.conn.get('SELECT * FROM %s ORDER BY id ASC'%table)
        for row in rows:
            # Build one '?' per column so the statement matches any schema.
            self.conn.execute('INSERT INTO %s VALUES(%s)'%(table, ','.join(repeat('?', len(row)))), row)

    self.conn.commit()
    self.refresh('timestamp', True)
    for i, book in enumerate(books):
        progress.setLabelText(header+_(u'Copying <b>%s</b>')%book[1])
        id = book[0]
        self.set_path(id, True)
        formats = db.formats(id, index_is_id=True)
        if not formats:
            formats = []
        else:
            formats = formats.split(',')
        for format in formats:
            data = db.format(id, format, index_is_id=True)
            if data:
                self.add_format(id, format, cStringIO.StringIO(data), index_is_id=True)
        cover = db.cover(id, index_is_id=True)
        if cover:
            self.set_cover(id, cover)
        progress.setValue(i+1)
    self.conn.commit()
    progress.setLabelText(_('Compacting database'))
    self.vacuum()
    progress.reset()
    return len(books)
# The following four methods delegate to module-level functions of the
# same name, passing this database instance through where needed.

def find_books_in_directory(self, dirpath, single_book_per_directory):
    return find_books_in_directory(dirpath, single_book_per_directory)

def import_book_directory_multiple(self, dirpath, callback=None,
        added_ids=None):
    return import_book_directory_multiple(self, dirpath, callback=callback, added_ids=added_ids)

def import_book_directory(self, dirpath, callback=None, added_ids=None):
    return import_book_directory(self, dirpath, callback=callback, added_ids=added_ids)

def recursive_import(self, root, single_book_per_directory=True,
        callback=None, added_ids=None):
    return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids)
def add_custom_book_data(self, book_id, name, val):
    """Store JSON-serialized `val` for `book_id` under plugin-data key `name`.

    Raises ValueError when the book does not exist.
    """
    x = self.conn.get('SELECT id FROM books WHERE ID=?', (book_id,), all=False)
    if x is None:
        raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
    # Do the json encode first, in case it throws an exception
    s = json.dumps(val, default=to_json)
    self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
            VALUES(?, ?, ?)''', (book_id, name, s))
    self.commit()

def add_multiple_custom_book_data(self, name, vals, delete_first=False):
    """Bulk-store plugin data: `vals` maps book_id -> value for key `name`."""
    if delete_first:
        self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
    self.conn.executemany(
        'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
        [(book_id, name, json.dumps(val, default=to_json))
            for book_id, val in vals.iteritems()])
    self.commit()
def get_custom_book_data(self, book_id, name, default=None):
    """Return the plugin data stored for (`book_id`, `name`), or `default`."""
    try:
        s = self.conn.get('''select val FROM books_plugin_data
                WHERE book=? AND name=?''', (book_id, name), all=False)
        if s is None:
            return default
        return json.loads(s, object_hook=from_json)
    except:
        # Deliberately best-effort: any failure (bad JSON, db error) yields
        # the default. NOTE(review): the bare except also swallows
        # KeyboardInterrupt; `except Exception` would be safer.
        pass
    return default

def get_all_custom_book_data(self, name, default=None):
    """Return {book_id: value} for every book with plugin data key `name`."""
    try:
        s = self.conn.get('''select book, val FROM books_plugin_data
                WHERE name=?''', (name,))
        if s is None:
            return default
        res = {}
        for r in s:
            res[r[0]] = json.loads(r[1], object_hook=from_json)
        return res
    except:
        # Same deliberate best-effort fallback as get_custom_book_data.
        pass
    return default
def delete_custom_book_data(self, book_id, name):
    """Delete the plugin data stored for (`book_id`, `name`)."""
    self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
            (book_id, name))
    self.commit()

def delete_all_custom_book_data(self, name):
    """Delete plugin data key `name` for every book."""
    self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
    self.commit()

def get_ids_for_custom_book_data(self, name):
    """Return the list of book ids that have plugin data stored under `name`."""
    s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
    return [x[0] for x in s]
def get_usage_count_by_id(self, field):
    """Return [(item_id, use_count), ...] for a many-many `field`.

    Raises ValueError when `field` has no link table (i.e. it is not an
    is_multiple field).
    """
    fm = self.field_metadata[field]
    if not fm.get('link_column', None):
        # Bug fix: the original raised ValueError('%s is not ...') without
        # ever interpolating `field`, so the message showed a literal '%s'.
        raise ValueError('%s is not an is_multiple field' % field)
    return self.conn.get(
        'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
            fm['link_column'], fm['table']))
def all_author_names(self):
    """Return the set of all author names; stored '|' markers become commas."""
    col = self.FIELD_MAP['authors']
    names = set()
    for record in self.data.iterall():
        packed = record[col]
        if not packed:
            continue
        names.update(part.replace('|', ',') for part in packed.split(','))
    return names
def all_tag_names(self):
    """Return the set of all individual tag names used by any book record."""
    col = self.FIELD_MAP['tags']
    names = set()
    for record in self.data.iterall():
        packed = record[col]
        if packed:
            names.update(packed.split(','))
    return names
def all_publisher_names(self):
    """Return the set of all publisher names used by any book record."""
    col = self.FIELD_MAP['publisher']
    return {rec[col] for rec in self.data.iterall() if rec[col]}

def all_series_names(self):
    """Return the set of all series names used by any book record."""
    col = self.FIELD_MAP['series']
    return {rec[col] for rec in self.data.iterall() if rec[col]}
| gpl-3.0 | 2,626,218,342,285,648,000 | -2,772,320,749,557,528,000 | 41.966475 | 149 | 0.526724 | false |
Arcanemagus/SickRage | lib/pysrt/commands.py | 71 | 8471 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-all
import os
import re
import sys
import codecs
import shutil
import argparse
from textwrap import dedent
from chardet import detect
from pysrt import SubRipFile, SubRipTime, VERSION_STRING
def underline(string):
    """Wrap *string* in the ANSI escape codes for underlined terminal text."""
    return '\033[4m{0}\033[0m'.format(string)
class TimeAwareArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that accepts a leading time offset such as ``-1m30s``.

    argparse would normally treat ``-1m30s`` as an unknown option flag.
    Before delegating to the stock parser, a ``--`` separator is inserted
    in front of the first argument that looks like a time representation,
    so it and everything after it are parsed as positionals.
    """

    RE_TIME_REPRESENTATION = re.compile(r'^\-?(\d+[hms]{0,2}){1,4}$')

    def parse_args(self, args=None, namespace=None):
        if args is None:
            # Bug fix: the original crashed here with enumerate(None);
            # fall back to sys.argv like the stock ArgumentParser does.
            args = sys.argv[1:]
        # Work on a copy so the caller's list is not mutated.
        args = list(args)
        for index, arg in enumerate(args):
            if self.RE_TIME_REPRESENTATION.match(arg):
                args.insert(index, '--')
                break
        return super(TimeAwareArgumentParser, self).parse_args(args, namespace)
class SubRipShifter(object):
    """Implements the ``srt`` command line tool.

    Builds an argparse command tree (shift / rate / split / break) and
    applies the chosen operation to a SubRip subtitle file, optionally
    editing it in place with a ``.bak`` backup.
    """

    BACKUP_EXTENSION = '.bak'
    # Parses one '<digits><unit>' pair of a time string like '1m30s'.
    RE_TIME_STRING = re.compile(r'(\d+)([hms]{0,2})')
    # Multipliers converting each supported unit suffix to milliseconds.
    UNIT_RATIOS = {
        'ms': 1,
        '': SubRipTime.SECONDS_RATIO,
        's': SubRipTime.SECONDS_RATIO,
        'm': SubRipTime.MINUTES_RATIO,
        'h': SubRipTime.HOURS_RATIO,
    }
    DESCRIPTION = dedent("""\
        Srt subtitle editor

        It can either shift, split or change the frame rate.
    """)
    TIMESTAMP_HELP = "A timestamp in the form: [-][Hh][Mm]S[s][MSms]"
    SHIFT_EPILOG = dedent("""\

        Examples:
            1 minute and 12 seconds foreward (in place):
                $ srt -i shift 1m12s movie.srt

            half a second foreward:
                $ srt shift 500ms movie.srt > othername.srt

            1 second and half backward:
                $ srt -i shift -1s500ms movie.srt

            3 seconds backward:
                $ srt -i shift -3 movie.srt
    """)
    RATE_EPILOG = dedent("""\

        Examples:
            Convert 23.9fps subtitles to 25fps:
                $ srt -i rate 23.9 25 movie.srt
    """)
    LIMITS_HELP = "Each parts duration in the form: [Hh][Mm]S[s][MSms]"
    SPLIT_EPILOG = dedent("""\

        Examples:
            For a movie in 2 parts with the first part 48 minutes and 18 seconds long:
                $ srt split 48m18s movie.srt
                => creates movie.1.srt and movie.2.srt

            For a movie in 3 parts of 20 minutes each:
                $ srt split 20m 20m movie.srt
                => creates movie.1.srt, movie.2.srt and movie.3.srt
    """)
    FRAME_RATE_HELP = "A frame rate in fps (commonly 23.9 or 25)"
    ENCODING_HELP = dedent("""\
        Change file encoding. Useful for players accepting only latin1 subtitles.
        List of supported encodings: http://docs.python.org/library/codecs.html#standard-encodings
    """)
    BREAK_EPILOG = dedent("""\
        Break lines longer than defined length
    """)
    LENGTH_HELP = "Maximum number of characters per line"

    def __init__(self):
        # Set when editing in place: path the result is written back to.
        self.output_file_path = None

    def build_parser(self):
        """Build and return the argparse command tree."""
        parser = TimeAwareArgumentParser(description=self.DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument('-i', '--in-place', action='store_true', dest='in_place',
            help="Edit file in-place, saving a backup as file.bak (do not works for the split command)")
        parser.add_argument('-e', '--output-encoding', metavar=underline('encoding'), action='store', dest='output_encoding',
            type=self.parse_encoding, help=self.ENCODING_HELP)
        parser.add_argument('-v', '--version', action='version', version='%%(prog)s %s' % VERSION_STRING)
        subparsers = parser.add_subparsers(title='commands')

        shift_parser = subparsers.add_parser('shift', help="Shift subtitles by specified time offset", epilog=self.SHIFT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        shift_parser.add_argument('time_offset', action='store', metavar=underline('offset'),
            type=self.parse_time, help=self.TIMESTAMP_HELP)
        shift_parser.set_defaults(action=self.shift)

        rate_parser = subparsers.add_parser('rate', help="Convert subtitles from a frame rate to another", epilog=self.RATE_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        rate_parser.add_argument('initial', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.add_argument('final', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.set_defaults(action=self.rate)

        split_parser = subparsers.add_parser('split', help="Split a file in multiple parts", epilog=self.SPLIT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        split_parser.add_argument('limits', action='store', nargs='+', type=self.parse_time, help=self.LIMITS_HELP)
        split_parser.set_defaults(action=self.split)

        break_parser = subparsers.add_parser('break', help="Break long lines", epilog=self.BREAK_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        break_parser.add_argument('length', action='store', type=int, help=self.LENGTH_HELP)
        break_parser.set_defaults(action=self.break_lines)

        parser.add_argument('file', action='store')
        return parser

    def run(self, args):
        """Parse *args* and execute the selected sub-command."""
        self.arguments = self.build_parser().parse_args(args)
        if self.arguments.in_place:
            self.create_backup()
        self.arguments.action()

    def parse_time(self, time_string):
        """Convert '[-][Hh][Mm]S[s][MSms]' into a signed millisecond count."""
        negative = time_string.startswith('-')
        if negative:
            time_string = time_string[1:]
        ordinal = sum(int(value) * self.UNIT_RATIOS[unit] for value, unit
            in self.RE_TIME_STRING.findall(time_string))
        return -ordinal if negative else ordinal

    def parse_encoding(self, encoding_name):
        """argparse type-checker: validate *encoding_name* against codecs."""
        try:
            codecs.lookup(encoding_name)
        except LookupError as error:
            # Bug fix: BaseException.message was deprecated in Python 2.6
            # and removed in Python 3; str(error) carries the same text.
            raise argparse.ArgumentTypeError(str(error))
        return encoding_name

    def shift(self):
        """Shift every subtitle by the requested millisecond offset."""
        self.input_file.shift(milliseconds=self.arguments.time_offset)
        self.input_file.write_into(self.output_file)

    def rate(self):
        """Rescale all timestamps from the initial to the final frame rate."""
        ratio = self.arguments.final / self.arguments.initial
        self.input_file.shift(ratio=ratio)
        self.input_file.write_into(self.output_file)

    def split(self):
        """Split the input into one numbered file per requested duration."""
        limits = [0] + self.arguments.limits + [self.input_file[-1].end.ordinal + 1]
        base_name, extension = os.path.splitext(self.arguments.file)
        for index, (start, end) in enumerate(zip(limits[:-1], limits[1:])):
            file_name = '%s.%s%s' % (base_name, index + 1, extension)
            part_file = self.input_file.slice(ends_after=start, starts_before=end)
            # Each part restarts at time zero with renumbered indexes.
            part_file.shift(milliseconds=-start)
            part_file.clean_indexes()
            part_file.save(path=file_name, encoding=self.output_encoding)

    def create_backup(self):
        """Copy the input to file.bak and arrange to write back in place."""
        backup_file = self.arguments.file + self.BACKUP_EXTENSION
        if not os.path.exists(backup_file):
            shutil.copy2(self.arguments.file, backup_file)
        self.output_file_path = self.arguments.file
        # Read from the backup while the original becomes the output.
        self.arguments.file = backup_file

    def break_lines(self):
        """Re-wrap each subtitle's text at the requested maximum length."""
        split_re = re.compile(r'(.{,%i})(?:\s+|$)' % self.arguments.length)
        for item in self.input_file:
            item.text = '\n'.join(split_re.split(item.text)[1::2])
        self.input_file.write_into(self.output_file)

    @property
    def output_encoding(self):
        """Encoding used for output; defaults to the detected input encoding."""
        return self.arguments.output_encoding or self.input_file.encoding

    @property
    def input_file(self):
        """Lazily open the input file, auto-detecting its encoding."""
        if not hasattr(self, '_source_file'):
            with open(self.arguments.file, 'rb') as f:
                content = f.read()
                encoding = detect(content).get('encoding')
                encoding = self.normalize_encoding(encoding)

            self._source_file = SubRipFile.open(self.arguments.file,
                encoding=encoding, error_handling=SubRipFile.ERROR_LOG)
        return self._source_file

    @property
    def output_file(self):
        """Lazily open the destination: the in-place target, or stdout."""
        if not hasattr(self, '_output_file'):
            if self.output_file_path:
                self._output_file = codecs.open(self.output_file_path, 'w+', encoding=self.output_encoding)
            else:
                self._output_file = sys.stdout
        return self._output_file

    def normalize_encoding(self, encoding):
        """Map chardet's 'UTF-8'-style names to codecs' 'utf_8' style."""
        return encoding.lower().replace('-', '_')
def main():
    """Command-line entry point: run the shifter on the process arguments."""
    SubRipShifter().run(sys.argv[1:])

if __name__ == '__main__':
    main()
| gpl-3.0 | 5,390,411,170,390,333,000 | 6,397,732,509,814,214,000 | 37.680365 | 178 | 0.624956 | false |
strands-project/robomongo | tests/gtest-1.7.0/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection; used below to include platform-specific flags.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

# Path to the compiled gtest_env_var_test_ binary exercised by this script.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')

# Work on a copy so the parent process environment is left untouched.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError if expected != actual, printing both values."""
  if expected != actual:
    # Parenthesized single-argument prints behave identically under both
    # Python 2 and 3; the original used Python-2-only print statements,
    # which are a syntax error under Python 3.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the variable is already absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ (optionally with `flag`) and returns its output."""
  command_line = [COMMAND]
  if flag is not None:
    command_line.append(flag)
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  # Environment variable names mirror flag names: GTEST_<FLAG_UPPERCASED>.
  env_var = 'GTEST_' + flag.upper()
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  # Unset the variable and check the flag falls back to its default.
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Data-driven check of every Google Test flag/env-var pairing."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    if IS_LINUX:
      # These flags only exist in the Linux build of Google Test.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')


if __name__ == '__main__':
  gtest_test_utils.Main()
| gpl-3.0 | 7,160,448,557,492,255,000 | -352,927,811,200,311,360 | 32.854369 | 79 | 0.708632 | false |
pgmillon/ansible | lib/ansible/modules/network/fortios/fortios_log_syslogd_override_setting.py | 23 | 12526 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_override_setting
short_description: Override settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_syslogd feature and override_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd_override_setting:
description:
- Override settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
override:
description:
- Enable/disable override syslog settings.
choices:
- enable
- disable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override settings for remote syslog server.
fortios_log_syslogd_override_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd_override_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
override: "enable"
port: "13"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd_override_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'override', 'port', 'server',
'source-ip', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def log_syslogd_override_setting(data, fos):
vdom = data['vdom']
log_syslogd_override_setting_data = data['log_syslogd_override_setting']
flattened_data = flatten_multilists_attributes(log_syslogd_override_setting_data)
filtered_data = filter_log_syslogd_override_setting_data(flattened_data)
return fos.set('log.syslogd',
'override-setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd(data, fos):
login(data)
if data['log_syslogd_override_setting']:
resp = log_syslogd_override_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd_override_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,465,982,930,459,144,000 | -3,131,099,749,343,366,700 | 31.117949 | 104 | 0.509181 | false |
Dave667/service | plugin.video.kinobaza.tv/resources/lib/demjson.py | 2 | 87948 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
r""" A JSON data encoder and decoder.
This Python module implements the JSON (http://json.org/) data
encoding format; a subset of ECMAScript (aka JavaScript) for encoding
primitive data types (numbers, strings, booleans, lists, and
associative arrays) in a language-neutral simple text-based syntax.
It can encode or decode between JSON formatted strings and native
Python data types. Normally you would use the encode() and decode()
functions defined by this module, but if you want more control over
the processing you can use the JSON class.
This implementation tries to be as completely conforming to all
intricacies of the standards as possible. It can operate in strict
mode (which only allows JSON-compliant syntax) or a non-strict mode
(which allows much more of the whole ECMAScript permitted syntax).
This includes complete support for Unicode strings (including
surrogate-pairs for non-BMP characters), and all number formats
including negative zero and IEEE 754 non-numbers such a NaN or
Infinity.
The JSON/ECMAScript to Python type mappings are:
---JSON--- ---Python---
null None
undefined undefined (note 1)
Boolean (true,false) bool (True or False)
Integer int or long (note 2)
Float float
String str or unicode ( "..." or u"..." )
Array [a, ...] list ( [...] )
Object {a:b, ...} dict ( {...} )
-- Note 1. an 'undefined' object is declared in this module which
represents the native Python value for this type when in
non-strict mode.
-- Note 2. some ECMAScript integers may be up-converted to Python
floats, such as 1e+40. Also integer -0 is converted to
float -0, so as to preserve the sign (which ECMAScript requires).
In addition, when operating in non-strict mode, several IEEE 754
non-numbers are also handled, and are mapped to specific Python
objects declared in this module:
NaN (not a number) nan (float('nan'))
Infinity, +Infinity inf (float('inf'))
-Infinity neginf (float('-inf'))
When encoding Python objects into JSON, you may use types other than
native lists or dictionaries, as long as they support the minimal
interfaces required of all sequences or mappings. This means you can
use generators and iterators, tuples, UserDict subclasses, etc.
To make it easier to produce JSON encoded representations of user
defined classes, if the object has a method named json_equivalent(),
then it will call that method and attempt to encode the object
returned from it instead. It will do this recursively as needed and
before any attempt to encode the object using its default
strategies. Note that any json_equivalent() method should return
"equivalent" Python objects to be encoded, not an already-encoded
JSON-formatted string. There is no such aid provided to decode
JSON back into user-defined classes as that would dramatically
complicate the interface.
When decoding strings with this module it may operate in either
strict or non-strict mode. The strict mode only allows syntax which
is conforming to RFC 4627 (JSON), while the non-strict allows much
more of the permissible ECMAScript syntax.
The following are permitted when processing in NON-STRICT mode:
* Unicode format control characters are allowed anywhere in the input.
* All Unicode line terminator characters are recognized.
* All Unicode white space characters are recognized.
* The 'undefined' keyword is recognized.
* Hexadecimal number literals are recognized (e.g., 0xA6, 0177).
* String literals may use either single or double quote marks.
* Strings may contain \x (hexadecimal) escape sequences, as well as the
\v and \0 escape sequences.
* Lists may have omitted (elided) elements, e.g., [,,,,,], with
missing elements interpreted as 'undefined' values.
* Object properties (dictionary keys) can be of any of the
types: string literals, numbers, or identifiers (the later of
which are treated as if they are string literals)---as permitted
by ECMAScript. JSON only permits strings literals as keys.
Concerning non-strict and non-ECMAScript allowances:
* Octal numbers: If you allow the 'octal_numbers' behavior (which
is never enabled by default), then you can use octal integers
and octal character escape sequences (per the ECMAScript
standard Annex B.1.2). This behavior is allowed, if enabled,
because it was valid JavaScript at one time.
* Multi-line string literals: Strings which are more than one
line long (contain embedded raw newline characters) are never
permitted. This is neither valid JSON nor ECMAScript. Some other
JSON implementations may allow this, but this module considers
that behavior to be a mistake.
References:
* JSON (JavaScript Object Notation)
<http://json.org/>
* RFC 4627. The application/json Media Type for JavaScript Object Notation (JSON)
<http://www.ietf.org/rfc/rfc4627.txt>
* ECMA-262 3rd edition (1999)
<http://www.ecma-international.org/publications/files/ecma-st/ECMA-262.pdf>
* IEEE 754-1985: Standard for Binary Floating-Point Arithmetic.
<http://www.cs.berkeley.edu/~ejr/Projects/ieee754/>
"""
__author__ = "Deron Meranda <http://deron.meranda.us/>"
__date__ = "2010-10-10"
__version__ = "1.5"
__credits__ = """Copyright (c) 2006-2010 Deron E. Meranda <http://deron.meranda.us/>
Licensed under GNU LGPL 3.0 (GNU Lesser General Public License) or
later. See LICENSE.txt included with this software.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
or <http://www.fsf.org/licensing/>.
"""
# ------------------------------
# useful global constants
content_type = 'application/json'
file_ext = 'json'
hexdigits = '0123456789ABCDEFabcdef'
octaldigits = '01234567'
# ----------------------------------------------------------------------
# Decimal and float types.
#
# If a JSON number can not be stored in a Python float without loosing
# precision and the Python has the decimal type, then we will try to
# use decimal instead of float. To make this determination we need to
# know the limits of the float type, but Python doesn't have an easy
# way to tell what the largest floating-point number it supports. So,
# we determine the precision and scale of the float type by testing it.
try:
# decimal module was introduced in Python 2.4
import decimal
except ImportError:
decimal = None
def determine_float_precision():
    """Returns a tuple (significant_digits, max_exponent) for the float type.

    The number of significant decimal digits is estimated from the
    repr() of pi; the largest usable power-of-ten exponent is found
    with a binary search over what float() will accept without
    overflowing to infinity.
    """
    import math
    # Count the decimal digits of repr(pi).  The last two digits may
    # only be partial, so discount for them.
    whole, frac = repr(math.pi).split('.')
    digits = len(whole) + len(frac) - 2
    # Binary search for the largest exponent that float() can handle
    # without going infinite or raising an error.
    biggest = None
    lo, hi = 0, 1000
    while True:
        if lo + 1 == hi:
            biggest = lo - 1
            break
        if hi < lo:
            biggest = None
            break
        mid = (lo + hi) // 2
        try:
            r = repr(float('1e+%d' % mid))
        except ValueError:
            r = None
        else:
            # A finite float's repr starts with a decimal digit;
            # 'inf' or similar means we overflowed.
            if not r or r[0] < '0' or r[0] > '9':
                r = None
        if r:
            lo = mid
        else:
            hi = mid
    return digits, biggest
float_sigdigits, float_maxexp = determine_float_precision()
# ----------------------------------------------------------------------
# The undefined value.
#
# ECMAScript has an undefined value (similar to yet distinct from null).
# Neither Python nor strict JSON supports undefined, but to allow
# JavaScript behavior we must simulate it.
class _undefined_class(object):
    """Represents the ECMAScript 'undefined' value."""
    __slots__ = []  # no per-instance state needed for a singleton
    def __repr__(self):
        return '%s.undefined' % self.__module__
    def __str__(self):
        return 'undefined'
    def __nonzero__(self):
        # As in JavaScript, 'undefined' is falsy (Python 2 truth hook).
        return False
undefined = _undefined_class()
del _undefined_class
# ----------------------------------------------------------------------
# Non-Numbers: NaN, Infinity, -Infinity
#
# ECMAScript has official support for non-number floats, although
# strict JSON does not. Python doesn't either. So to support the
# full JavaScript behavior we must try to add them into Python, which
# is unfortunately a bit of black magic. If our python implementation
# happens to be built on top of IEEE 754 we can probably trick python
# into using real floats. Otherwise we must simulate it with classes.
def _nonnumber_float_constants():
    """Try to return the Nan, Infinity, and -Infinity float values.
    This is unnecessarily complex because there is no standard
    platform- independent way to do this in Python as the language
    (as opposed to some implementation of it) doesn't discuss
    non-numbers.  We try various strategies from the best to the
    worst.
    If this Python interpreter uses the IEEE 754 floating point
    standard then the returned values will probably be real instances
    of the 'float' type.  Otherwise a custom class object is returned
    which will attempt to simulate the correct behavior as much as
    possible.
    Returns the tuple (nan, inf, neginf).
    """
    try:
        # First, try (mostly portable) float constructor.  Works under
        # Linux x86 (gcc) and some Unices.
        nan = float('nan')
        inf = float('inf')
        neginf = float('-inf')
    except ValueError:
        try:
            # Try the AIX (PowerPC) float constructors
            nan = float('NaNQ')
            inf = float('INF')
            neginf = float('-INF')
        except ValueError:
            try:
                # Next, try binary unpacking.  Should work under
                # platforms using IEEE 754 floating point.
                import struct, sys
                xnan = '7ff8000000000000'.decode('hex') # Quiet NaN
                xinf = '7ff0000000000000'.decode('hex')
                xcheck = 'bdc145651592979d'.decode('hex') # -3.14159e-11
                # Could use float.__getformat__, but it is a new python feature,
                # so we use sys.byteorder.
                if sys.byteorder == 'big':
                    nan = struct.unpack('d', xnan)[0]
                    inf = struct.unpack('d', xinf)[0]
                    check = struct.unpack('d', xcheck)[0]
                else:
                    nan = struct.unpack('d', xnan[::-1])[0]
                    inf = struct.unpack('d', xinf[::-1])[0]
                    check = struct.unpack('d', xcheck[::-1])[0]
                neginf = - inf
                # Sanity check that the raw-byte decoding round-trips a
                # known value before trusting the unpacked non-numbers.
                if check != -3.14159e-11:
                    raise ValueError('Unpacking raw IEEE 754 floats does not work')
            except (ValueError, TypeError):
                # Punt, make some fake classes to simulate.  These are
                # not perfect though.  For instance nan * 1.0 == nan,
                # as expected, but 1.0 * nan == 0.0, which is wrong.
                class nan(float):
                    """An approximation of the NaN (not a number) floating point number."""
                    def __repr__(self): return 'nan'
                    def __str__(self): return 'nan'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x): return self
                    def __rmul__(self,x): return self
                    def __div__(self,x): return self
                    def __rdiv__(self,x): return self
                    def __divmod__(self,x): return (self,self)
                    def __rdivmod__(self,x): return (self,self)
                    def __mod__(self,x): return self
                    def __rmod__(self,x): return self
                    def __pow__(self,exp): return self
                    def __rpow__(self,exp): return self
                    def __neg__(self): return self
                    def __pos__(self): return self
                    def __abs__(self): return self
                    def __lt__(self,x): return False
                    def __le__(self,x): return False
                    def __eq__(self,x): return False
                    # BUG FIX: was misspelled '__neq__', which Python never
                    # invokes; '__ne__' is the real not-equal hook.
                    def __ne__(self,x): return True
                    def __ge__(self,x): return False
                    def __gt__(self,x): return False
                    def __complex__(self,*a): raise NotImplementedError('NaN can not be converted to a complex')
                if decimal:
                    nan = decimal.Decimal('NaN')
                else:
                    nan = nan()
                class inf(float):
                    """An approximation of the +Infinity floating point number."""
                    def __repr__(self): return 'inf'
                    def __str__(self): return 'inf'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x):
                        if x is neginf or x < 0:
                            return neginf
                        elif x == 0:
                            return nan
                        else:
                            return self
                    def __rmul__(self,x): return self.__mul__(x)
                    def __div__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float division')
                        elif x < 0:
                            return neginf
                        else:
                            return self
                    def __rdiv__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return 0.0
                    def __divmod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float divmod()')
                        elif x < 0:
                            return (nan,nan)
                        else:
                            return (self,self)
                    def __rdivmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return (nan, nan)
                        return (0.0, x)
                    def __mod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float modulo')
                        else:
                            return nan
                    def __rmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return x
                    def __pow__(self, exp):
                        if exp == 0:
                            return 1.0
                        else:
                            return self
                    def __rpow__(self, x):
                        if -1 < x < 1: return 0.0
                        elif x == 1.0: return 1.0
                        elif x is nan or x is neginf or x < 0:
                            return nan
                        else:
                            return self
                    def __neg__(self): return neginf
                    def __pos__(self): return self
                    def __abs__(self): return self
                    def __lt__(self,x): return False
                    def __le__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __eq__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    # BUG FIX: renamed from the typo '__neq__' to '__ne__'.
                    def __ne__(self,x):
                        if x is self:
                            return False
                        else:
                            return True
                    def __ge__(self,x): return True
                    def __gt__(self,x): return True
                    def __complex__(self,*a): raise NotImplementedError('Infinity can not be converted to a complex')
                if decimal:
                    inf = decimal.Decimal('Infinity')
                else:
                    inf = inf()
                class neginf(float):
                    """An approximation of the -Infinity floating point number."""
                    def __repr__(self): return '-inf'
                    def __str__(self): return '-inf'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x):
                        if x is self or x < 0:
                            return inf
                        elif x == 0:
                            return nan
                        else:
                            return self
                    # BUG FIX: was 'return self.__mul__(self)', which made
                    # every 'x * neginf' evaluate to +inf regardless of x;
                    # the right-hand operand to forward is x.
                    def __rmul__(self,x): return self.__mul__(x)
                    def __div__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float division')
                        elif x < 0:
                            return inf
                        else:
                            return self
                    def __rdiv__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return -0.0
                    def __divmod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float divmod()')
                        elif x < 0:
                            return (nan,nan)
                        else:
                            return (self,self)
                    def __rdivmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return (nan, nan)
                        return (-0.0, x)
                    def __mod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float modulo')
                        else:
                            return nan
                    def __rmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return x
                    def __pow__(self,exp):
                        if exp == 0:
                            return 1.0
                        else:
                            return self
                    def __rpow__(self, x):
                        # BUG FIX: the second test was a duplicated
                        # 'x is inf'; it was meant to test for neginf.
                        if x is nan or x is inf or x is neginf:
                            return nan
                        return 0.0
                    def __neg__(self): return inf
                    def __pos__(self): return self
                    def __abs__(self): return inf
                    def __lt__(self,x): return True
                    def __le__(self,x): return True
                    def __eq__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    # BUG FIX: renamed from the typo '__neq__' to '__ne__'.
                    def __ne__(self,x):
                        if x is self:
                            return False
                        else:
                            return True
                    def __ge__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __gt__(self,x): return False
                    def __complex__(self,*a): raise NotImplementedError('-Infinity can not be converted to a complex')
                if decimal:
                    neginf = decimal.Decimal('-Infinity')
                else:
                    neginf = neginf(0)
    return nan, inf, neginf
nan, inf, neginf = _nonnumber_float_constants()
del _nonnumber_float_constants
# ----------------------------------------------------------------------
# String processing helpers
# Characters that may not appear raw inside a JSON string literal:
# the double quote, the backslash, and all controls below U+0020.
unsafe_string_chars = '"\\' + ''.join([chr(i) for i in range(0x20)])
def skipstringsafe( s, start=0, end=None ):
    """Return the index of the first unsafe string character in s[start:end].

    Scans forward from 'start' and stops at the first character that
    can not appear raw in a JSON string literal (see
    unsafe_string_chars).  If every character up to 'end' is safe,
    'end' is returned.  When 'end' is None the whole string is scanned.
    """
    if end is None:
        # BUG FIX: previously 'end=None' was compared directly with
        # 'i < end', which always failed under Python 2 (so the scan
        # returned 'start' immediately), making the default unusable.
        end = len(s)
    i = start
    while i < end and s[i] not in unsafe_string_chars:
        i += 1
    return i
def skipstringsafe_slow( s, start=0, end=None ):
    """Return the index of the first unsafe string character in s[start:end].

    Portable (slower) variant that tests each character explicitly:
    a double quote, a backslash, or any control character below
    U+0020 stops the scan.  Returns 'end' if all characters are safe.
    """
    if end is None:
        end = len(s)
    pos = start
    while pos < end:
        ch = s[pos]
        if ch == '"' or ch == '\\' or ord(ch) <= 0x1f:
            break
        pos += 1
    return pos
def extend_list_with_sep( orig_seq, extension_seq, sepchar='' ):
    """Append the items of extension_seq onto orig_seq in place.

    When 'sepchar' is non-empty it is inserted between consecutive
    appended items (but not before the first one).
    """
    if not sepchar:
        orig_seq.extend( extension_seq )
        return
    first = True
    for item in extension_seq:
        if not first:
            orig_seq.append( sepchar )
        orig_seq.append( item )
        first = False
def extend_and_flatten_list_with_sep( orig_seq, extension_seq, separator='' ):
    """Extend orig_seq with the items of each sub-sequence in extension_seq.

    When 'separator' is non-empty it is appended between consecutive
    sub-sequences (never before the first).
    """
    first = True
    for part in extension_seq:
        if not first and separator:
            orig_seq.append( separator )
        orig_seq.extend( part )
        first = False
# ----------------------------------------------------------------------
# Unicode helpers
#
# JSON requires that all JSON implementations must support the UTF-32
# encoding (as well as UTF-8 and UTF-16). But earlier versions of
# Python did not provide a UTF-32 codec. So we must implement UTF-32
# ourselves in case we need it.
def utf32le_encode( obj, errors='strict' ):
    """Encodes a Unicode string into a UTF-32LE encoded byte string."""
    import struct
    pack = struct.pack
    parts = []
    for ch in obj:
        cp = ord(ch)
        # Lone surrogate code points are prohibited in UTF-32 output.
        if 0xD800 <= cp <= 0xDFFF:
            if errors == 'ignore':
                continue
            elif errors == 'replace':
                cp = ord('?')
            else:
                raise UnicodeError('UTF-32 can not encode surrogate characters', 'U+%04X' % cp)
        parts.append( pack('<L', cp) )
    return ''.join( parts )
def utf32be_encode( obj, errors='strict' ):
    """Encodes a Unicode string into a UTF-32BE encoded byte string."""
    import struct
    pack = struct.pack
    parts = []
    for ch in obj:
        cp = ord(ch)
        # Lone surrogate code points are prohibited in UTF-32 output.
        if 0xD800 <= cp <= 0xDFFF:
            if errors == 'ignore':
                continue
            elif errors == 'replace':
                cp = ord('?')
            else:
                raise UnicodeError('UTF-32 can not encode surrogate characters', 'U+%04X' % cp)
        parts.append( pack('>L', cp) )
    return ''.join( parts )
def utf32le_decode( obj, errors='strict' ):
    """Decodes a UTF-32LE byte string into a Unicode string."""
    if len(obj) % 4 != 0:
        raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
    import struct
    unpack = struct.unpack
    # Each 4-byte little-endian word is one Unicode code point.
    return u''.join([ unichr(unpack('<L', obj[k:k+4])[0])
                      for k in range(0, len(obj), 4) ])
def utf32be_decode( obj, errors='strict' ):
    """Decodes a UTF-32BE byte string into a Unicode string."""
    if len(obj) % 4 != 0:
        raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
    import struct
    unpack = struct.unpack
    # Each 4-byte big-endian word is one Unicode code point.
    return u''.join([ unichr(unpack('>L', obj[k:k+4])[0])
                      for k in range(0, len(obj), 4) ])
def auto_unicode_decode( s ):
    """Takes a string and tries to convert it to a Unicode string.
    This will return a Python unicode string type corresponding to the
    input string (either str or unicode).  The character encoding is
    guessed by looking for either a Unicode BOM prefix, or by the
    rules specified by RFC 4627.  When in doubt it is assumed the
    input is encoded in UTF-8 (the default for JSON).
    Raises JSONDecodeError if the detected encoding is not decodable
    by this Python and can not be handled manually.
    """
    if isinstance(s, unicode):
        return s                # already decoded, nothing to do
    if len(s) < 4:
        return s.decode('utf8') # not enough bytes, assume default of utf-8
    # Look for BOM marker
    import codecs
    bom2 = s[:2]
    bom4 = s[:4]
    a, b, c, d = map(ord, s[:4]) # values of first four bytes
    # Check 4-byte BOMs before 2-byte ones: a UTF-32LE BOM starts with
    # the same two bytes as a UTF-16LE BOM, so order matters here.
    if bom4 == codecs.BOM_UTF32_LE:
        encoding = 'utf-32le'
        s = s[4:]
    elif bom4 == codecs.BOM_UTF32_BE:
        encoding = 'utf-32be'
        s = s[4:]
    elif bom2 == codecs.BOM_UTF16_LE:
        encoding = 'utf-16le'
        s = s[2:]
    elif bom2 == codecs.BOM_UTF16_BE:
        encoding = 'utf-16be'
        s = s[2:]
    # No BOM, so autodetect encoding used by looking at first four bytes
    # according to RFC 4627 section 3.  The heuristic relies on the
    # first two characters of any JSON text being ASCII.
    elif a==0 and b==0 and c==0 and d!=0: # UTF-32BE
        encoding = 'utf-32be'
    elif a==0 and b!=0 and c==0 and d!=0: # UTF-16BE
        encoding = 'utf-16be'
    elif a!=0 and b==0 and c==0 and d==0: # UTF-32LE
        encoding = 'utf-32le'
    elif a!=0 and b==0 and c!=0 and d==0: # UTF-16LE
        encoding = 'utf-16le'
    else: #if a!=0 and b!=0 and c!=0 and d!=0: # UTF-8
        # JSON spec says default is UTF-8, so always guess it
        # if we can't guess otherwise
        encoding = 'utf8'
    # Make sure the encoding is supported by Python
    try:
        # Only the success of the lookup matters; the codec object
        # itself is not used.
        cdk = codecs.lookup(encoding)
    except LookupError:
        if encoding.startswith('utf-32') \
               or encoding.startswith('ucs4') \
               or encoding.startswith('ucs-4'):
            # Python doesn't natively have a UTF-32 codec, but JSON
            # requires that it be supported. So we must decode these
            # manually.
            if encoding.endswith('le'):
                unis = utf32le_decode(s)
            else:
                unis = utf32be_decode(s)
        else:
            raise JSONDecodeError('this python has no codec for this character encoding',encoding)
    else:
        # Convert to unicode using a standard codec
        unis = s.decode(encoding)
    return unis
def surrogate_pair_as_unicode( c1, c2 ):
    """Takes a pair of unicode surrogates and returns the equivalent unicode character.

    The input pair must be a surrogate pair: c1 in the range U+D800 to
    U+DBFF (high surrogate) and c2 in the range U+DC00 to U+DFFF (low
    surrogate).  Raises JSONDecodeError otherwise.
    """
    hi, lo = ord(c1), ord(c2)
    if not (0xD800 <= hi <= 0xDBFF) or not (0xDC00 <= lo <= 0xDFFF):
        raise JSONDecodeError('illegal Unicode surrogate pair',(c1,c2))
    # Combine the 10 payload bits of each surrogate, then rebase past
    # the BMP.
    codepoint = 0x10000 + (((hi - 0xD800) << 10) | (lo - 0xDC00))
    return unichr(codepoint)
def unicode_as_surrogate_pair( c ):
    """Takes a single unicode character and returns a sequence of surrogate pairs.

    The output of this function is a tuple consisting of one or two
    unicode characters.  A character outside the BMP is returned as a
    two-character surrogate pair; a BMP character is returned
    unchanged as a one-element tuple.
    """
    codepoint = ord(c)
    if codepoint < 0x10000:
        return (unichr(codepoint),)  # in BMP, surrogate pair not required
    offset = codepoint - 0x10000
    # Split the 20-bit offset into two 10-bit halves.
    high = 0xD800 | ((offset >> 10) & 0x3ff)
    low = 0xDC00 | (offset & 0x3ff)
    return (unichr(high), unichr(low))
# ----------------------------------------------------------------------
# Type identification
def isnumbertype( obj ):
    """Is the object of a Python number type (excluding complex)?"""
    # The module-level non-number constants count as numbers even when
    # they are not real floats.
    if obj is nan or obj is inf or obj is neginf:
        return True
    return isinstance(obj, (int,long,float)) and not isinstance(obj, bool)
def isstringtype( obj ):
    """Is the object of a Python string type?"""
    if isinstance(obj, basestring):
        return True
    # Must also check for some other pseudo-string types
    import types, UserString
    pseudo_strings = (UserString.UserString, UserString.MutableString)
    return isinstance(obj, types.StringTypes) or isinstance(obj, pseudo_strings)
# ----------------------------------------------------------------------
# Numeric helpers
def decode_hex( hexstring ):
    """Decodes a hexadecimal string into its integer value.

    Raises JSONDecodeError if the string contains a non-hex character.
    """
    # The builtin 'hex' codec is deliberately avoided: it can not
    # handle odd numbers of digits, nor raise the exception type we
    # want.
    value = 0
    for ch in hexstring:
        digit = '0123456789abcdef'.find(ch.lower())
        if digit < 0:
            raise JSONDecodeError('not a hexadecimal number',hexstring)
        value = value * 16 + digit
    return value
def decode_octal( octalstring ):
    """Decodes an octal string into its integer value."""
    value = 0
    for ch in octalstring:
        digit = '01234567'.find(ch)
        if digit < 0:
            raise JSONDecodeError('not an octal number',octalstring)
        # Multiply-and-add rather than shifting, so python 2.3 does not
        # issue a FutureWarning.
        value = value * 8 + digit
    return value
# ----------------------------------------------------------------------
# Exception classes.
class JSONError(ValueError):
    """Our base class for all JSON-related errors.

    The first positional argument is the error message; any additional
    arguments are contextual values (e.g. the offending text) which
    pretty_description() appends in truncated repr form.
    """
    def pretty_description(self):
        """Return a one-line human-readable description of the error."""
        err = self.args[0]
        if len(self.args) > 1:
            err += ': '
            for anum, a in enumerate(self.args[1:]):
                # Bug fix: this previously tested "anum > 1", which omitted
                # the comma between the first and second extra arguments.
                if anum > 0:
                    err += ', '
                astr = repr(a)
                if len(astr) > 20:
                    # Keep the description short; long values are elided.
                    astr = astr[:20] + '...'
                err += astr
        return err
class JSONDecodeError(JSONError):
    """An exception class raised when a JSON decoding error (syntax error) occurs.

    Raised throughout the decode_*() methods; inherits the
    pretty_description() helper from JSONError.
    """
class JSONEncodeError(JSONError):
    """An exception class raised when a python object can not be encoded as a JSON string.

    Raised throughout the encode_*() methods; inherits the
    pretty_description() helper from JSONError.
    """
#----------------------------------------------------------------------
# The main JSON encoder/decoder class.
class JSON(object):
    """An encoder/decoder for JSON data streams.

    Usually you will call the encode() or decode() methods.  The other
    methods are for lower-level processing.

    Whether the JSON parser runs in strict mode (which enforces exact
    compliance with the JSON spec) or the more forgiving non-strict mode
    can be affected by setting the 'strict' argument in the object's
    initialization; or by assigning True or False to the 'strict'
    property of the object.

    You can also adjust a finer-grained control over strictness by
    allowing or preventing specific behaviors.  You can get a list of
    all the available behaviors by accessing the 'behaviors' property.
    Likewise the allowed_behaviors and prevented_behaviors list which
    behaviors will be allowed and which will not.  Call the allow()
    or prevent() methods to adjust these.
    """
    # Maps the letter after a backslash to the character it denotes.
    # This table contains only the escapes defined by the JSON spec.
    _escapes_json = { # character escapes in JSON
        '"': '"',
        '/': '/',
        '\\': '\\',
        'b': '\b',
        'f': '\f',
        'n': '\n',
        'r': '\r',
        't': '\t',
        }
    # Superset of the JSON escapes: Javascript additionally allows
    # escaped single quotes, \v, and \0.
    _escapes_js = { # character escapes in Javascript
        '"': '"',
        '\'': '\'',
        '\\': '\\',
        'b': '\b',
        'f': '\f',
        'n': '\n',
        'r': '\r',
        't': '\t',
        'v': '\v',
        '0': '\x00'
        }
    # Following is a reverse mapping of escape characters, used when we
    # output JSON.  Only those escapes which are always safe (e.g., in JSON)
    # are here.  It won't hurt if we leave questionable ones out.
    _rev_escapes = {'\n': '\\n',
                    '\t': '\\t',
                    '\b': '\\b',
                    '\r': '\\r',
                    '\f': '\\f',
                    '"': '\\"',
                    '\\': '\\\\'}
def __init__(self, strict=False, compactly=True, escape_unicode=False):
"""Creates a JSON encoder/decoder object.
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
of values may not be convertable and will result in a
JSONEncodeError exception.
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and indentation
added to make it more readable.
If 'escape_unicode' is set to True, then all non-ASCII characters
will be represented as a unicode escape sequence; if False then
the actual real unicode character will be inserted if possible.
The 'escape_unicode' can also be a function, which when called
with a single argument of a unicode character will return True
if the character should be escaped or False if it should not.
If you wish to extend the encoding to ba able to handle
additional types, you should subclass this class and override
the encode_default() method.
"""
import sys
self._set_strictness(strict)
self._encode_compactly = compactly
try:
# see if we were passed a predicate function
b = escape_unicode(u'A')
self._encode_unicode_as_escapes = escape_unicode
except (ValueError, NameError, TypeError):
# Just set to True or False. We could use lambda x:True
# to make it more consistent (always a function), but it
# will be too slow, so we'll make explicit tests later.
self._encode_unicode_as_escapes = bool(escape_unicode)
self._sort_dictionary_keys = True
# The following is a boolean map of the first 256 characters
# which will quickly tell us which of those characters never
# need to be escaped.
self._asciiencodable = [32 <= c < 128 and not self._rev_escapes.has_key(chr(c))
for c in range(0,256)]
def _set_strictness(self, strict):
"""Changes the strictness behavior.
Pass True to be very strict about JSON syntax, or False to be looser.
"""
self._allow_any_type_at_start = not strict
self._allow_all_numeric_signs = not strict
self._allow_comments = not strict
self._allow_control_char_in_string = not strict
self._allow_hex_numbers = not strict
self._allow_initial_decimal_point = not strict
self._allow_js_string_escapes = not strict
self._allow_non_numbers = not strict
self._allow_nonescape_characters = not strict # "\z" -> "z"
self._allow_nonstring_keys = not strict
self._allow_omitted_array_elements = not strict
self._allow_single_quoted_strings = not strict
self._allow_trailing_comma_in_literal = not strict
self._allow_undefined_values = not strict
self._allow_unicode_format_control_chars = not strict
self._allow_unicode_whitespace = not strict
# Always disable this by default
self._allow_octal_numbers = False
def allow(self, behavior):
"""Allow the specified behavior (turn off a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently allowed by accessing the
allowed_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, True)
else:
raise AttributeError('Behavior is not known',behavior)
def prevent(self, behavior):
"""Prevent the specified behavior (turn on a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently prevented by accessing the
prevented_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, False)
else:
raise AttributeError('Behavior is not known',behavior)
def _get_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_')])
behaviors = property(_get_behaviors,
doc='List of known behaviors that can be passed to allow() or prevent() methods')
def _get_allowed_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and getattr(self,n)])
allowed_behaviors = property(_get_allowed_behaviors,
doc='List of known behaviors that are currently allowed')
def _get_prevented_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and not getattr(self,n)])
prevented_behaviors = property(_get_prevented_behaviors,
doc='List of known behaviors that are currently prevented')
def _is_strict(self):
return not self.allowed_behaviors
strict = property(_is_strict, _set_strictness,
doc='True if adherence to RFC 4627 syntax is strict, or False is more generous ECMAScript syntax is permitted')
def isws(self, c):
"""Determines if the given character is considered as white space.
Note that Javscript is much more permissive on what it considers
to be whitespace than does JSON.
Ref. ECMAScript section 7.2
"""
if not self._allow_unicode_whitespace:
return c in ' \t\n\r'
else:
if not isinstance(c,unicode):
c = unicode(c)
if c in u' \t\n\r\f\v':
return True
import unicodedata
return unicodedata.category(c) == 'Zs'
def islineterm(self, c):
"""Determines if the given character is considered a line terminator.
Ref. ECMAScript section 7.3
"""
if c == '\r' or c == '\n':
return True
if c == u'\u2028' or c == u'\u2029': # unicodedata.category(c) in ['Zl', 'Zp']
return True
return False
def strip_format_control_chars(self, txt):
"""Filters out all Unicode format control characters from the string.
ECMAScript permits any Unicode "format control characters" to
appear at any place in the source code. They are to be
ignored as if they are not there before any other lexical
tokenization occurs. Note that JSON does not allow them.
Ref. ECMAScript section 7.1.
"""
import unicodedata
txt2 = filter( lambda c: unicodedata.category(unicode(c)) != 'Cf',
txt )
return txt2
def decode_null(self, s, i=0):
"""Intermediate-level decoder for ECMAScript 'null' keyword.
Takes a string and a starting index, and returns a Python
None object and the index of the next unparsed character.
"""
if i < len(s) and s[i:i+4] == 'null':
return None, i+4
raise JSONDecodeError('literal is not the JSON "null" keyword', s)
    def encode_undefined(self):
        """Produces the ECMAScript 'undefined' keyword.

        Note that 'undefined' is not legal in strict JSON; callers are
        expected to check self._allow_undefined_values first.
        """
        return 'undefined'
    def encode_null(self):
        """Produces the JSON 'null' keyword.

        This is the encoding of the Python None object.
        """
        return 'null'
def decode_boolean(self, s, i=0):
"""Intermediate-level decode for JSON boolean literals.
Takes a string and a starting index, and returns a Python bool
(True or False) and the index of the next unparsed character.
"""
if s[i:i+4] == 'true':
return True, i+4
elif s[i:i+5] == 'false':
return False, i+5
raise JSONDecodeError('literal value is not a JSON boolean keyword',s)
def encode_boolean(self, b):
"""Encodes the Python boolean into a JSON Boolean literal."""
if bool(b):
return 'true'
return 'false'
    def decode_number(self, s, i=0, imax=None):
        """Intermediate-level decoder for JSON numeric literals.

        Takes a string and a starting index, and returns a Python
        suitable numeric type and the index of the next unparsed character.

        The returned numeric type can be either of a Python int,
        long, or float.  In addition some special non-numbers may
        also be returned such as nan, inf, and neginf (technically
        which are Python floats, but have no numeric value.)

        Relies on the module-level helpers decode_hex()/decode_octal()
        and the module globals nan, inf, neginf, hexdigits, octaldigits,
        float_maxexp and float_sigdigits (defined earlier in this file).

        Ref. ECMAScript section 8.5.
        """
        if imax is None:
            imax = len(s)
        # Detect initial sign character(s)
        if not self._allow_all_numeric_signs:
            if s[i] == '+' or (s[i] == '-' and i+1 < imax and \
                               s[i+1] in '+-'):
                raise JSONDecodeError('numbers in strict JSON may only have a single "-" as a sign prefix',s[i:])
        sign = +1
        j = i  # j will point after the sign prefix
        while j < imax and s[j] in '+-':
            if s[j] == '-': sign = sign * -1
            j += 1
        # Check for ECMAScript symbolic non-numbers
        if s[j:j+3] == 'NaN':
            if self._allow_non_numbers:
                return nan, j+3
            else:
                raise JSONDecodeError('NaN literals are not allowed in strict JSON')
        elif s[j:j+8] == 'Infinity':
            if self._allow_non_numbers:
                if sign < 0:
                    return neginf, j+8
                else:
                    return inf, j+8
            else:
                raise JSONDecodeError('Infinity literals are not allowed in strict JSON')
        elif s[j:j+2] in ('0x','0X'):
            if self._allow_hex_numbers:
                k = j+2
                while k < imax and s[k] in hexdigits:
                    k += 1
                n = sign * decode_hex( s[j+2:k] )
                return n, k
            else:
                raise JSONDecodeError('hexadecimal literals are not allowed in strict JSON',s[i:])
        else:
            # Decimal (or octal) number, find end of number.
            # General syntax is:  \d+[\.\d+][e[+-]?\d+]
            k = j  # will point to end of digit sequence
            could_be_octal = ( k+1 < imax and s[k] == '0' )  # first digit is 0
            decpt = None  # index into number of the decimal point, if any
            ept = None  # index into number of the e|E exponent start, if any
            esign = '+'  # sign of exponent
            sigdigits = 0  # number of significant digits (approx, counts end zeros)
            while k < imax and (s[k].isdigit() or s[k] in '.+-eE'):
                c = s[k]
                if c not in octaldigits:
                    could_be_octal = False
                if c == '.':
                    # A second '.' or a '.' after the exponent ends the number.
                    if decpt is not None or ept is not None:
                        break
                    else:
                        decpt = k-j
                elif c in 'eE':
                    if ept is not None:
                        break
                    else:
                        ept = k-j
                elif c in '+-':
                    # A sign is only valid immediately inside the exponent.
                    # NOTE(review): 'not ept' would also fire for ept == 0,
                    # but a number can never begin with e/E here.
                    if not ept:
                        break
                    esign = c
                else:  # digit
                    if not ept:
                        sigdigits += 1
                k += 1
            number = s[j:k]  # The entire number as a string
            # Handle octal integers first as an exception.  If octal
            # is not enabled (the ECMAScipt standard) then just do
            # nothing and treat the string as a decimal number.
            if could_be_octal and self._allow_octal_numbers:
                n = sign * decode_octal( number )
                return n, k
            # A decimal number.  Do a quick check on JSON syntax restrictions.
            if number[0] == '.' and not self._allow_initial_decimal_point:
                raise JSONDecodeError('numbers in strict JSON must have at least one digit before the decimal point',s[i:])
            elif number[0] == '0' and \
                     len(number) > 1 and number[1].isdigit():
                if self._allow_octal_numbers:
                    raise JSONDecodeError('initial zero digit is only allowed for octal integers',s[i:])
                else:
                    raise JSONDecodeError('initial zero digit must not be followed by other digits (octal numbers are not permitted)',s[i:])
            # Make sure decimal point is followed by a digit
            if decpt is not None:
                if decpt+1 >= len(number) or not number[decpt+1].isdigit():
                    raise JSONDecodeError('decimal point must be followed by at least one digit',s[i:])
            # Determine the exponential part
            if ept is not None:
                if ept+1 >= len(number):
                    raise JSONDecodeError('exponent in number is truncated',s[i:])
                try:
                    exponent = int(number[ept+1:])
                except ValueError:
                    raise JSONDecodeError('not a valid exponent in number',s[i:])
            else:
                exponent = 0
            # Try to make an int/long first.
            if decpt is None and exponent >= 0:
                # An integer
                if ept:
                    n = int(number[:ept])
                else:
                    n = int(number)
                n *= sign
                if exponent:
                    n *= 10**exponent
                if n == 0 and sign < 0:
                    # minus zero, must preserve negative sign so make a float
                    n = -0.0
            else:
                try:
                    # Fall back to Decimal when the magnitude or precision
                    # exceeds what a float can faithfully represent.
                    if decimal and (abs(exponent) > float_maxexp or sigdigits > float_sigdigits):
                        try:
                            n = decimal.Decimal(number)
                            n = n.normalize()
                        except decimal.Overflow:
                            if sign<0:
                                n = neginf
                            else:
                                n = inf
                        else:
                            n *= sign
                    else:
                        n = float(number) * sign
                except ValueError:
                    raise JSONDecodeError('not a valid JSON numeric literal', s[i:j])
            return n, k
    def encode_number(self, n):
        """Encodes a Python numeric type into a JSON numeric literal.

        The special non-numeric values of float('nan'), float('inf')
        and float('-inf') are translated into appropriate JSON
        literals.

        Note that Python complex types are not handled, as there is no
        ECMAScript equivalent type.
        """
        if isinstance(n, complex):
            # A complex with a zero imaginary part degrades to its real part.
            if n.imag:
                raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part',n)
            n = n.real
        if isinstance(n, (int,long)):
            return str(n)
        if decimal and isinstance(n, decimal.Decimal):
            return str(n)
        global nan, inf, neginf
        # Identity tests against the module sentinels come first; the float
        # branch below repeats the check via repr() for other NaN/inf objects.
        if n is nan:
            return 'NaN'
        elif n is inf:
            return 'Infinity'
        elif n is neginf:
            return '-Infinity'
        elif isinstance(n, float):
            # Check for non-numbers.
            # In python nan == inf == -inf, so must use repr() to distinguish
            reprn = repr(n).lower()
            if ('inf' in reprn and '-' in reprn) or n == neginf:
                return '-Infinity'
            elif 'inf' in reprn or n is inf:
                return 'Infinity'
            elif 'nan' in reprn or n is nan:
                return 'NaN'
            return repr(n)
        else:
            raise TypeError('encode_number expected an integral, float, or decimal number type',type(n))
    def decode_string(self, s, i=0, imax=None):
        """Intermediate-level decoder for JSON string literals.

        Takes a string and a starting index, and returns a Python
        string (or unicode string) and the index of the next unparsed
        character.

        Handles both JSON and (when allowed) Javascript escape syntax,
        including \\uXXXX surrogate pairs for non-BMP characters.
        """
        if imax is None:
            imax = len(s)
        if imax < i+2 or s[i] not in '"\'':
            raise JSONDecodeError('string literal must be properly quoted',s[i:])
        closer = s[i]
        if closer == '\'' and not self._allow_single_quoted_strings:
            raise JSONDecodeError('string literals must use double quotation marks in strict JSON',s[i:])
        i += 1  # skip quote
        # Choose the escape table; Javascript's is a superset of JSON's.
        if self._allow_js_string_escapes:
            escapes = self._escapes_js
        else:
            escapes = self._escapes_json
        ccallowed = self._allow_control_char_in_string
        chunks = []
        _append = chunks.append  # local binding for speed in the loop
        done = False
        # Holds a decoded high surrogate while we wait for its low half.
        high_surrogate = None
        while i < imax:
            c = s[i]
            # Make sure a high surrogate is immediately followed by a low surrogate
            if high_surrogate and (i+1 >= imax or s[i:i+2] != '\\u'):
                raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i:])
            if c == closer:
                i += 1  # skip end quote
                done = True
                break
            elif c == '\\':
                # Escaped character
                i += 1
                if i >= imax:
                    raise JSONDecodeError('escape in string literal is incomplete',s[i-1:])
                c = s[i]
                if '0' <= c <= '7' and self._allow_octal_numbers:
                    # Handle octal escape codes first so special \0 doesn't kick in yet.
                    # Follow Annex B.1.2 of ECMAScript standard.
                    if '0' <= c <= '3':
                        maxdigits = 3
                    else:
                        maxdigits = 2
                    for k in range(i, i+maxdigits+1):
                        if k >= imax or s[k] not in octaldigits:
                            break
                    n = decode_octal(s[i:k])
                    if n < 128:
                        _append( chr(n) )
                    else:
                        _append( unichr(n) )
                    i = k
                    continue
                if escapes.has_key(c):
                    _append(escapes[c])
                    i += 1
                elif c == 'u' or c == 'x':
                    i += 1
                    if c == 'u':
                        digits = 4
                    else:  # c == 'x'
                        if not self._allow_js_string_escapes:
                            raise JSONDecodeError(r'string literals may not use the \x hex-escape in strict JSON',s[i-1:])
                        digits = 2
                    if i+digits >= imax:
                        raise JSONDecodeError('numeric character escape sequence is truncated',s[i-1:])
                    n = decode_hex( s[i:i+digits] )
                    if high_surrogate:
                        # Decode surrogate pair and clear high surrogate
                        _append( surrogate_pair_as_unicode( high_surrogate, unichr(n) ) )
                        high_surrogate = None
                    elif n < 128:
                        # ASCII chars always go in as a str
                        _append( chr(n) )
                    elif 0xd800 <= n <= 0xdbff:  # high surrogate
                        if imax < i + digits + 2 or s[i+digits] != '\\' or s[i+digits+1] != 'u':
                            raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i-2:])
                        high_surrogate = unichr(n)  # remember until we get to the low surrogate
                    elif 0xdc00 <= n <= 0xdfff:  # low surrogate
                        raise JSONDecodeError('Low unicode surrogate must be proceeded by a high surrogate',s[i-2:])
                    else:
                        # Other chars go in as a unicode char
                        _append( unichr(n) )
                    i += digits
                else:
                    # Unknown escape sequence
                    if self._allow_nonescape_characters:
                        # ECMAScript behavior: "\z" simply means "z".
                        _append( c )
                        i += 1
                    else:
                        raise JSONDecodeError('unsupported escape code in JSON string literal',s[i-1:])
            elif ord(c) <= 0x1f:  # A control character
                if self.islineterm(c):
                    raise JSONDecodeError('line terminator characters must be escaped inside string literals',s[i:])
                elif ccallowed:
                    _append( c )
                    i += 1
                else:
                    raise JSONDecodeError('control characters must be escaped inside JSON string literals',s[i:])
            else:  # A normal character; not an escape sequence or end-quote.
                # Find a whole sequence of "safe" characters so we can append them
                # all at once rather than one a time, for speed.
                j = i
                i += 1
                while i < imax and s[i] not in unsafe_string_chars and s[i] != closer:
                    i += 1
                _append(s[j:i])
        if not done:
            raise JSONDecodeError('string literal is not terminated with a quotation mark',s)
        s = ''.join( chunks )
        return s, i
    def encode_string(self, s):
        """Encodes a Python string into a JSON string literal.

        The result is always surrounded by double quotes.  Characters
        are escaped according to self._rev_escapes, \\uXXXX escapes, or
        passed through verbatim depending on the escape_unicode setting
        chosen at construction time.
        """
        # Must handle instances of UserString specially in order to be
        # able to use ord() on it's simulated "characters".
        import UserString
        if isinstance(s, (UserString.UserString, UserString.MutableString)):
            def tochar(c):
                return c.data
        else:
            # Could use "lambda c:c", but that is too slow.  So we set to None
            # and use an explicit if test inside the loop.
            tochar = None
        chunks = []
        chunks.append('"')
        # Local bindings for speed inside the loop.
        revesc = self._rev_escapes
        asciiencodable = self._asciiencodable
        encunicode = self._encode_unicode_as_escapes
        i = 0
        imax = len(s)
        while i < imax:
            if tochar:
                c = tochar(s[i])
            else:
                c = s[i]
            cord = ord(c)
            if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool):
                # Contiguous runs of plain old printable ASCII can be copied
                # directly to the JSON output without worry (unless the user
                # has supplied a custom is-encodable function).
                j = i
                i += 1
                while i < imax:
                    if tochar:
                        c = tochar(s[i])
                    else:
                        c = s[i]
                    cord = ord(c)
                    if cord < 256 and asciiencodable[cord]:
                        i += 1
                    else:
                        break
                chunks.append( unicode(s[j:i]) )
            elif revesc.has_key(c):
                # Has a shortcut escape sequence, like "\n"
                chunks.append(revesc[c])
                i += 1
            elif cord <= 0x1F:
                # Always unicode escape ASCII-control characters
                chunks.append(r'\u%04x' % cord)
                i += 1
            elif 0xD800 <= cord <= 0xDFFF:
                # A raw surrogate character!  This should never happen
                # and there's no way to include it in the JSON output.
                # So all we can do is complain.
                cname = 'U+%04X' % cord
                raise JSONEncodeError('can not include or escape a Unicode surrogate character',cname)
            elif cord <= 0xFFFF:
                # Other BMP Unicode character
                if isinstance(encunicode, bool):
                    doesc = encunicode
                else:
                    # encunicode is the user-supplied per-character predicate.
                    doesc = encunicode( c )
                if doesc:
                    chunks.append(r'\u%04x' % cord)
                else:
                    chunks.append( c )
                i += 1
            else:  # ord(c) >= 0x10000
                # Non-BMP Unicode; must be escaped as a surrogate pair.
                if isinstance(encunicode, bool):
                    doesc = encunicode
                else:
                    doesc = encunicode( c )
                if doesc:
                    for surrogate in unicode_as_surrogate_pair(c):
                        chunks.append(r'\u%04x' % ord(surrogate))
                else:
                    chunks.append( c )
                i += 1
        chunks.append('"')
        return ''.join( chunks )
    def skip_comment(self, txt, i=0):
        """Skips an ECMAScript comment, either // or /* style.

        The contents of the comment are returned as a string, as well
        as the index of the character immediately after the comment.
        If there is no comment at position i, returns (None, i) unchanged.
        """
        if i+1 >= len(txt) or txt[i] != '/' or txt[i+1] not in '/*':
            return None, i  # not a comment; position is unchanged
        if not self._allow_comments:
            raise JSONDecodeError('comments are not allowed in strict JSON',txt[i:])
        multiline = (txt[i+1] == '*')
        istart = i
        i += 2
        # j will be set to the index just past the comment.
        while i < len(txt):
            if multiline:
                if txt[i] == '*' and i+1 < len(txt) and txt[i+1] == '/':
                    j = i+2
                    break
                elif txt[i] == '/' and i+1 < len(txt) and txt[i+1] == '*':
                    raise JSONDecodeError('multiline /* */ comments may not nest',txt[istart:i+1])
            else:
                if self.islineterm(txt[i]):
                    j = i  # line terminator is not part of comment
                    break
            i += 1
        if i >= len(txt):
            if not multiline:
                j = len(txt)  # // comment terminated by end of file is okay
            else:
                raise JSONDecodeError('comment was never terminated',txt[istart:])
        return txt[istart:j], j
def skipws(self, txt, i=0, imax=None, skip_comments=True):
"""Skips whitespace.
"""
if not self._allow_comments and not self._allow_unicode_whitespace:
if imax is None:
imax = len(txt)
while i < imax and txt[i] in ' \r\n\t':
i += 1
return i
else:
return self.skipws_any(txt, i, imax, skip_comments)
def skipws_any(self, txt, i=0, imax=None, skip_comments=True):
"""Skips all whitespace, including comments and unicode whitespace
Takes a string and a starting index, and returns the index of the
next non-whitespace character.
If skip_comments is True and not running in strict JSON mode, then
comments will be skipped over just like whitespace.
"""
if imax is None:
imax = len(txt)
while i < imax:
if txt[i] == '/':
cmt, i = self.skip_comment(txt, i)
if i < imax and self.isws(txt[i]):
i += 1
else:
break
return i
    def decode_composite(self, txt, i=0, imax=None):
        """Intermediate-level JSON decoder for composite literal types (array and object).

        Takes text and a starting index, and returns either a Python list or
        dictionary and the index of the next unparsed character.
        """
        if imax is None:
            imax = len(txt)
        i = self.skipws(txt, i, imax)
        starti = i  # kept for error messages
        if i >= imax or txt[i] not in '{[':
            raise JSONDecodeError('composite object must start with "[" or "{"',txt[i:])
        if txt[i] == '[':
            isdict = False
            closer = ']'
            obj = []
        else:
            isdict = True
            closer = '}'
            obj = {}
        i += 1  # skip opener
        i = self.skipws(txt, i, imax)
        if i < imax and txt[i] == closer:
            # empty composite
            i += 1
            done = True
        else:
            saw_value = False  # set to false at beginning and after commas
            done = False
            while i < imax:
                i = self.skipws(txt, i, imax)
                if i < imax and (txt[i] == ',' or txt[i] == closer):
                    c = txt[i]
                    i += 1
                    if c == ',':
                        if not saw_value:
                            # no preceeding value, an elided (omitted) element
                            if isdict:
                                raise JSONDecodeError('can not omit elements of an object (dictionary)')
                            if self._allow_omitted_array_elements:
                                if self._allow_undefined_values:
                                    obj.append( undefined )
                                else:
                                    obj.append( None )
                            else:
                                raise JSONDecodeError('strict JSON does not permit omitted array (list) elements',txt[i:])
                        saw_value = False
                        continue
                    else:  # c == closer
                        if not saw_value and not self._allow_trailing_comma_in_literal:
                            if isdict:
                                raise JSONDecodeError('strict JSON does not allow a final comma in an object (dictionary) literal',txt[i-2:])
                            else:
                                raise JSONDecodeError('strict JSON does not allow a final comma in an array (list) literal',txt[i-2:])
                        done = True
                        break
                # Decode the item
                if isdict and self._allow_nonstring_keys:
                    r = self.decodeobj(txt, i, identifier_as_string=True)
                else:
                    r = self.decodeobj(txt, i, identifier_as_string=False)
                if r:
                    if saw_value:
                        # two values without a separating comma
                        raise JSONDecodeError('values must be separated by a comma', txt[i:r[1]])
                    saw_value = True
                    i = self.skipws(txt, r[1], imax)
                    if isdict:
                        key = r[0]  # Ref 11.1.5
                        if not isstringtype(key):
                            if isnumbertype(key):
                                if not self._allow_nonstring_keys:
                                    raise JSONDecodeError('strict JSON only permits string literals as object properties (dictionary keys)',txt[starti:])
                            else:
                                raise JSONDecodeError('object properties (dictionary keys) must be either string literals or numbers',txt[starti:])
                        if i >= imax or txt[i] != ':':
                            raise JSONDecodeError('object property (dictionary key) has no value, expected ":"',txt[starti:])
                        i += 1
                        i = self.skipws(txt, i, imax)
                        rval = self.decodeobj(txt, i)
                        if rval:
                            i = self.skipws(txt, rval[1], imax)
                            obj[key] = rval[0]
                        else:
                            raise JSONDecodeError('object property (dictionary key) has no value',txt[starti:])
                    else:  # list
                        obj.append( r[0] )
                else:  # not r
                    if isdict:
                        raise JSONDecodeError('expected a value, or "}"',txt[i:])
                    elif not self._allow_omitted_array_elements:
                        raise JSONDecodeError('expected a value or "]"',txt[i:])
                    else:
                        raise JSONDecodeError('expected a value, "," or "]"',txt[i:])
            # end while
        if not done:
            if isdict:
                raise JSONDecodeError('object literal (dictionary) is not terminated',txt[starti:])
            else:
                raise JSONDecodeError('array literal (list) is not terminated',txt[starti:])
        return obj, i
    def decode_javascript_identifier(self, name):
        """Convert a JavaScript identifier into a Python string object.

        This method can be overriden by a subclass to redefine how JavaScript
        identifiers are turned into Python objects.  By default this just
        converts them into strings.
        """
        # Identity transform; subclasses may map identifiers to richer objects.
        return name
    def decodeobj(self, txt, i=0, imax=None, identifier_as_string=False, only_object_or_array=False):
        """Intermediate-level JSON decoder.

        Takes a string and a starting index, and returns a two-tuple consting
        of a Python object and the index of the next unparsed character.

        If there is no value at all (empty string, etc), the None is
        returned instead of a tuple.

        Dispatches on the first non-whitespace character to the
        appropriate decode_*() helper.
        """
        if imax is None:
            imax = len(txt)
        obj = None
        i = self.skipws(txt, i, imax)
        if i >= imax:
            raise JSONDecodeError('Unexpected end of input')
        c = txt[i]
        if c == '[' or c == '{':
            obj, i = self.decode_composite(txt, i, imax)
        elif only_object_or_array:
            # RFC 4627 requires the top-level value to be an object or array.
            raise JSONDecodeError('JSON document must start with an object or array type only', txt[i:i+20])
        elif c == '"' or c == '\'':
            obj, i = self.decode_string(txt, i, imax)
        elif c.isdigit() or c in '.+-':
            obj, i = self.decode_number(txt, i, imax)
        elif c.isalpha() or c in '_$':
            # Scan a keyword or (javascript) identifier.
            j = i
            while j < imax and (txt[j].isalnum() or txt[j] in '_$'):
                j += 1
            kw = txt[i:j]
            if kw == 'null':
                obj, i = None, j
            elif kw == 'true':
                obj, i = True, j
            elif kw == 'false':
                obj, i = False, j
            elif kw == 'undefined':
                if self._allow_undefined_values:
                    # 'undefined' is a module-level sentinel defined earlier.
                    obj, i = undefined, j
                else:
                    raise JSONDecodeError('strict JSON does not allow undefined elements',txt[i:])
            elif kw == 'NaN' or kw == 'Infinity':
                obj, i = self.decode_number(txt, i)
            else:
                if identifier_as_string:
                    obj, i = self.decode_javascript_identifier(kw), j
                else:
                    raise JSONDecodeError('unknown keyword or identifier',kw)
        else:
            raise JSONDecodeError('can not decode value',txt[i:])
        return obj, i
def decode(self, txt):
"""Decodes a JSON-endoded string into a Python object."""
if self._allow_unicode_format_control_chars:
txt = self.strip_format_control_chars(txt)
r = self.decodeobj(txt, 0, only_object_or_array=not self._allow_any_type_at_start)
if not r:
raise JSONDecodeError('can not decode value',txt)
else:
obj, i = r
i = self.skipws(txt, i)
if i < len(txt):
raise JSONDecodeError('unexpected or extra text',txt[i:])
return obj
def encode(self, obj, nest_level=0):
"""Encodes the Python object into a JSON string representation.
This method will first attempt to encode an object by seeing
if it has a json_equivalent() method. If so than it will
call that method and then recursively attempt to encode
the object resulting from that call.
Next it will attempt to determine if the object is a native
type or acts like a squence or dictionary. If so it will
encode that object directly.
Finally, if no other strategy for encoding the object of that
type exists, it will call the encode_default() method. That
method currently raises an error, but it could be overridden
by subclasses to provide a hook for extending the types which
can be encoded.
"""
chunks = []
self.encode_helper(chunks, obj, nest_level)
return ''.join( chunks )
    def encode_helper(self, chunklist, obj, nest_level):
        """Appends the JSON encoding of obj onto the chunklist accumulator.

        Dispatches on the runtime type of obj; composite (sequence or
        mapping) types are delegated to encode_composite().
        """
        if hasattr(obj, 'json_equivalent'):
            json = self.encode_equivalent( obj, nest_level=nest_level )
            if json is not None:
                chunklist.append( json )
                return
        if obj is None:
            chunklist.append( self.encode_null() )
        elif obj is undefined:
            if self._allow_undefined_values:
                chunklist.append( self.encode_undefined() )
            else:
                raise JSONEncodeError('strict JSON does not permit "undefined" values')
        elif isinstance(obj, bool):
            # bool must be tested before int, since bool subclasses int.
            chunklist.append( self.encode_boolean(obj) )
        elif isinstance(obj, (int,long,float,complex)) or \
                 (decimal and isinstance(obj, decimal.Decimal)):
            chunklist.append( self.encode_number(obj) )
        elif isinstance(obj, basestring) or isstringtype(obj):
            chunklist.append( self.encode_string(obj) )
        else:
            self.encode_composite(chunklist, obj, nest_level)
    def encode_composite(self, chunklist, obj, nest_level):
        """Encodes just dictionaries, lists, or sequences.

        Basically handles any python type for which iter() can create
        an iterator object.

        This method is not intended to be called directly.  Use the
        encode() method instead.
        """
        try:
            # Is it a dictionary or UserDict?  Try iterkeys method first.
            it = obj.iterkeys()
        except AttributeError:
            try:
                # Is it a sequence?  Try to make an iterator for it.
                it = iter(obj)
            except TypeError:
                it = None
        if it is not None:
            # Does it look like a dictionary?  Check for a minimal dict or
            # UserDict interface.
            isdict = hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
            compactly = self._encode_compactly
            if isdict:
                chunklist.append('{')
                if compactly:
                    dictcolon = ':'
                else:
                    dictcolon = ' : '
            else:
                chunklist.append('[')
            if not compactly:
                # Indentation strings used only in pretty-printed mode.
                indent0 = ' ' * nest_level
                indent = ' ' * (nest_level+1)
                chunklist.append(' ')
            sequence_chunks = []  # use this to allow sorting afterwards if dict
            try:  # while not StopIteration
                numitems = 0
                while True:
                    obj2 = it.next()
                    if obj2 is obj:
                        raise JSONEncodeError('trying to encode an infinite sequence',obj)
                    if isdict and not isstringtype(obj2):
                        # Check JSON restrictions on key types
                        if isnumbertype(obj2):
                            if not self._allow_nonstring_keys:
                                raise JSONEncodeError('object properties (dictionary keys) must be strings in strict JSON',obj2)
                        else:
                            raise JSONEncodeError('object properties (dictionary keys) can only be strings or numbers in ECMAScript',obj2)
                    # Encode this item in the sequence and put into item_chunks
                    item_chunks = []
                    self.encode_helper( item_chunks, obj2, nest_level=nest_level+1 )
                    if isdict:
                        # For mappings, obj2 is the key; encode the value too.
                        item_chunks.append(dictcolon)
                        obj3 = obj[obj2]
                        self.encode_helper(item_chunks, obj3, nest_level=nest_level+2)
                    sequence_chunks.append(item_chunks)
                    numitems += 1
            except StopIteration:
                pass
            if isdict and self._sort_dictionary_keys:
                sequence_chunks.sort()  # Note sorts by JSON repr, not original Python object
            if compactly:
                sep = ','
            else:
                sep = ',\n' + indent
            # Join the per-item chunk lists into chunklist with separators.
            extend_and_flatten_list_with_sep( chunklist, sequence_chunks, sep )
            if not compactly:
                if numitems > 1:
                    chunklist.append('\n' + indent0)
                else:
                    chunklist.append(' ')
            if isdict:
                chunklist.append('}')
            else:
                chunklist.append(']')
        else:  # Can't create an iterator for the object
            json2 = self.encode_default( obj, nest_level=nest_level )
            chunklist.append( json2 )
def encode_equivalent( self, obj, nest_level=0 ):
    """Encode a user-defined class instance via its json_equivalent() method.

    If the object provides a callable json_equivalent() method, the value
    that method returns is encoded instead, and the resulting JSON text is
    returned.  If no such method is available, None is returned so that the
    caller can fall through to the next encoding strategy.

    Subclasses that wish to disable json_equivalent() handling can override
    this method to simply return None.

    Raises JSONEncodeError if json_equivalent() returns the object itself,
    which would otherwise recurse forever.
    """
    has_method = hasattr(obj, 'json_equivalent') \
                 and callable(getattr(obj, 'json_equivalent'))
    if not has_method:
        # Signal "no equivalent available; try the next strategy".
        return None
    equivalent = obj.json_equivalent()
    if equivalent is obj:
        # Try to prevent careless infinite recursion.
        raise JSONEncodeError('object has a json_equivalent() method that returns itself',obj)
    return self.encode( equivalent, nest_level=nest_level )
def encode_default( self, obj, nest_level=0 ):
    """Last-resort hook used to encode objects which are not straightforward.

    This method is intended to be overridden by subclasses which wish
    to extend this encoder to handle additional types; the base
    implementation unconditionally fails.

    Raises:
        JSONEncodeError: always, for every object passed in.
    """
    # Reaching this point means every other encoding strategy (iteration,
    # json_equivalent(), built-in types) has already been exhausted.
    raise JSONEncodeError('can not encode object into a JSON representation',obj)
# ------------------------------
def encode( obj, strict=False, compactly=True, escape_unicode=False, encoding=None ):
    """Encodes a Python object into a JSON-encoded string.

    If 'strict' is set to True, then only strictly-conforming JSON
    output will be produced.  Note that this means that some types
    of values may not be convertable and will result in a
    JSONEncodeError exception.

    If 'compactly' is set to True, then the resulting string will
    have all extraneous white space removed; if False then the
    string will be "pretty printed" with whitespace and indentation
    added to make it more readable.

    If 'escape_unicode' is set to True, then all non-ASCII characters
    will be represented as a unicode escape sequence; if False then
    the actual real unicode character will be inserted.  It may also
    be a callable which, given a character, says whether it must be
    escaped.

    If no encoding is specified (encoding=None) then the output will
    either be a Python string (if entirely ASCII) or a Python unicode
    string type.

    However if an encoding name is given then the returned value will
    be a python string which is the byte sequence encoding the JSON
    value.  As the default/recommended encoding for JSON is UTF-8,
    you should almost always pass in encoding='utf8'.
    """
    # Changes from the previous revision: removed an unused "import sys"
    # and collapsed the redundant "if cdk: pass / elif not cdk:" branch.
    encoder = None  # Custom codec encoding function
    bom = None      # Byte order mark to prepend to final output
    cdk = None      # Codec to use
    if encoding is not None:
        import codecs
        try:
            cdk = codecs.lookup(encoding)
        except LookupError:
            cdk = None
        if not cdk:
            # No built-in codec was found, see if it is something we
            # can do ourself.
            encoding = encoding.lower()
            if encoding.startswith('utf-32') or encoding.startswith('utf32') \
                   or encoding.startswith('ucs4') \
                   or encoding.startswith('ucs-4'):
                # Python doesn't natively have a UTF-32 codec, but JSON
                # requires that it be supported.  So we must encode these
                # manually.
                if encoding.endswith('le'):
                    encoder = utf32le_encode
                elif encoding.endswith('be'):
                    encoder = utf32be_encode
                else:
                    # No explicit endianness: default to BE and emit a BOM.
                    encoder = utf32be_encode
                    bom = codecs.BOM_UTF32_BE
            elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
                # Python has no UCS-2, but we can simulate with
                # UTF-16.  We just need to force us to not try to
                # encode anything past the BMP.
                encoding = 'utf-16'
                if not escape_unicode and not callable(escape_unicode):
                    escape_unicode = lambda c: (0xD800 <= ord(c) <= 0xDFFF) or ord(c) >= 0x10000
            else:
                raise JSONEncodeError('this python has no codec for this character encoding',encoding)

    if not escape_unicode and not callable(escape_unicode):
        if encoding and encoding.startswith('utf'):
            # All UTF-x encodings can do the whole Unicode repertoire, so
            # do nothing special.
            pass
        else:
            # Even though we don't want to escape all unicode chars,
            # the encoding being used may force us to do so anyway.
            # We must pass in a function which says which characters
            # the encoding can handle and which it can't.
            def in_repertoire( c, encoding_func ):
                try:
                    x = encoding_func( c, errors='strict' )
                except UnicodeError:
                    return False
                return True
            if encoder:
                escape_unicode = lambda c: not in_repertoire(c, encoder)
            elif cdk:
                escape_unicode = lambda c: not in_repertoire(c, cdk[0])
            else:
                pass  # Let the JSON object deal with it

    j = JSON( strict=strict, compactly=compactly, escape_unicode=escape_unicode )
    unitxt = j.encode( obj )

    # Serialize the unicode text into the requested byte encoding.
    if encoder:
        txt = encoder( unitxt )
    elif encoding is not None:
        txt = unitxt.encode( encoding )
    else:
        txt = unitxt
    if bom:
        txt = bom + txt
    return txt
def decode( txt, strict=False, encoding=None, **kw ):
    """Decodes a JSON-encoded string into a Python object.

    If 'strict' is set to True, then those strings that are not
    entirely strictly conforming to JSON will result in a
    JSONDecodeError exception.

    The input string can be either a python string or a python unicode
    string.  If it is already a unicode string, then it is assumed
    that no character set decoding is required.

    However, if you pass in a non-Unicode text string (i.e., a python
    type 'str') then an attempt will be made to auto-detect and decode
    the character encoding.  This will be successful if the input was
    encoded in any of UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE),
    and of course plain ASCII works too.

    Note though that if you know the character encoding, then you
    should convert to a unicode string yourself, or pass it the name
    of the 'encoding' to avoid the guessing made by the auto
    detection, as with

        python_object = demjson.decode( input_bytes, encoding='utf8' )

    Optional keywords arguments must be of the form
        allow_xxxx=True/False
    or
        prevent_xxxx=True/False
    where each will allow or prevent the specific behavior, after the
    evaluation of the 'strict' argument.  For example, if strict=True
    then by also passing 'allow_comments=True' then comments will be
    allowed.  If strict=False then prevent_comments=True will allow
    everything except comments.
    """
    # Initialize the JSON object
    j = JSON( strict=strict )
    # Apply allow_*/prevent_* keyword overrides on top of 'strict'.
    for keyword, value in kw.items():
        if keyword.startswith('allow_'):
            behavior = keyword[6:]
            allow = bool(value)
        elif keyword.startswith('prevent_'):
            behavior = keyword[8:]
            allow = not bool(value)
        else:
            raise ValueError('unknown keyword argument', keyword)
        if allow:
            j.allow(behavior)
        else:
            j.prevent(behavior)
    # Convert the input string into unicode if needed.
    if isinstance(txt,unicode):
        unitxt = txt
    else:
        if encoding is None:
            # No encoding given: sniff BOMs / byte patterns automatically.
            unitxt = auto_unicode_decode( txt )
        else:
            cdk = None # codec
            decoder = None
            import codecs
            try:
                cdk = codecs.lookup(encoding)
            except LookupError:
                # Not a built-in codec; handle the UTF-32/UCS variants
                # that this Python lacks natively but JSON requires.
                encoding = encoding.lower()
                decoder = None
                if encoding.startswith('utf-32') \
                       or encoding.startswith('ucs4') \
                       or encoding.startswith('ucs-4'):
                    # Python doesn't natively have a UTF-32 codec, but JSON
                    # requires that it be supported.  So we must decode these
                    # manually.
                    if encoding.endswith('le'):
                        decoder = utf32le_decode
                    elif encoding.endswith('be'):
                        decoder = utf32be_decode
                    else:
                        # Endianness not specified: look for a BOM and
                        # strip it off before decoding the payload.
                        if txt.startswith( codecs.BOM_UTF32_BE ):
                            decoder = utf32be_decode
                            txt = txt[4:]
                        elif txt.startswith( codecs.BOM_UTF32_LE ):
                            decoder = utf32le_decode
                            txt = txt[4:]
                        else:
                            if encoding.startswith('ucs'):
                                raise JSONDecodeError('UCS-4 encoded string must start with a BOM')
                            decoder = utf32be_decode # Default BE for UTF, per unicode spec
                elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
                    # Python has no UCS-2, but we can simulate with
                    # UTF-16.  We just need to force us to not try to
                    # encode anything past the BMP.
                    encoding = 'utf-16'

            if decoder:
                unitxt = decoder(txt)
            elif encoding:
                unitxt = txt.decode(encoding)
            else:
                raise JSONDecodeError('this python has no codec for this character encoding',encoding)

        # Check that the decoding seems sane.  Per RFC 4627 section 3:
        #    "Since the first two characters of a JSON text will
        #    always be ASCII characters [RFC0020], ..."
        #
        # This check is probably not necessary, but it allows us to
        # raise a suitably descriptive error rather than an obscure
        # syntax error later on.
        #
        # Note that the RFC requirements of two ASCII characters seems
        # to be an incorrect statement as a JSON string literal may
        # have as it's first character any unicode character.  Thus
        # the first two characters will always be ASCII, unless the
        # first character is a quotation mark.  And in non-strict
        # mode we can also have a few other characters too.
        if len(unitxt) > 2:
            first, second = unitxt[:2]
            if first in '"\'':
                pass # second can be anything inside string literal
            else:
                if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
                    (ord(second) < 0x20 or ord(second) > 0x7f)) and \
                    (not j.isws(first) and not j.isws(second)):
                    # Found non-printable ascii, must check unicode
                    # categories to see if the character is legal.
                    # Only whitespace, line and paragraph separators,
                    # and format control chars are legal here.
                    import unicodedata
                    catfirst = unicodedata.category(unicode(first))
                    catsecond = unicodedata.category(unicode(second))
                    if catfirst not in ('Zs','Zl','Zp','Cf') or \
                           catsecond not in ('Zs','Zl','Zp','Cf'):
                        raise JSONDecodeError('the decoded string is gibberish, is the encoding correct?',encoding)
    # Now ready to do the actual decoding
    obj = j.decode( unitxt )
    return obj
# end file
| gpl-2.0 | -8,431,671,516,501,916,000 | 5,497,557,428,033,323,000 | 40.135641 | 153 | 0.525674 | false |
Chilledheart/chromium | chrome/test/chromedriver/test/webserver.py | 17 | 6856 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import ssl
import threading
class Responder(object):
  """Writes HTTP responses back through a request handler.

  Used with TestWebServer; wraps a BaseHTTPRequestHandler instance.
  """

  def __init__(self, handler):
    self._handler = handler

  def SendResponse(self, headers, body):
    """Sends a 200 response with the given headers and body."""
    self.SendHeaders(headers, len(body))
    self.SendBody(body)

  def SendResponseFromFile(self, path):
    """Sends a 200 response whose body is the contents of the file at path."""
    with open(path, 'r') as f:
      self.SendResponse({}, f.read())

  def SendHeaders(self, headers={}, content_length=None):
    """Sends a 200 status line followed by the given headers."""
    handler = self._handler
    handler.send_response(200)
    for name, value in headers.iteritems():
      handler.send_header(name, value)
    if content_length:
      handler.send_header('Content-Length', content_length)
    handler.end_headers()

  def SendError(self, code):
    """Sends an error response for the given HTTP status code."""
    self._handler.send_error(code)

  def SendBody(self, body):
    """Writes the raw body bytes, without any headers."""
    self._handler.wfile.write(body)
class Request(object):
  """Read-only view of an incoming HTTP request."""

  def __init__(self, handler):
    self._handler = handler

  def GetPath(self):
    """Returns the request path, including any query string."""
    return self._handler.path

  def GetHeader(self, name):
    """Returns the value of the named request header, or None if absent."""
    return self._handler.headers.getheader(name)
class _BaseServer(BaseHTTPServer.HTTPServer):
  """Internal server that throws if timed out waiting for a request."""

  def __init__(self, on_request, server_cert_and_key_path=None):
    """Starts the server.

    It is an HTTP server if parameter server_cert_and_key_path is not
    provided.  Otherwise, it is an HTTPS server.

    Args:
      on_request: callable invoked as on_request(Request, Responder) for
          every incoming GET request (except favicon.ico, which gets 404).
      server_cert_and_key_path: path to a PEM file containing the cert and
          key.  If it is None, start the server as an HTTP one.
    """

    class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
      """Internal handler that just asks the server to handle the request."""

      def do_GET(self):
        if self.path.endswith('favicon.ico'):
          self.send_error(404)
          return
        on_request(Request(self), Responder(self))

      def log_message(self, *args, **kwargs):
        """Overrides base class method to disable logging."""
        pass

    BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
    if server_cert_and_key_path is not None:
      self._is_https_enabled = True
      # Bug fix: the previous revision assigned to self._server.socket, but
      # this class *is* the server and has no _server attribute, so enabling
      # HTTPS raised AttributeError.  Wrap this server's own listening socket.
      self.socket = ssl.wrap_socket(
          self.socket, certfile=server_cert_and_key_path,
          server_side=True)
    else:
      self._is_https_enabled = False

  def handle_timeout(self):
    """Overridden from SocketServer; raises instead of returning silently."""
    raise RuntimeError('Timed out waiting for http request')

  def GetUrl(self):
    """Returns the base URL of the server."""
    postfix = '://127.0.0.1:%s' % self.server_port
    if self._is_https_enabled:
      return 'https' + postfix
    return 'http' + postfix
class WebServer(object):
  """An HTTP or HTTPS server that serves on its own thread.

  Serves files from given directory but may use custom data for specific
  paths.
  """

  def __init__(self, root_dir, server_cert_and_key_path=None):
    """Starts the server.

    It is an HTTP server if parameter server_cert_and_key_path is not
    provided.  Otherwise, it is an HTTPS server.

    Args:
      root_dir: root path to serve files from. This parameter is required.
      server_cert_and_key_path: path to a PEM file containing the cert and
          key. If it is None, start the server as an HTTP one.
    """
    self._root_dir = os.path.abspath(root_dir)
    self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
    self._thread = threading.Thread(target=self._server.serve_forever)
    self._thread.daemon = True
    self._thread.start()
    self._path_data_map = {}
    self._path_callback_map = {}
    self._path_maps_lock = threading.Lock()

  def _OnRequest(self, request, responder):
    path = request.GetPath().split('?')[0]

    # First consult the dynamic callback and data maps, under the lock.
    with self._path_maps_lock:
      if path in self._path_callback_map:
        headers, body = self._path_callback_map[path](request)
        if body:
          responder.SendResponse(headers, body)
        else:
          responder.SendError(503)
        return
      if path in self._path_data_map:
        responder.SendResponse({}, self._path_data_map[path])
        return

    # Fall back to serving a file from under the root directory.
    local_path = os.path.normpath(
        os.path.join(self._root_dir, *path.split('/')))
    if not local_path.startswith(self._root_dir):
      responder.SendError(403)
      return
    if not os.path.exists(local_path):
      responder.SendError(404)
      return
    responder.SendResponseFromFile(local_path)

  def SetDataForPath(self, path, data):
    """Registers static response data for the given path."""
    with self._path_maps_lock:
      self._path_data_map[path] = data

  def SetCallbackForPath(self, path, func):
    """Registers a callback producing (headers, body) for the given path."""
    with self._path_maps_lock:
      self._path_callback_map[path] = func

  def GetUrl(self):
    """Returns the base URL of the server."""
    return self._server.GetUrl()

  def Shutdown(self):
    """Shuts down the server synchronously."""
    self._server.shutdown()
    self._thread.join()
class SyncWebServer(object):
  """WebServer for testing.

  Incoming requests are blocked until explicitly handled.
  This was designed for single thread use. All requests should be handled
  on the same thread.
  """

  def __init__(self):
    self._server = _BaseServer(self._OnRequest)
    # Recognized by SocketServer.
    self._server.timeout = 10
    self._on_request = None

  def _OnRequest(self, request, responder):
    # Invoke the pending handler, then clear it so Respond's loop exits.
    self._on_request(responder)
    self._on_request = None

  def Respond(self, on_request):
    """Blocks until request comes in, then calls given handler function.

    Args:
      on_request: Function that handles the request. Invoked with single
          parameter, an instance of Responder.
    """
    if self._on_request:
      raise RuntimeError('Must handle 1 request at a time.')
    self._on_request = on_request
    while self._on_request:
      # Don't use handle_one_request, because it won't work with the timeout.
      self._server.handle_request()

  def RespondWithContent(self, content):
    """Blocks until request comes in, then handles it with the given content."""
    self.Respond(lambda responder: responder.SendResponse({}, content))

  def GetUrl(self):
    return self._server.GetUrl()
| bsd-3-clause | 8,782,197,250,300,059,000 | 8,926,285,386,793,366,000 | 29.202643 | 80 | 0.655193 | false |
kienpham2000/ansible-modules-core | source_control/subversion.py | 28 | 7706 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: subversion
short_description: Deploys a subversion repository.
description:
- Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
version_added: "0.7"
author: Dane Summers, njharman@gmail.com
notes:
- Requires I(svn) to be installed on the client.
requirements: []
options:
repo:
description:
- The subversion URL to the repository.
required: true
aliases: [ name, repository ]
default: null
dest:
description:
- Absolute path where the repository should be deployed.
required: true
default: null
revision:
description:
- Specific revision to checkout.
required: false
default: HEAD
aliases: [ version ]
force:
description:
- If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
required: false
default: "yes"
choices: [ "yes", "no" ]
username:
description:
- --username parameter passed to svn.
required: false
default: null
password:
description:
- --password parameter passed to svn.
required: false
default: null
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
export:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- If C(yes), do export instead of checkout/update.
'''
EXAMPLES = '''
# Checkout subversion repository to specified folder.
- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout
# Export subversion directory to folder
- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True
'''
import re
import tempfile
class Subversion(object):
    """Thin wrapper around the command-line svn client."""

    def __init__(
            self, module, dest, repo, revision, username, password, svn_path):
        self.module = module
        self.dest = dest
        self.repo = repo
        self.revision = revision
        self.username = username
        self.password = password
        self.svn_path = svn_path

    def _exec(self, args):
        """Run svn non-interactively with args appended; return stdout lines."""
        cmd = [
            self.svn_path,
            '--non-interactive',
            '--trust-server-cert',
            '--no-auth-cache',
        ]
        if self.username:
            cmd.extend(['--username', self.username])
        if self.password:
            cmd.extend(['--password', self.password])
        cmd.extend(args)
        rc, stdout, stderr = self.module.run_command(cmd, check_rc=True)
        return stdout.splitlines()

    def checkout(self):
        """Creates new svn working directory if it does not already exist."""
        self._exec(['checkout', '-r', self.revision, self.repo, self.dest])

    def export(self, force=False):
        """Export svn repo to directory"""
        self._exec(['export', '-r', self.revision, self.repo, self.dest])

    def switch(self):
        """Change working directory's repo."""
        # switch to ensure we are pointing at correct repo.
        self._exec(['switch', self.repo, self.dest])

    def update(self):
        """Update existing svn working directory."""
        self._exec(['update', '-r', self.revision, self.dest])

    def revert(self):
        """Revert svn working directory."""
        self._exec(['revert', '-R', self.dest])

    def get_revision(self):
        """Revision and URL of subversion working directory."""
        info = '\n'.join(self._exec(['info', self.dest]))
        rev = re.search(r'^Revision:.*$', info, re.MULTILINE).group(0)
        url = re.search(r'^URL:.*$', info, re.MULTILINE).group(0)
        return rev, url

    def has_local_mods(self):
        """True if revisioned files have been added or modified. Unrevisioned files are ignored."""
        status_lines = self._exec(['status', self.dest])
        # Lines starting with '?' are unrevisioned files; skip them.
        modified = [line for line in status_lines
                    if line and not line.startswith('?')]
        return len(modified) > 0

    def needs_update(self):
        """Compare working copy against HEAD; return (changed, curr, head)."""
        curr, url = self.get_revision()
        head_info = '\n'.join(self._exec(['info', '-r', 'HEAD', self.dest]))
        head = re.search(r'^Revision:.*$', head_info, re.MULTILINE).group(0)
        local_rev = int(curr.split(':')[1].strip())
        head_rev = int(head.split(':')[1].strip())
        return local_rev < head_rev, curr, head
# ===========================================
def main():
    """Ansible module entry point: check out, update or export an svn repo.

    Reads the module arguments, then either performs a fresh checkout or
    export (destination missing), updates an existing working copy, or
    fails if the destination exists but is not a subversion checkout.
    Always exits through module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True),
            repo=dict(required=True, aliases=['name', 'repository']),
            revision=dict(default='HEAD', aliases=['rev', 'version']),
            force=dict(default='yes', type='bool'),
            username=dict(required=False),
            password=dict(required=False),
            executable=dict(default=None),
            export=dict(default=False, required=False, type='bool'),
        ),
        supports_check_mode=True
    )

    dest = os.path.expanduser(module.params['dest'])
    repo = module.params['repo']
    revision = module.params['revision']
    force = module.params['force']
    username = module.params['username']
    password = module.params['password']
    svn_path = module.params['executable'] or module.get_bin_path('svn', True)
    export = module.params['export']

    # Force the C locale so svn's output is parseable regardless of the
    # host's language settings.
    os.environ['LANG'] = 'C'
    svn = Subversion(module, dest, repo, revision, username, password, svn_path)

    if not os.path.exists(dest):
        # Destination does not exist yet: fresh checkout (or export).
        before = None
        local_mods = False
        if module.check_mode:
            module.exit_json(changed=True)
        if not export:
            svn.checkout()
        else:
            svn.export()
    elif os.path.exists("%s/.svn" % (dest, )):
        # Order matters. Need to get local mods before switch to avoid false
        # positives. Need to switch before revert to ensure we are reverting to
        # correct repo.
        if module.check_mode:
            check, before, after = svn.needs_update()
            module.exit_json(changed=check, before=before, after=after)
        before = svn.get_revision()
        local_mods = svn.has_local_mods()
        svn.switch()
        if local_mods:
            if force:
                svn.revert()
            else:
                module.fail_json(msg="ERROR: modified files exist in the repository.")
        svn.update()
    else:
        module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))

    # Report whether anything changed by comparing revisions, also counting
    # a forced revert of local modifications as a change.
    after = svn.get_revision()
    changed = before != after or local_mods
    module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -4,359,057,028,843,938,300 | 2,825,564,229,206,370,300 | 32.359307 | 132 | 0.615884 | false |
SkySchermer/uweclang | uweclang/query/grammar/QueryLexer.py | 1 | 6565 | # Generated from java-escape by ANTLR 4.5
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
    # Machine-generated by ANTLR 4.5: builds the serialized ATN (augmented
    # transition network) for the Query lexer as one long string.
    # Do not edit the escape data below by hand; regenerate from Query.g4.
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
        buf.write(u"\27\u00a4\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6")
        buf.write(u"\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4")
        buf.write(u"\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t")
        buf.write(u"\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27")
        buf.write(u"\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\3")
        buf.write(u"\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\5\3\5\3")
        buf.write(u"\6\3\6\3\7\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13")
        buf.write(u"\3\f\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\20\7")
        buf.write(u"\20_\n\20\f\20\16\20b\13\20\3\20\3\20\3\21\3\21\3\21")
        buf.write(u"\7\21i\n\21\f\21\16\21l\13\21\5\21n\n\21\3\22\6\22q\n")
        buf.write(u"\22\r\22\16\22r\3\23\3\23\3\24\5\24x\n\24\3\24\3\24\3")
        buf.write(u"\25\3\25\3\25\3\25\7\25\u0080\n\25\f\25\16\25\u0083\13")
        buf.write(u"\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\7\26")
        buf.write(u"\u008e\n\26\f\26\16\26\u0091\13\26\3\26\3\26\3\27\3\27")
        buf.write(u"\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3")
        buf.write(u"\34\3\34\5\34\u00a3\n\34\3\u0081\2\35\3\3\5\4\7\5\t\6")
        buf.write(u"\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
        buf.write(u"\37\21!\22#\23%\24\'\25)\26+\27-\2/\2\61\2\63\2\65\2")
        buf.write(u"\67\2\3\2\6\5\2\13\f\17\17\"\"\4\2\f\f\17\17\3\2$$\r")
        buf.write(u"\2\13\f\17\17\"\"$$*+..\61\61<=]_}}\177\u0080\u00a6\2")
        buf.write(u"\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3")
        buf.write(u"\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2")
        buf.write(u"\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2")
        buf.write(u"\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2")
        buf.write(u"\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\39\3\2")
        buf.write(u"\2\2\5;\3\2\2\2\7B\3\2\2\2\tD\3\2\2\2\13F\3\2\2\2\rH")
        buf.write(u"\3\2\2\2\17K\3\2\2\2\21M\3\2\2\2\23O\3\2\2\2\25Q\3\2")
        buf.write(u"\2\2\27S\3\2\2\2\31U\3\2\2\2\33W\3\2\2\2\35Y\3\2\2\2")
        buf.write(u"\37[\3\2\2\2!m\3\2\2\2#p\3\2\2\2%t\3\2\2\2\'w\3\2\2\2")
        buf.write(u"){\3\2\2\2+\u0089\3\2\2\2-\u0094\3\2\2\2/\u0096\3\2\2")
        buf.write(u"\2\61\u0098\3\2\2\2\63\u009a\3\2\2\2\65\u009c\3\2\2\2")
        buf.write(u"\67\u00a2\3\2\2\29:\7=\2\2:\4\3\2\2\2;<\7f\2\2<=\7g\2")
        buf.write(u"\2=>\7h\2\2>?\7k\2\2?@\7p\2\2@A\7g\2\2A\6\3\2\2\2BC\7")
        buf.write(u"\u0080\2\2C\b\3\2\2\2DE\7*\2\2E\n\3\2\2\2FG\7+\2\2G\f")
        buf.write(u"\3\2\2\2HI\7<\2\2IJ\7<\2\2J\16\3\2\2\2KL\7<\2\2L\20\3")
        buf.write(u"\2\2\2MN\7]\2\2N\22\3\2\2\2OP\7.\2\2P\24\3\2\2\2QR\7")
        buf.write(u"_\2\2R\26\3\2\2\2ST\7^\2\2T\30\3\2\2\2UV\7A\2\2V\32\3")
        buf.write(u"\2\2\2WX\7}\2\2X\34\3\2\2\2YZ\7\177\2\2Z\36\3\2\2\2[")
        buf.write(u"`\7$\2\2\\_\5\67\34\2]_\5\61\31\2^\\\3\2\2\2^]\3\2\2")
        buf.write(u"\2_b\3\2\2\2`^\3\2\2\2`a\3\2\2\2ac\3\2\2\2b`\3\2\2\2")
        buf.write(u"cd\7$\2\2d \3\2\2\2en\5-\27\2fj\5/\30\2gi\5-\27\2hg\3")
        buf.write(u"\2\2\2il\3\2\2\2jh\3\2\2\2jk\3\2\2\2kn\3\2\2\2lj\3\2")
        buf.write(u"\2\2me\3\2\2\2mf\3\2\2\2n\"\3\2\2\2oq\5\65\33\2po\3\2")
        buf.write(u"\2\2qr\3\2\2\2rp\3\2\2\2rs\3\2\2\2s$\3\2\2\2tu\t\2\2")
        buf.write(u"\2u&\3\2\2\2vx\7\17\2\2wv\3\2\2\2wx\3\2\2\2xy\3\2\2\2")
        buf.write(u"yz\7\f\2\2z(\3\2\2\2{|\7\61\2\2|}\7,\2\2}\u0081\3\2\2")
        buf.write(u"\2~\u0080\13\2\2\2\177~\3\2\2\2\u0080\u0083\3\2\2\2\u0081")
        buf.write(u"\u0082\3\2\2\2\u0081\177\3\2\2\2\u0082\u0084\3\2\2\2")
        buf.write(u"\u0083\u0081\3\2\2\2\u0084\u0085\7,\2\2\u0085\u0086\7")
        buf.write(u"\61\2\2\u0086\u0087\3\2\2\2\u0087\u0088\b\25\2\2\u0088")
        buf.write(u"*\3\2\2\2\u0089\u008a\7\61\2\2\u008a\u008b\7\61\2\2\u008b")
        buf.write(u"\u008f\3\2\2\2\u008c\u008e\n\3\2\2\u008d\u008c\3\2\2")
        buf.write(u"\2\u008e\u0091\3\2\2\2\u008f\u008d\3\2\2\2\u008f\u0090")
        buf.write(u"\3\2\2\2\u0090\u0092\3\2\2\2\u0091\u008f\3\2\2\2\u0092")
        buf.write(u"\u0093\b\26\2\2\u0093,\3\2\2\2\u0094\u0095\4\62;\2\u0095")
        buf.write(u".\3\2\2\2\u0096\u0097\4\63;\2\u0097\60\3\2\2\2\u0098")
        buf.write(u"\u0099\n\4\2\2\u0099\62\3\2\2\2\u009a\u009b\t\2\2\2\u009b")
        buf.write(u"\64\3\2\2\2\u009c\u009d\n\5\2\2\u009d\66\3\2\2\2\u009e")
        buf.write(u"\u009f\7^\2\2\u009f\u00a3\7$\2\2\u00a0\u00a1\7^\2\2\u00a1")
        buf.write(u"\u00a3\7^\2\2\u00a2\u009e\3\2\2\2\u00a2\u00a0\3\2\2\2")
        buf.write(u"\u00a38\3\2\2\2\f\2^`jmrw\u0081\u008f\u00a2\3\b\2\2")
        return buf.getvalue()
class QueryLexer(Lexer):
    """ANTLR 4.5 machine-generated lexer for the Query grammar (Query.g4).

    Do not edit by hand; regenerate from the grammar if the tokens change.
    """

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; indices line up with symbolicNames/literalNames.
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    ExplicitRegex = 15
    Digits = 16
    Symbol = 17
    WhiteSpace = 18
    NewLine = 19
    BlockComment = 20
    LineComment = 21

    modeNames = [ u"DEFAULT_MODE" ]

    literalNames = [ u"<INVALID>",
            u"';'", u"'define'", u"'~'", u"'('", u"')'", u"'::'", u"':'",
            u"'['", u"','", u"']'", u"'\\'", u"'?'", u"'{'", u"'}'" ]

    symbolicNames = [ u"<INVALID>",
            u"ExplicitRegex", u"Digits", u"Symbol", u"WhiteSpace", u"NewLine",
            u"BlockComment", u"LineComment" ]

    ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5",
                  u"T__6", u"T__7", u"T__8", u"T__9", u"T__10", u"T__11",
                  u"T__12", u"T__13", u"ExplicitRegex", u"Digits", u"Symbol",
                  u"WhiteSpace", u"NewLine", u"BlockComment", u"LineComment",
                  u"DIGIT", u"NONZERO_DIGIT", u"NON_QUOTE", u"WHITESPACE",
                  u"SYMBOL_CHAR", u"ESCAPED_CHAR" ]

    grammarFileName = u"Query.g4"

    def __init__(self, input=None):
        # Wire this lexer instance to the shared ATN/DFA tables above.
        super(QueryLexer, self).__init__(input)
        self.checkVersion("4.5")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| mit | 4,000,562,379,594,004,500 | 1,694,232,063,655,835,100 | 49.5 | 103 | 0.527494 | false |
PetrDlouhy/django | django/db/models/sql/subqueries.py | 25 | 7754 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, NO_RESULTS
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """

    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        """Execute a single DELETE against `table` with the given where tree."""
        self.tables = [table]
        self.where = where
        self.get_compiler(using).execute_sql(NO_RESULTS)

    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        if not field:
            field = self.get_meta().pk
        # Chunk the pk list so the IN (...) clause stays within backend limits.
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(
                **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
            self.do_query(self.get_meta().db_table, self.where, using=using)

    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = [t for t in innerq.tables
                              if innerq.alias_refcount[t]]
        if not innerq_used_tables or innerq_used_tables == self.tables:
            # There is only the base table in use in the query.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                # Fall back to deleting by explicit pk batches instead.
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return
                self.delete_batch(values, using)
                return
            else:
                # Reduce the inner query to selecting only the pk column so
                # it can serve as the subquery for pk__in below.
                innerq.clear_select_clause()
                innerq.select = [
                    pk.get_col(self.get_initial_alias())
                ]
                values = innerq
            self.where = self.where_class()
            self.add_q(Q(pk__in=values))
        self.get_compiler(using).execute_sql(NO_RESULTS)
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """

    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        # related_updates may already have been carried over by clone().
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        """Clone the query, carrying a copy of the pending related updates."""
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def update_batch(self, pk_list, values, using):
        """Apply `values` to the rows in pk_list, chunked to limit IN size."""
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
            self.get_compiler(using).execute_sql(NO_RESULTS)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in six.iteritems(values):
            field = self.get_meta().get_field(name)
            direct = not (field.auto_created and not field.concrete) or not field.concrete
            model = field.model._meta.concrete_model
            if not direct or (field.is_relation and field.many_to_many):
                raise FieldError(
                    'Cannot update model field %r (only non-relations and '
                    'foreign keys permitted).' % field
                )
            if model is not self.get_meta().model:
                # The field lives on an ancestor model; defer it to a
                # separate per-ancestor update query.
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        self.related_updates.setdefault(model, []).append((field, None, value))

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in six.iteritems(self.related_updates):
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """Represents an "insert" SQL query."""
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        self.fields = []
        self.objs = []

    def clone(self, klass=None, **kwargs):
        # Shallow-copy the insert-specific state; explicit kwargs win.
        defaults = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        defaults.update(kwargs)
        return super(InsertQuery, self).clone(klass, **defaults)

    def insert_values(self, fields, objs, raw=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.
        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        self.fields = fields
        self.objs = objs
        self.raw = raw
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Compile the inner query once; its SQL and parameters are stashed
        # for the aggregate compiler to interpolate into the FROM clause.
        inner_compiler = query.get_compiler(using)
        self.subquery, self.sub_params = inner_compiler.as_sql(
            with_col_aliases=True, subquery=True)
| bsd-3-clause | -8,664,467,858,663,541,000 | 7,412,656,929,124,738,000 | 35.575472 | 94 | 0.594403 | false |
# Copyright (c) 2015 by Miblo <miblodelcarpio@gmail.com>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import weechat
weechat.register("weestreamer", "Miblo", "0.4.2", "GPL3", "Streamlink companion for WeeChat", "", "")
def stream(data, buffer, args):
    """Launch streamlink for the stream associated with an IRC channel.

    Callback for the /weestreamer command. With no arguments the current
    buffer's server and channel are used; with one argument it is taken as
    the channel; with two, as "server channel".

    data: callback data registered with hook_command (unused).
    buffer: buffer the command was typed in (unused; the current buffer is
        queried directly).
    args: the command's raw argument string.
    Returns weechat.WEECHAT_RC_OK on success, weechat.WEECHAT_RC_ERROR on
    bad arguments or an unsupported server.
    """
    bufserver = weechat.buffer_get_string(weechat.current_buffer(), "localvar_server")
    bufchannel = weechat.buffer_get_string(weechat.current_buffer(), "localvar_channel").lstrip("#")
    quality = "best"
    # Renamed from `input` to avoid shadowing the builtin.
    words = args.split()
    if not words:
        server = bufserver
        channel = bufchannel
    elif len(words) == 1:
        server = bufserver
        channel = words[0]
    elif len(words) == 2:
        server, channel = words
    else:
        weechat.prnt(weechat.current_buffer(), "%sToo many arguments (%s). Please see /help weestreamer"
                % (weechat.prefix("error"), len(words)))
        return weechat.WEECHAT_RC_ERROR
    # NOTE(matt): https://streamlink.github.io/plugin_matrix.html
    servers = {"afreeca": "http://play.afreeca.com/%s" % (channel),
               "hitbox": "http://www.hitbox.tv/%s" % (channel),
               "twitch": "http://www.twitch.tv/%s" % (channel),
               "ustream": "http://www.ustream.tv/%s" % (channel.replace("-", ""))}
    # Match on substring so e.g. a server named "twitch2" still resolves.
    streamurl = ""
    for key in servers:
        if key in server:
            streamurl = servers[key]
    if not streamurl:
        weechat.prnt(weechat.current_buffer(), "%sUnsupported server: %s"
                % (weechat.prefix("error"), server))
        weechat.prnt(weechat.current_buffer(), "Currently supported servers:")
        for key in sorted(servers):
            weechat.prnt(weechat.current_buffer(), "  %s" % key)
        return weechat.WEECHAT_RC_ERROR
    command = "streamlink %s %s" % (streamurl, quality)
    weechat.prnt(weechat.current_buffer(), "%sLAUNCHING: %s" % (weechat.prefix("action"), command))
    # Run streamlink asynchronously; handle_output collects its output.
    weechat.hook_process("%s" % (command), 0, "handle_output", "")
    return weechat.WEECHAT_RC_OK
def handle_output(data, command, rc, out, err):
    """Collect output from the streamlink process started by hook_process.

    WeeChat may invoke this callback several times with partial chunks of
    output while the process runs (rc < 0), and a final time with rc >= 0
    when it exits. The original code reset `process_output` to "" on every
    invocation, discarding all chunks but the last; now chunks accumulate
    until the process finishes, at which point the full output is printed
    and the accumulator is cleared for the next run.
    """
    global process_output
    # The module never initialises the accumulator, so create it lazily.
    if "process_output" not in globals():
        process_output = ""
    if out != "":
        process_output += out
    if int(rc) >= 0:
        # Process finished: print everything we collected and reset.
        weechat.prnt(weechat.current_buffer(), process_output)
        process_output = ""
    return weechat.WEECHAT_RC_OK
weechat.hook_command("weestreamer", "Streamlink companion for WeeChat",
"server channel",
"Run /weestreamer without any arguments while in a channel on a supported irc\n"
"server to launch the stream associated with that channel.\n"
"\n"
"You may optionally pass the server and / or channel (in that order) to launch\n"
"the required stream from any channel, e.g.:\n"
" /weestreamer twitch handmade_hero\n"
" /weestreamer handmade_hero\n"
"\n"
"Currently supported servers:\n"
" afreeca\n"
" hitbox\n"
" twitch\n"
" ustream\n"
"\n"
"\n"
"Troubleshooting: If you expect that your current server should be supported but\n"
"weestreamer keeps erroring, please check the name of the server by running:\n"
"\n"
" /buffer localvar\n"
"\n"
"If you have named the server such that it doesn't contain the string in\n"
"\"Currently supported servers\" (above), weestreamer will not recognise it.",
# NOTE(matt): list of valid parameters
"afreeca"
" || hitbox"
" || twitch"
" || ustream",
"stream", "")
| gpl-3.0 | -5,336,138,424,976,543,000 | -5,513,613,394,863,975,000 | 38.542857 | 104 | 0.622592 | false |
# -*- coding: utf-8 -*-
"""
test/test_ssl_socket
~~~~~~~~~~~~~~~~~~~~
This file defines tests for hyper that validate our TLS handling.
"""
import os
import socket
import ssl
import threading
import pytest
from hyper.tls import wrap_socket, init_context
from server import SocketLevelTest
# Absolute paths to the TLS fixture files shipped alongside the test suite.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
TEST_CERTS_DIR = os.path.join(TEST_DIR, "certs")
# Client certificate and key as separate files (the key is used with the
# password b'abc123' in test_client_certificate below).
CLIENT_CERT_FILE = os.path.join(TEST_CERTS_DIR, 'client.crt')
CLIENT_KEY_FILE = os.path.join(TEST_CERTS_DIR, 'client.key')
# Combined client cert+key PEM whose key has no password.
CLIENT_PEM_FILE = os.path.join(TEST_CERTS_DIR, 'nopassword.pem')
# Server-side certificate and key used by the test TLS server.
SERVER_CERT_FILE = os.path.join(TEST_CERTS_DIR, 'server.crt')
SERVER_KEY_FILE = os.path.join(TEST_CERTS_DIR, 'server.key')
class TestBasicSocketManipulation(SocketLevelTest):
    # These aren't HTTP/2 tests, but it doesn't hurt to leave it.
    h2 = True
    def test_connection_string(self):
        # wrap_socket should return both the wrapped socket and the protocol
        # that was forced during TLS negotiation.
        self.set_up()
        evt = threading.Event()
        def socket_handler(listener):
            # Server side: accept one connection and hold it open until the
            # client signals it is done, then close.
            sock = listener.accept()[0]
            evt.wait(5)
            sock.close()
        self._start_server(socket_handler)
        s = socket.create_connection((self.host, self.port))
        s, proto = wrap_socket(s, "localhost", force_proto=b"test")
        s.close()
        # Let the server-side handler finish and release its socket.
        evt.set()
        assert proto == b"test"
        self.tear_down()
    @pytest.mark.parametrize(
        'context_kwargs',
        [
            {'cert': CLIENT_PEM_FILE},
            {
                'cert': (CLIENT_CERT_FILE, CLIENT_KEY_FILE),
                'cert_password': b'abc123'
            },
        ]
    )
    def test_client_certificate(self, context_kwargs):
        # Exercise TLS client-certificate auth with either a combined PEM
        # file or a separate cert/key pair whose key is password-protected.
        # Don't have the server thread do TLS: we'll do it ourselves.
        self.set_up(secure=False)
        evt = threading.Event()
        data = []
        def socket_handler(listener):
            sock = listener.accept()[0]
            # NOTE(review): ssl.wrap_socket is deprecated in modern Python
            # (removed in 3.12); left unchanged in this documentation pass.
            sock = ssl.wrap_socket(
                sock,
                ssl_version=ssl.PROTOCOL_SSLv23,
                certfile=SERVER_CERT_FILE,
                keyfile=SERVER_KEY_FILE,
                cert_reqs=ssl.CERT_REQUIRED,
                ca_certs=CLIENT_PEM_FILE,
                server_side=True
            )
            data.append(sock.recv(65535))
            evt.wait(5)
            sock.close()
        self._start_server(socket_handler)
        # Set up the client context. Don't validate the server cert though.
        context = init_context(**context_kwargs)
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        s = socket.create_connection((self.host, self.port))
        s, proto = wrap_socket(s, "localhost", ssl_context=context)
        s.sendall(b'hi')
        s.close()
        evt.set()
        self.tear_down()
| mit | -7,800,163,785,742,552,000 | 998,656,435,529,681,400 | 27.474227 | 75 | 0.579652 | false |
# LaTeX math to Unicode symbols translation table
# for use with the translate() method of unicode objects.
# Generated with ``write_unichar2tex.py`` from the data in
# http://milde.users.sourceforge.net/LUCR/Math/
# Includes commands from: standard LaTeX, amssymb, amsmath
uni2tex_table = {
160: '~',
163: '\\pounds ',
165: '\\yen ',
172: '\\neg ',
174: '\\circledR ',
177: '\\pm ',
215: '\\times ',
240: '\\eth ',
247: '\\div ',
305: '\\imath ',
567: '\\jmath ',
915: '\\Gamma ',
916: '\\Delta ',
920: '\\Theta ',
923: '\\Lambda ',
926: '\\Xi ',
928: '\\Pi ',
931: '\\Sigma ',
933: '\\Upsilon ',
934: '\\Phi ',
936: '\\Psi ',
937: '\\Omega ',
945: '\\alpha ',
946: '\\beta ',
947: '\\gamma ',
948: '\\delta ',
949: '\\varepsilon ',
950: '\\zeta ',
951: '\\eta ',
952: '\\theta ',
953: '\\iota ',
954: '\\kappa ',
955: '\\lambda ',
956: '\\mu ',
957: '\\nu ',
958: '\\xi ',
960: '\\pi ',
961: '\\rho ',
962: '\\varsigma ',
963: '\\sigma ',
964: '\\tau ',
965: '\\upsilon ',
966: '\\varphi ',
967: '\\chi ',
968: '\\psi ',
969: '\\omega ',
977: '\\vartheta ',
981: '\\phi ',
982: '\\varpi ',
989: '\\digamma ',
1014: '\\backepsilon ',
8193: '\\quad ',
8214: '\\| ',
8224: '\\dagger ',
8225: '\\ddagger ',
8230: '\\ldots ',
8242: '\\prime ',
8245: '\\backprime ',
8287: '\\: ',
8450: '\\mathbb{C}',
8459: '\\mathcal{H}',
8460: '\\mathfrak{H}',
8461: '\\mathbb{H}',
8463: '\\hslash ',
8464: '\\mathcal{I}',
8465: '\\Im ',
8466: '\\mathcal{L}',
8467: '\\ell ',
8469: '\\mathbb{N}',
8472: '\\wp ',
8473: '\\mathbb{P}',
8474: '\\mathbb{Q}',
8475: '\\mathcal{R}',
8476: '\\Re ',
8477: '\\mathbb{R}',
8484: '\\mathbb{Z}',
8487: '\\mho ',
8488: '\\mathfrak{Z}',
8492: '\\mathcal{B}',
8493: '\\mathfrak{C}',
8496: '\\mathcal{E}',
8497: '\\mathcal{F}',
8498: '\\Finv ',
8499: '\\mathcal{M}',
8501: '\\aleph ',
8502: '\\beth ',
8503: '\\gimel ',
8504: '\\daleth ',
8592: '\\leftarrow ',
8593: '\\uparrow ',
8594: '\\rightarrow ',
8595: '\\downarrow ',
8596: '\\leftrightarrow ',
8597: '\\updownarrow ',
8598: '\\nwarrow ',
8599: '\\nearrow ',
8600: '\\searrow ',
8601: '\\swarrow ',
8602: '\\nleftarrow ',
8603: '\\nrightarrow ',
8606: '\\twoheadleftarrow ',
8608: '\\twoheadrightarrow ',
8610: '\\leftarrowtail ',
8611: '\\rightarrowtail ',
8614: '\\mapsto ',
8617: '\\hookleftarrow ',
8618: '\\hookrightarrow ',
8619: '\\looparrowleft ',
8620: '\\looparrowright ',
8621: '\\leftrightsquigarrow ',
8622: '\\nleftrightarrow ',
8624: '\\Lsh ',
8625: '\\Rsh ',
8630: '\\curvearrowleft ',
8631: '\\curvearrowright ',
8634: '\\circlearrowleft ',
8635: '\\circlearrowright ',
8636: '\\leftharpoonup ',
8637: '\\leftharpoondown ',
8638: '\\upharpoonright ',
8639: '\\upharpoonleft ',
8640: '\\rightharpoonup ',
8641: '\\rightharpoondown ',
8642: '\\downharpoonright ',
8643: '\\downharpoonleft ',
8644: '\\rightleftarrows ',
8646: '\\leftrightarrows ',
8647: '\\leftleftarrows ',
8648: '\\upuparrows ',
8649: '\\rightrightarrows ',
8650: '\\downdownarrows ',
8651: '\\leftrightharpoons ',
8652: '\\rightleftharpoons ',
8653: '\\nLeftarrow ',
8654: '\\nLeftrightarrow ',
8655: '\\nRightarrow ',
8656: '\\Leftarrow ',
8657: '\\Uparrow ',
8658: '\\Rightarrow ',
8659: '\\Downarrow ',
8660: '\\Leftrightarrow ',
8661: '\\Updownarrow ',
8666: '\\Lleftarrow ',
8667: '\\Rrightarrow ',
8669: '\\rightsquigarrow ',
8672: '\\dashleftarrow ',
8674: '\\dashrightarrow ',
8704: '\\forall ',
8705: '\\complement ',
8706: '\\partial ',
8707: '\\exists ',
8708: '\\nexists ',
8709: '\\varnothing ',
8711: '\\nabla ',
8712: '\\in ',
8713: '\\notin ',
8715: '\\ni ',
8719: '\\prod ',
8720: '\\coprod ',
8721: '\\sum ',
8722: '-',
8723: '\\mp ',
8724: '\\dotplus ',
8725: '\\slash ',
8726: '\\smallsetminus ',
8727: '\\ast ',
8728: '\\circ ',
8729: '\\bullet ',
8730: '\\sqrt ',
8731: '\\sqrt[3] ',
8732: '\\sqrt[4] ',
8733: '\\propto ',
8734: '\\infty ',
8736: '\\angle ',
8737: '\\measuredangle ',
8738: '\\sphericalangle ',
8739: '\\mid ',
8740: '\\nmid ',
8741: '\\parallel ',
8742: '\\nparallel ',
8743: '\\wedge ',
8744: '\\vee ',
8745: '\\cap ',
8746: '\\cup ',
8747: '\\int ',
8748: '\\iint ',
8749: '\\iiint ',
8750: '\\oint ',
8756: '\\therefore ',
8757: '\\because ',
8758: ':',
8764: '\\sim ',
8765: '\\backsim ',
8768: '\\wr ',
8769: '\\nsim ',
8770: '\\eqsim ',
8771: '\\simeq ',
8773: '\\cong ',
8775: '\\ncong ',
8776: '\\approx ',
8778: '\\approxeq ',
8781: '\\asymp ',
8782: '\\Bumpeq ',
8783: '\\bumpeq ',
8784: '\\doteq ',
8785: '\\Doteq ',
8786: '\\fallingdotseq ',
8787: '\\risingdotseq ',
8790: '\\eqcirc ',
8791: '\\circeq ',
8796: '\\triangleq ',
8800: '\\neq ',
8801: '\\equiv ',
8804: '\\leq ',
8805: '\\geq ',
8806: '\\leqq ',
8807: '\\geqq ',
8808: '\\lneqq ',
8809: '\\gneqq ',
8810: '\\ll ',
8811: '\\gg ',
8812: '\\between ',
8814: '\\nless ',
8815: '\\ngtr ',
8816: '\\nleq ',
8817: '\\ngeq ',
8818: '\\lesssim ',
8819: '\\gtrsim ',
8822: '\\lessgtr ',
8823: '\\gtrless ',
8826: '\\prec ',
8827: '\\succ ',
8828: '\\preccurlyeq ',
8829: '\\succcurlyeq ',
8830: '\\precsim ',
8831: '\\succsim ',
8832: '\\nprec ',
8833: '\\nsucc ',
8834: '\\subset ',
8835: '\\supset ',
8838: '\\subseteq ',
8839: '\\supseteq ',
8840: '\\nsubseteq ',
8841: '\\nsupseteq ',
8842: '\\subsetneq ',
8843: '\\supsetneq ',
8846: '\\uplus ',
8847: '\\sqsubset ',
8848: '\\sqsupset ',
8849: '\\sqsubseteq ',
8850: '\\sqsupseteq ',
8851: '\\sqcap ',
8852: '\\sqcup ',
8853: '\\oplus ',
8854: '\\ominus ',
8855: '\\otimes ',
8856: '\\oslash ',
8857: '\\odot ',
8858: '\\circledcirc ',
8859: '\\circledast ',
8861: '\\circleddash ',
8862: '\\boxplus ',
8863: '\\boxminus ',
8864: '\\boxtimes ',
8865: '\\boxdot ',
8866: '\\vdash ',
8867: '\\dashv ',
8868: '\\top ',
8869: '\\bot ',
8871: '\\models ',
8872: '\\vDash ',
8873: '\\Vdash ',
8874: '\\Vvdash ',
8876: '\\nvdash ',
8877: '\\nvDash ',
8878: '\\nVdash ',
8879: '\\nVDash ',
8882: '\\vartriangleleft ',
8883: '\\vartriangleright ',
8884: '\\trianglelefteq ',
8885: '\\trianglerighteq ',
8888: '\\multimap ',
8890: '\\intercal ',
8891: '\\veebar ',
8892: '\\barwedge ',
8896: '\\bigwedge ',
8897: '\\bigvee ',
8898: '\\bigcap ',
8899: '\\bigcup ',
8900: '\\diamond ',
8901: '\\cdot ',
8902: '\\star ',
8903: '\\divideontimes ',
8904: '\\bowtie ',
8905: '\\ltimes ',
8906: '\\rtimes ',
8907: '\\leftthreetimes ',
8908: '\\rightthreetimes ',
8909: '\\backsimeq ',
8910: '\\curlyvee ',
8911: '\\curlywedge ',
8912: '\\Subset ',
8913: '\\Supset ',
8914: '\\Cap ',
8915: '\\Cup ',
8916: '\\pitchfork ',
8918: '\\lessdot ',
8919: '\\gtrdot ',
8920: '\\lll ',
8921: '\\ggg ',
8922: '\\lesseqgtr ',
8923: '\\gtreqless ',
8926: '\\curlyeqprec ',
8927: '\\curlyeqsucc ',
8928: '\\npreceq ',
8929: '\\nsucceq ',
8934: '\\lnsim ',
8935: '\\gnsim ',
8936: '\\precnsim ',
8937: '\\succnsim ',
8938: '\\ntriangleleft ',
8939: '\\ntriangleright ',
8940: '\\ntrianglelefteq ',
8941: '\\ntrianglerighteq ',
8942: '\\vdots ',
8943: '\\cdots ',
8945: '\\ddots ',
8968: '\\lceil ',
8969: '\\rceil ',
8970: '\\lfloor ',
8971: '\\rfloor ',
8988: '\\ulcorner ',
8989: '\\urcorner ',
8990: '\\llcorner ',
8991: '\\lrcorner ',
8994: '\\frown ',
8995: '\\smile ',
9182: '\\overbrace ',
9183: '\\underbrace ',
9651: '\\bigtriangleup ',
9655: '\\rhd ',
9661: '\\bigtriangledown ',
9665: '\\lhd ',
9671: '\\Diamond ',
9674: '\\lozenge ',
9723: '\\square ',
9724: '\\blacksquare ',
9733: '\\bigstar ',
9824: '\\spadesuit ',
9825: '\\heartsuit ',
9826: '\\diamondsuit ',
9827: '\\clubsuit ',
9837: '\\flat ',
9838: '\\natural ',
9839: '\\sharp ',
10003: '\\checkmark ',
10016: '\\maltese ',
10178: '\\perp ',
10216: '\\langle ',
10217: '\\rangle ',
10222: '\\lgroup ',
10223: '\\rgroup ',
10229: '\\longleftarrow ',
10230: '\\longrightarrow ',
10231: '\\longleftrightarrow ',
10232: '\\Longleftarrow ',
10233: '\\Longrightarrow ',
10234: '\\Longleftrightarrow ',
10236: '\\longmapsto ',
10731: '\\blacklozenge ',
10741: '\\setminus ',
10752: '\\bigodot ',
10753: '\\bigoplus ',
10754: '\\bigotimes ',
10756: '\\biguplus ',
10758: '\\bigsqcup ',
10764: '\\iiiint ',
10781: '\\Join ',
10815: '\\amalg ',
10846: '\\doublebarwedge ',
10877: '\\leqslant ',
10878: '\\geqslant ',
10885: '\\lessapprox ',
10886: '\\gtrapprox ',
10887: '\\lneq ',
10888: '\\gneq ',
10889: '\\lnapprox ',
10890: '\\gnapprox ',
10891: '\\lesseqqgtr ',
10892: '\\gtreqqless ',
10901: '\\eqslantless ',
10902: '\\eqslantgtr ',
10927: '\\preceq ',
10928: '\\succeq ',
10935: '\\precapprox ',
10936: '\\succapprox ',
10937: '\\precnapprox ',
10938: '\\succnapprox ',
10949: '\\subseteqq ',
10950: '\\supseteqq ',
10955: '\\subsetneqq ',
10956: '\\supsetneqq ',
119808: '\\mathbf{A}',
119809: '\\mathbf{B}',
119810: '\\mathbf{C}',
119811: '\\mathbf{D}',
119812: '\\mathbf{E}',
119813: '\\mathbf{F}',
119814: '\\mathbf{G}',
119815: '\\mathbf{H}',
119816: '\\mathbf{I}',
119817: '\\mathbf{J}',
119818: '\\mathbf{K}',
119819: '\\mathbf{L}',
119820: '\\mathbf{M}',
119821: '\\mathbf{N}',
119822: '\\mathbf{O}',
119823: '\\mathbf{P}',
119824: '\\mathbf{Q}',
119825: '\\mathbf{R}',
119826: '\\mathbf{S}',
119827: '\\mathbf{T}',
119828: '\\mathbf{U}',
119829: '\\mathbf{V}',
119830: '\\mathbf{W}',
119831: '\\mathbf{X}',
119832: '\\mathbf{Y}',
119833: '\\mathbf{Z}',
119834: '\\mathbf{a}',
119835: '\\mathbf{b}',
119836: '\\mathbf{c}',
119837: '\\mathbf{d}',
119838: '\\mathbf{e}',
119839: '\\mathbf{f}',
119840: '\\mathbf{g}',
119841: '\\mathbf{h}',
119842: '\\mathbf{i}',
119843: '\\mathbf{j}',
119844: '\\mathbf{k}',
119845: '\\mathbf{l}',
119846: '\\mathbf{m}',
119847: '\\mathbf{n}',
119848: '\\mathbf{o}',
119849: '\\mathbf{p}',
119850: '\\mathbf{q}',
119851: '\\mathbf{r}',
119852: '\\mathbf{s}',
119853: '\\mathbf{t}',
119854: '\\mathbf{u}',
119855: '\\mathbf{v}',
119856: '\\mathbf{w}',
119857: '\\mathbf{x}',
119858: '\\mathbf{y}',
119859: '\\mathbf{z}',
119860: 'A',
119861: 'B',
119862: 'C',
119863: 'D',
119864: 'E',
119865: 'F',
119866: 'G',
119867: 'H',
119868: 'I',
119869: 'J',
119870: 'K',
119871: 'L',
119872: 'M',
119873: 'N',
119874: 'O',
119875: 'P',
119876: 'Q',
119877: 'R',
119878: 'S',
119879: 'T',
119880: 'U',
119881: 'V',
119882: 'W',
119883: 'X',
119884: 'Y',
119885: 'Z',
119886: 'a',
119887: 'b',
119888: 'c',
119889: 'd',
119890: 'e',
119891: 'f',
119892: 'g',
119894: 'i',
119895: 'j',
119896: 'k',
119897: 'l',
119898: 'm',
119899: 'n',
119900: 'o',
119901: 'p',
119902: 'q',
119903: 'r',
119904: 's',
119905: 't',
119906: 'u',
119907: 'v',
119908: 'w',
119909: 'x',
119910: 'y',
119911: 'z',
119964: '\\mathcal{A}',
119966: '\\mathcal{C}',
119967: '\\mathcal{D}',
119970: '\\mathcal{G}',
119973: '\\mathcal{J}',
119974: '\\mathcal{K}',
119977: '\\mathcal{N}',
119978: '\\mathcal{O}',
119979: '\\mathcal{P}',
119980: '\\mathcal{Q}',
119982: '\\mathcal{S}',
119983: '\\mathcal{T}',
119984: '\\mathcal{U}',
119985: '\\mathcal{V}',
119986: '\\mathcal{W}',
119987: '\\mathcal{X}',
119988: '\\mathcal{Y}',
119989: '\\mathcal{Z}',
120068: '\\mathfrak{A}',
120069: '\\mathfrak{B}',
120071: '\\mathfrak{D}',
120072: '\\mathfrak{E}',
120073: '\\mathfrak{F}',
120074: '\\mathfrak{G}',
120077: '\\mathfrak{J}',
120078: '\\mathfrak{K}',
120079: '\\mathfrak{L}',
120080: '\\mathfrak{M}',
120081: '\\mathfrak{N}',
120082: '\\mathfrak{O}',
120083: '\\mathfrak{P}',
120084: '\\mathfrak{Q}',
120086: '\\mathfrak{S}',
120087: '\\mathfrak{T}',
120088: '\\mathfrak{U}',
120089: '\\mathfrak{V}',
120090: '\\mathfrak{W}',
120091: '\\mathfrak{X}',
120092: '\\mathfrak{Y}',
120094: '\\mathfrak{a}',
120095: '\\mathfrak{b}',
120096: '\\mathfrak{c}',
120097: '\\mathfrak{d}',
120098: '\\mathfrak{e}',
120099: '\\mathfrak{f}',
120100: '\\mathfrak{g}',
120101: '\\mathfrak{h}',
120102: '\\mathfrak{i}',
120103: '\\mathfrak{j}',
120104: '\\mathfrak{k}',
120105: '\\mathfrak{l}',
120106: '\\mathfrak{m}',
120107: '\\mathfrak{n}',
120108: '\\mathfrak{o}',
120109: '\\mathfrak{p}',
120110: '\\mathfrak{q}',
120111: '\\mathfrak{r}',
120112: '\\mathfrak{s}',
120113: '\\mathfrak{t}',
120114: '\\mathfrak{u}',
120115: '\\mathfrak{v}',
120116: '\\mathfrak{w}',
120117: '\\mathfrak{x}',
120118: '\\mathfrak{y}',
120119: '\\mathfrak{z}',
120120: '\\mathbb{A}',
120121: '\\mathbb{B}',
120123: '\\mathbb{D}',
120124: '\\mathbb{E}',
120125: '\\mathbb{F}',
120126: '\\mathbb{G}',
120128: '\\mathbb{I}',
120129: '\\mathbb{J}',
120130: '\\mathbb{K}',
120131: '\\mathbb{L}',
120132: '\\mathbb{M}',
120134: '\\mathbb{O}',
120138: '\\mathbb{S}',
120139: '\\mathbb{T}',
120140: '\\mathbb{U}',
120141: '\\mathbb{V}',
120142: '\\mathbb{W}',
120143: '\\mathbb{X}',
120144: '\\mathbb{Y}',
120156: '\\Bbbk ',
120224: '\\mathsf{A}',
120225: '\\mathsf{B}',
120226: '\\mathsf{C}',
120227: '\\mathsf{D}',
120228: '\\mathsf{E}',
120229: '\\mathsf{F}',
120230: '\\mathsf{G}',
120231: '\\mathsf{H}',
120232: '\\mathsf{I}',
120233: '\\mathsf{J}',
120234: '\\mathsf{K}',
120235: '\\mathsf{L}',
120236: '\\mathsf{M}',
120237: '\\mathsf{N}',
120238: '\\mathsf{O}',
120239: '\\mathsf{P}',
120240: '\\mathsf{Q}',
120241: '\\mathsf{R}',
120242: '\\mathsf{S}',
120243: '\\mathsf{T}',
120244: '\\mathsf{U}',
120245: '\\mathsf{V}',
120246: '\\mathsf{W}',
120247: '\\mathsf{X}',
120248: '\\mathsf{Y}',
120249: '\\mathsf{Z}',
120250: '\\mathsf{a}',
120251: '\\mathsf{b}',
120252: '\\mathsf{c}',
120253: '\\mathsf{d}',
120254: '\\mathsf{e}',
120255: '\\mathsf{f}',
120256: '\\mathsf{g}',
120257: '\\mathsf{h}',
120258: '\\mathsf{i}',
120259: '\\mathsf{j}',
120260: '\\mathsf{k}',
120261: '\\mathsf{l}',
120262: '\\mathsf{m}',
120263: '\\mathsf{n}',
120264: '\\mathsf{o}',
120265: '\\mathsf{p}',
120266: '\\mathsf{q}',
120267: '\\mathsf{r}',
120268: '\\mathsf{s}',
120269: '\\mathsf{t}',
120270: '\\mathsf{u}',
120271: '\\mathsf{v}',
120272: '\\mathsf{w}',
120273: '\\mathsf{x}',
120274: '\\mathsf{y}',
120275: '\\mathsf{z}',
120432: '\\mathtt{A}',
120433: '\\mathtt{B}',
120434: '\\mathtt{C}',
120435: '\\mathtt{D}',
120436: '\\mathtt{E}',
120437: '\\mathtt{F}',
120438: '\\mathtt{G}',
120439: '\\mathtt{H}',
120440: '\\mathtt{I}',
120441: '\\mathtt{J}',
120442: '\\mathtt{K}',
120443: '\\mathtt{L}',
120444: '\\mathtt{M}',
120445: '\\mathtt{N}',
120446: '\\mathtt{O}',
120447: '\\mathtt{P}',
120448: '\\mathtt{Q}',
120449: '\\mathtt{R}',
120450: '\\mathtt{S}',
120451: '\\mathtt{T}',
120452: '\\mathtt{U}',
120453: '\\mathtt{V}',
120454: '\\mathtt{W}',
120455: '\\mathtt{X}',
120456: '\\mathtt{Y}',
120457: '\\mathtt{Z}',
120458: '\\mathtt{a}',
120459: '\\mathtt{b}',
120460: '\\mathtt{c}',
120461: '\\mathtt{d}',
120462: '\\mathtt{e}',
120463: '\\mathtt{f}',
120464: '\\mathtt{g}',
120465: '\\mathtt{h}',
120466: '\\mathtt{i}',
120467: '\\mathtt{j}',
120468: '\\mathtt{k}',
120469: '\\mathtt{l}',
120470: '\\mathtt{m}',
120471: '\\mathtt{n}',
120472: '\\mathtt{o}',
120473: '\\mathtt{p}',
120474: '\\mathtt{q}',
120475: '\\mathtt{r}',
120476: '\\mathtt{s}',
120477: '\\mathtt{t}',
120478: '\\mathtt{u}',
120479: '\\mathtt{v}',
120480: '\\mathtt{w}',
120481: '\\mathtt{x}',
120482: '\\mathtt{y}',
120483: '\\mathtt{z}',
120484: '\\imath ',
120485: '\\jmath ',
120490: '\\mathbf{\\Gamma}',
120491: '\\mathbf{\\Delta}',
120495: '\\mathbf{\\Theta}',
120498: '\\mathbf{\\Lambda}',
120501: '\\mathbf{\\Xi}',
120503: '\\mathbf{\\Pi}',
120506: '\\mathbf{\\Sigma}',
120508: '\\mathbf{\\Upsilon}',
120509: '\\mathbf{\\Phi}',
120511: '\\mathbf{\\Psi}',
120512: '\\mathbf{\\Omega}',
120548: '\\mathit{\\Gamma}',
120549: '\\mathit{\\Delta}',
120553: '\\mathit{\\Theta}',
120556: '\\mathit{\\Lambda}',
120559: '\\mathit{\\Xi}',
120561: '\\mathit{\\Pi}',
120564: '\\mathit{\\Sigma}',
120566: '\\mathit{\\Upsilon}',
120567: '\\mathit{\\Phi}',
120569: '\\mathit{\\Psi}',
120570: '\\mathit{\\Omega}',
120572: '\\alpha ',
120573: '\\beta ',
120574: '\\gamma ',
120575: '\\delta ',
120576: '\\varepsilon ',
120577: '\\zeta ',
120578: '\\eta ',
120579: '\\theta ',
120580: '\\iota ',
120581: '\\kappa ',
120582: '\\lambda ',
120583: '\\mu ',
120584: '\\nu ',
120585: '\\xi ',
120587: '\\pi ',
120588: '\\rho ',
120589: '\\varsigma ',
120590: '\\sigma ',
120591: '\\tau ',
120592: '\\upsilon ',
120593: '\\varphi ',
120594: '\\chi ',
120595: '\\psi ',
120596: '\\omega ',
120597: '\\partial ',
120598: '\\epsilon ',
120599: '\\vartheta ',
120600: '\\varkappa ',
120601: '\\phi ',
120602: '\\varrho ',
120603: '\\varpi ',
120782: '\\mathbf{0}',
120783: '\\mathbf{1}',
120784: '\\mathbf{2}',
120785: '\\mathbf{3}',
120786: '\\mathbf{4}',
120787: '\\mathbf{5}',
120788: '\\mathbf{6}',
120789: '\\mathbf{7}',
120790: '\\mathbf{8}',
120791: '\\mathbf{9}',
120802: '\\mathsf{0}',
120803: '\\mathsf{1}',
120804: '\\mathsf{2}',
120805: '\\mathsf{3}',
120806: '\\mathsf{4}',
120807: '\\mathsf{5}',
120808: '\\mathsf{6}',
120809: '\\mathsf{7}',
120810: '\\mathsf{8}',
120811: '\\mathsf{9}',
120822: '\\mathtt{0}',
120823: '\\mathtt{1}',
120824: '\\mathtt{2}',
120825: '\\mathtt{3}',
120826: '\\mathtt{4}',
120827: '\\mathtt{5}',
120828: '\\mathtt{6}',
120829: '\\mathtt{7}',
120830: '\\mathtt{8}',
120831: '\\mathtt{9}',
}
| apache-2.0 | 4,020,327,097,100,969,500 | -8,841,224,548,842,772,000 | 20.333756 | 58 | 0.566712 | false |
# The MIT License (MIT)
#
# Copyright (c) 2016 David I Urbina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import deque
from collections import namedtuple
Tree = namedtuple('Tree', 'x l r')
def breath_first_search(T):
    """Return node values of `T` in level order (breadth-first).

    a.k.a. Level-order traversal.
    Time: O(n) Space: O(n)

    Accepts any node object with attributes `x`, `l`, `r` (e.g. the Tree
    namedtuple). Returns [] for an empty tree; now also tolerates T being
    None instead of raising AttributeError. The (misspelled) name is kept
    for backward compatibility with existing callers.
    """
    sequence = []
    # `is None` instead of `== None`; also guard against T itself being None.
    if T is None or T.x is None:
        return sequence
    queue = deque([T])
    while queue:
        node = queue.popleft()
        sequence.append(node.x)
        if node.l:
            queue.append(node.l)
        if node.r:
            queue.append(node.r)
    return sequence
def preorder(T, sequence=None):
    """Append node values of `T` to `sequence` in root-left-right order.

    Time: O(n) Space: O(h) where h worst case = n, best/avg case = log n

    sequence: optional list to extend in place (a fresh list is created
        when omitted; the None-sentinel avoids the mutable-default trap).
    Returns the list, which is also mutated in place.
    """
    if sequence is None:
        sequence = []
    if T is None:
        return sequence
    sequence.append(T.x)
    # The recursive calls mutate `sequence` in place, so the returned
    # reference does not need to be re-bound.
    preorder(T.l, sequence)
    preorder(T.r, sequence)
    return sequence
def inorder(T, sequence=None):
    """Append node values of `T` to `sequence` in left-root-right order.

    Inorder traversal of a binary search tree yields its elements sorted.
    Time: O(n) Space: O(h) where h worst case = n, best/avg case = log n

    sequence: optional list to extend in place (a fresh list is created
        when omitted; the None-sentinel avoids the mutable-default trap).
    Returns the list, which is also mutated in place.
    """
    if sequence is None:
        sequence = []
    if T is None:
        return sequence
    # The recursive calls mutate `sequence` in place; no re-binding needed.
    inorder(T.l, sequence)
    sequence.append(T.x)
    inorder(T.r, sequence)
    return sequence
def postorder(T, sequence=None):
    """Append node values of `T` to `sequence` in left-right-root order.

    Time: O(n) Space: O(h) where h worst case = n, best/avg case = log n

    sequence: optional list to extend in place (a fresh list is created
        when omitted; the None-sentinel avoids the mutable-default trap).
    Returns the list, which is also mutated in place.
    """
    if sequence is None:
        sequence = []
    if T is None:
        return sequence
    # The recursive calls mutate `sequence` in place; no re-binding needed.
    postorder(T.l, sequence)
    postorder(T.r, sequence)
    sequence.append(T.x)
    return sequence
# General binary tree that is NOT a binary search tree (values unordered).
NO_BST = Tree(10,
              Tree(1,
                   Tree(2,
                        Tree(4,
                             Tree(19, None, None),
                             None),
                        Tree(8, None, None)),
                   Tree(7, None, None)),
              Tree(15,
                   Tree(16, None,
                        Tree(3, None, None)),
                   None))
# Valid binary search tree: an inorder traversal yields 1..10 in order.
BST = Tree(7,
           Tree(5,
                Tree(3,
                     Tree(2,
                          Tree(1, None, None),
                          None),
                     Tree(4, None, None)),
                Tree(6, None, None)),
           Tree(10,
                Tree(8, None,
                     Tree(9, None, None)),
                None))
# Duplicate value in the LEFT subtree -- presumably valid under a
# "left <= root" BST convention; not exercised by the tests below (TODO confirm).
BST2 = Tree(20,
            Tree(20, None, None),
            None)
# Duplicate value in the RIGHT subtree -- presumably NOT a valid BST under
# the same convention; not exercised by the tests below (TODO confirm).
NO_BST2 = Tree(20, None,
               Tree(20, None, None), )
def test_breath_first_search():
    """BFS visits NO_BST level by level, left to right."""
    assert breath_first_search(NO_BST) == [10, 1, 15, 2, 7, 16, 4, 8, 3, 19]
def test_preorder():
    """Preorder yields root-left-right order for NO_BST."""
    assert preorder(NO_BST) == [10, 1, 2, 4, 19, 8, 7, 15, 16, 3]
def test_inorder():
    """Inorder on a non-BST yields left-root-right order, not sorted order."""
    assert inorder(NO_BST) == [19, 4, 2, 8, 1, 7, 10, 16, 3, 15]
def test_inorder_on_bst():
    """Inorder traversal of a binary search tree yields sorted values."""
    assert inorder(BST) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def test_postorder():
    """Postorder yields left-right-root order for NO_BST."""
    assert postorder(NO_BST) == [19, 4, 8, 2, 7, 1, 3, 16, 15, 10]
| mit | -1,581,149,797,334,131,000 | 2,470,744,428,117,986,300 | 25.333333 | 80 | 0.567471 | false |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions for working with time series saved_models.
@@predict_continuation
@@filter_continuation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import feature_keys as _feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as _head
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline as _input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model_utils as _model_utils
from tensorflow.python.util.all_util import remove_undocumented
def _colate_features_to_feeds_and_fetches(continue_from, signature, features,
                                          graph):
  """Uses a saved model signature to construct feed and fetch dictionaries."""
  state_tuple_key = _feature_keys.FilteringResults.STATE_TUPLE
  if state_tuple_key in continue_from:
    # Continuing from an evaluation: unpack/flatten the nested model state.
    state_values = _head.state_to_dictionary(continue_from[state_tuple_key])
  else:
    state_values = continue_from
  # Resolve the signature's symbolic tensor names to Tensors in `graph`.
  input_feed_tensors_by_name = {}
  for input_key, input_value in signature.inputs.items():
    input_feed_tensors_by_name[input_key] = graph.as_graph_element(
        input_value.name)
  output_tensors_by_name = {}
  for output_key, output_value in signature.outputs.items():
    output_tensors_by_name[output_key] = graph.as_graph_element(
        output_value.name)
  # Feed both the flattened state and the caller-provided features; a
  # feature entry sharing a key with the state takes precedence, as before.
  feed_dict = {
      input_feed_tensors_by_name[key]: value
      for key, value in list(state_values.items()) + list(features.items())}
  return output_tensors_by_name, feed_dict
def predict_continuation(continue_from,
signatures,
session,
steps=None,
times=None,
exogenous_features=None):
"""Perform prediction using an exported saved model.
Analogous to _input_pipeline.predict_continuation_input_fn, but operates on a
saved model rather than feeding into Estimator's predict method.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or filter_continuation. Used to determine the model
state to make predictions starting from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.saved_model.loader.load`. Used to determine the names of Tensors to
feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.saved_model.loader.load` loaded the model.
steps: The number of steps to predict (scalar), starting after the
evaluation or filtering. If `times` is specified, `steps` must not be; one
is required.
times: A [batch_size x window_size] array of integers (not a Tensor)
indicating times to make predictions for. These times must be after the
corresponding evaluation or filtering. If `steps` is specified, `times`
must not be; one is required. If the batch dimension is omitted, it is
assumed to be 1.
exogenous_features: Optional dictionary. If specified, indicates exogenous
features for the model to use while making the predictions. Values must
have shape [batch_size x window_size x ...], where `batch_size` matches
the batch dimension used when creating `continue_from`, and `window_size`
is either the `steps` argument or the `window_size` of the `times`
argument (depending on which was specified).
Returns:
A dictionary with model-specific predictions (typically having keys "mean"
and "covariance") and a feature_keys.PredictionResults.TIMES key indicating
the times for which the predictions were computed.
Raises:
ValueError: If `times` or `steps` are misspecified.
"""
if exogenous_features is None:
exogenous_features = {}
predict_times = _model_utils.canonicalize_times_or_steps_from_output(
times=times, steps=steps, previous_model_output=continue_from)
features = {_feature_keys.PredictionFeatures.TIMES: predict_times}
features.update(exogenous_features)
predict_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.PREDICT]
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=predict_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
output[_feature_keys.PredictionResults.TIMES] = features[
_feature_keys.PredictionFeatures.TIMES]
return output
def filter_continuation(continue_from, signatures, session, features):
"""Perform filtering using an exported saved model.
Filtering refers to updating model state based on new observations.
Predictions based on the returned model state will be conditioned on these
observations.
Args:
continue_from: A dictionary containing the results of either an Estimator's
evaluate method or a previous filter_continuation. Used to determine the
model state to start filtering from.
signatures: The `MetaGraphDef` protocol buffer returned from
`tf.saved_model.loader.load`. Used to determine the names of Tensors to
feed and fetch. Must be from the same model as `continue_from`.
session: The session to use. The session's graph must be the one into which
`tf.saved_model.loader.load` loaded the model.
features: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `FilteringFeatures.TIMES` and
`FilteringFeatures.VALUES`):
Single example; `TIMES` is a scalar and `VALUES` is either a scalar or a
vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate).
Batch of sequences; `TIMES` is a vector of shape [batch size x series
length], `VALUES` has shape [batch size x series length] or [batch
size x series length x number of features].
In any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
Returns:
A dictionary containing model state updated to account for the observations
in `features`.
"""
filter_signature = signatures.signature_def[
_feature_keys.SavedModelLabels.FILTER]
features = _input_pipeline._canonicalize_numpy_data( # pylint: disable=protected-access
data=features,
require_single_batch=False)
output_tensors_by_name, feed_dict = _colate_features_to_feeds_and_fetches(
continue_from=continue_from,
signature=filter_signature,
features=features,
graph=session.graph)
output = session.run(output_tensors_by_name, feed_dict=feed_dict)
# Make it easier to chain filter -> predict by keeping track of the current
# time.
output[_feature_keys.FilteringResults.TIMES] = features[
_feature_keys.FilteringFeatures.TIMES]
return output
remove_undocumented(module_name=__name__)
| apache-2.0 | 2,954,796,195,493,045,000 | -3,931,219,560,690,091,500 | 46.970238 | 93 | 0.715101 | false |
jclc/discus-inferno | flaskenv/lib/python2.7/site-packages/gunicorn/pidfile.py | 7 | 2273 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import errno
import os
import tempfile
class Pidfile(object):
"""\
Manage a PID file. If a specific name is provided
it and '"%s.oldpid" % name' will be used. Otherwise
we create a temp file using os.mkstemp.
"""
def __init__(self, fname):
self.fname = fname
self.pid = None
def create(self, pid):
oldpid = self.validate()
if oldpid:
if oldpid == os.getpid():
return
raise RuntimeError("Already running on PID %s " \
"(or pid file '%s' is stale)" % (os.getpid(), self.fname))
self.pid = pid
# Write pidfile
fdir = os.path.dirname(self.fname)
if fdir and not os.path.isdir(fdir):
raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
fd, fname = tempfile.mkstemp(dir=fdir)
os.write(fd, "%s\n" % self.pid)
if self.fname:
os.rename(fname, self.fname)
else:
self.fname = fname
os.close(fd)
# set permissions to -rw-r--r--
os.chmod(self.fname, 420)
def rename(self, path):
self.unlink()
self.fname = path
self.create(self.pid)
def unlink(self):
""" delete pidfile"""
try:
with open(self.fname, "r") as f:
pid1 = int(f.read() or 0)
if pid1 == self.pid:
os.unlink(self.fname)
except:
pass
def validate(self):
""" Validate pidfile and make it stale if needed"""
if not self.fname:
return
try:
with open(self.fname, "r") as f:
wpid = int(f.read() or 0)
if wpid <= 0:
return
try:
os.kill(wpid, 0)
return wpid
except OSError as e:
if e.args[0] == errno.ESRCH:
return
raise
except IOError as e:
if e.args[0] == errno.ENOENT:
return
raise
| mit | 8,155,939,771,706,215,000 | 3,928,441,909,798,102,000 | 25.430233 | 80 | 0.494061 | false |
ogenstad/ansible | contrib/inventory/nagios_ndo.py | 43 | 3807 | #!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
import sys
try:
import configparser
except ImportError:
import ConfigParser
configparser = ConfigParser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
sys.exit("usage: --list or --host HOSTNAME")
else:
sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
NagiosNDOInventory()
| gpl-3.0 | 8,259,538,464,803,026,000 | 1,526,288,395,619,009,800 | 34.25 | 112 | 0.612818 | false |
espadrine/opera | chromium/src/third_party/chromite/scripts/check_gdata_token_unittest.py | 4 | 18523 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for cros_portage_upgrade.py."""
import filecmp
import mox
import os
import shutil
import gdata.service
from gdata.projecthosting import client as gdata_ph_client
from gdata.spreadsheet import service as gdata_ss_service
from chromite.lib import cros_build_lib as build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gdata_lib
from chromite.scripts import check_gdata_token as cgt
# pylint: disable=W0212,R0904,E1120,E1101
class MainTest(cros_test_lib.MoxOutputTestCase):
"""Test argument handling at the main method level."""
def testHelp(self):
"""Test that --help is functioning"""
argv = [ '--help' ]
with self.OutputCapturer() as output:
# Running with --help should exit with code==0.
self.AssertFuncSystemExitZero(cgt.main, argv)
# Verify that a message beginning with "Usage: " was printed.
stdout = output.GetStdout()
self.assertTrue(stdout.startswith('Usage: '))
def testMainOutsideChroot(self):
"""Test flow outside chroot"""
argv = []
mocked_outsidechroot = self.mox.CreateMock(cgt.OutsideChroot)
# Create replay script.
self.mox.StubOutWithMock(build_lib, 'IsInsideChroot')
self.mox.StubOutWithMock(cgt.OutsideChroot, '__new__')
build_lib.IsInsideChroot().AndReturn(False)
cgt.OutsideChroot.__new__(cgt.OutsideChroot, argv,
).AndReturn(mocked_outsidechroot)
mocked_outsidechroot.Run()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.main(argv)
self.mox.VerifyAll()
def testMainInsideChroot(self):
"""Test flow inside chroot"""
argv = []
mocked_insidechroot = self.mox.CreateMock(cgt.InsideChroot)
# Create replay script.
self.mox.StubOutWithMock(build_lib, 'IsInsideChroot')
self.mox.StubOutWithMock(cgt.InsideChroot, '__new__')
build_lib.IsInsideChroot().AndReturn(True)
cgt.InsideChroot.__new__(cgt.InsideChroot
).AndReturn(mocked_insidechroot)
mocked_insidechroot.Run()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.main(argv)
self.mox.VerifyAll()
class OutsideChrootTest(cros_test_lib.MoxOutputTestCase):
"""Test flow when run outside chroot."""
def _MockOutsideChroot(self, *args):
"""Prepare mocked OutsideChroot object with |args|."""
mocked_outsidechroot = self.mox.CreateMock(cgt.OutsideChroot)
mocked_outsidechroot.args = list(args) if args else []
return mocked_outsidechroot
def testOutsideChrootRestartFail(self):
mocked_outsidechroot = self._MockOutsideChroot()
self.mox.StubOutWithMock(build_lib, 'RunCommand')
cmd = ['check_gdata_token']
run_result = cros_test_lib.EasyAttr(returncode=1)
# Create replay script.
build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False,
error_code_ok=True).AndReturn(run_result)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
# Test should exit with failure.
self.AssertFuncSystemExitNonZero(cgt.OutsideChroot.Run,
mocked_outsidechroot)
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testOutsideChrootNoTokenFile(self):
mocked_outsidechroot = self._MockOutsideChroot('foo')
self.mox.StubOutWithMock(cgt, '_ChrootPathToExternalPath')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(build_lib, 'RunCommand')
cmd = ['check_gdata_token', 'foo']
run_result = cros_test_lib.EasyAttr(returncode=0)
# Create replay script.
build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False,
error_code_ok=True).AndReturn(run_result)
cgt._ChrootPathToExternalPath(cgt.TOKEN_FILE).AndReturn('chr-tok')
os.path.exists('chr-tok').AndReturn(False)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
# Test should exit with failure.
self.AssertFuncSystemExitNonZero(cgt.OutsideChroot.Run,
mocked_outsidechroot)
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testOutsideChrootNewTokenFile(self):
mocked_outsidechroot = self._MockOutsideChroot('foo')
self.mox.StubOutWithMock(cgt, '_ChrootPathToExternalPath')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'copy2')
self.mox.StubOutWithMock(build_lib, 'RunCommand')
cmd = ['check_gdata_token', 'foo']
run_result = cros_test_lib.EasyAttr(returncode=0)
# Create replay script.
build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False,
error_code_ok=True).AndReturn(run_result)
cgt._ChrootPathToExternalPath(cgt.TOKEN_FILE).AndReturn('chr-tok')
os.path.exists('chr-tok').AndReturn(True)
os.path.exists(cgt.TOKEN_FILE).AndReturn(False)
shutil.copy2('chr-tok', cgt.TOKEN_FILE)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.OutsideChroot.Run(mocked_outsidechroot)
self.mox.VerifyAll()
def testOutsideChrootDifferentTokenFile(self):
mocked_outsidechroot = self._MockOutsideChroot('foo')
self.mox.StubOutWithMock(cgt, '_ChrootPathToExternalPath')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'copy2')
self.mox.StubOutWithMock(filecmp, 'cmp')
self.mox.StubOutWithMock(build_lib, 'RunCommand')
cmd = ['check_gdata_token', 'foo']
run_result = cros_test_lib.EasyAttr(returncode=0)
# Create replay script.
build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False,
error_code_ok=True).AndReturn(run_result)
cgt._ChrootPathToExternalPath(cgt.TOKEN_FILE).AndReturn('chr-tok')
os.path.exists('chr-tok').AndReturn(True)
os.path.exists(cgt.TOKEN_FILE).AndReturn(True)
filecmp.cmp(cgt.TOKEN_FILE, 'chr-tok').AndReturn(False)
shutil.copy2('chr-tok', cgt.TOKEN_FILE)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.OutsideChroot.Run(mocked_outsidechroot)
self.mox.VerifyAll()
def testOutsideChrootNoChangeInTokenFile(self):
mocked_outsidechroot = self._MockOutsideChroot('foo')
self.mox.StubOutWithMock(cgt, '_ChrootPathToExternalPath')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(filecmp, 'cmp')
self.mox.StubOutWithMock(build_lib, 'RunCommand')
cmd = ['check_gdata_token', 'foo']
run_result = cros_test_lib.EasyAttr(returncode=0)
# Create replay script.
build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False,
error_code_ok=True).AndReturn(run_result)
cgt._ChrootPathToExternalPath(cgt.TOKEN_FILE).AndReturn('chr-tok')
os.path.exists('chr-tok').AndReturn(True)
os.path.exists(cgt.TOKEN_FILE).AndReturn(True)
filecmp.cmp(cgt.TOKEN_FILE, 'chr-tok').AndReturn(True)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.OutsideChroot.Run(mocked_outsidechroot)
self.mox.VerifyAll()
class InsideChrootTest(cros_test_lib.MoxOutputTestCase):
"""Test flow when run inside chroot."""
def _MockInsideChroot(self):
"""Prepare mocked OutsideChroot object."""
mic = self.mox.CreateMock(cgt.InsideChroot)
mic.creds = self.mox.CreateMock(gdata_lib.Creds)
mic.gd_client = self.mox.CreateMock(gdata_ss_service.SpreadsheetsService)
mic.it_client = self.mox.CreateMock(gdata_ph_client.ProjectHostingClient)
return mic
def testLoadTokenFile(self):
mocked_insidechroot = self._MockInsideChroot()
self.mox.StubOutWithMock(os.path, 'exists')
# Create replay script
os.path.exists(cgt.TOKEN_FILE).AndReturn(True)
mocked_insidechroot.creds.LoadAuthToken(cgt.TOKEN_FILE)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._LoadTokenFile(mocked_insidechroot)
self.mox.VerifyAll()
self.assertTrue(result)
def testSaveTokenFile(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot.creds.StoreAuthTokenIfNeeded(cgt.TOKEN_FILE)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.InsideChroot._SaveTokenFile(mocked_insidechroot)
self.mox.VerifyAll()
def testLoadTokenFileMissing(self):
mocked_insidechroot = self._MockInsideChroot()
self.mox.StubOutWithMock(os.path, 'exists')
# Create replay script
os.path.exists(cgt.TOKEN_FILE).AndReturn(False)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._LoadTokenFile(mocked_insidechroot)
self.mox.VerifyAll()
self.assertFalse(result)
def testInsideChrootValidateOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot._LoadTokenFile()
mocked_insidechroot._ValidateTrackerToken().AndReturn(True)
mocked_insidechroot._ValidateDocsToken().AndReturn(True)
mocked_insidechroot._SaveTokenFile()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.InsideChroot.Run(mocked_insidechroot)
self.mox.VerifyAll()
def testInsideChrootTrackerValidateFailGenerateOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot._LoadTokenFile()
mocked_insidechroot._ValidateTrackerToken().AndReturn(True)
mocked_insidechroot._ValidateDocsToken().AndReturn(False)
mocked_insidechroot._GenerateDocsToken().AndReturn(True)
mocked_insidechroot._SaveTokenFile()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.InsideChroot.Run(mocked_insidechroot)
self.mox.VerifyAll()
def testInsideChrootDocsValidateFailGenerateOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot._LoadTokenFile()
mocked_insidechroot._ValidateTrackerToken().AndReturn(False)
mocked_insidechroot._GenerateTrackerToken().AndReturn(True)
mocked_insidechroot._ValidateDocsToken().AndReturn(True)
mocked_insidechroot._SaveTokenFile()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
cgt.InsideChroot.Run(mocked_insidechroot)
self.mox.VerifyAll()
def testInsideChrootTrackerValidateFailGenerateFail(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot._LoadTokenFile()
mocked_insidechroot._ValidateTrackerToken().AndReturn(False)
mocked_insidechroot._GenerateTrackerToken().AndReturn(False)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
# Test should exit with failure.
self.AssertFuncSystemExitNonZero(cgt.InsideChroot.Run,
mocked_insidechroot)
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testInsideChrootDocsValidateFailGenerateFail(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_insidechroot._LoadTokenFile()
mocked_insidechroot._ValidateTrackerToken().AndReturn(True)
mocked_insidechroot._ValidateDocsToken().AndReturn(False)
mocked_insidechroot._GenerateDocsToken().AndReturn(False)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
# Test should exit with failure.
self.AssertFuncSystemExitNonZero(cgt.InsideChroot.Run,
mocked_insidechroot)
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testGenerateTrackerTokenOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_creds = mocked_insidechroot.creds
mocked_itclient = mocked_insidechroot.it_client
mocked_creds.user = 'joe@chromium.org'
mocked_creds.password = 'shhh'
auth_token = 'SomeToken'
mocked_itclient.auth_token = cros_test_lib.EasyAttr(token_string=auth_token)
mocked_creds.LoadCreds(cgt.CRED_FILE)
mocked_itclient.ClientLogin(mocked_creds.user, mocked_creds.password,
source='Package Status', service='code',
account_type='GOOGLE')
mocked_creds.SetTrackerAuthToken(auth_token)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._GenerateTrackerToken(mocked_insidechroot)
self.assertTrue(result, '_GenerateTrackerToken should have passed')
self.mox.VerifyAll()
def testGenerateTrackerTokenFail(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_creds = mocked_insidechroot.creds
mocked_itclient = mocked_insidechroot.it_client
mocked_creds.user = 'joe@chromium.org'
mocked_creds.password = 'shhh'
mocked_creds.LoadCreds(cgt.CRED_FILE)
mocked_itclient.ClientLogin(mocked_creds.user, mocked_creds.password,
source='Package Status', service='code',
account_type='GOOGLE'
).AndRaise(gdata.client.BadAuthentication())
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._GenerateTrackerToken(mocked_insidechroot)
self.assertFalse(result, '_GenerateTrackerToken should have failed')
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testValidateTrackerTokenOK(self):
mocked_insidechroot = self._MockInsideChroot()
mocked_itclient = mocked_insidechroot.it_client
self.mox.StubOutWithMock(gdata.gauth.ClientLoginToken, '__new__')
# Create replay script.
auth_token = 'SomeToken'
mocked_insidechroot.creds.tracker_auth_token = auth_token
gdata.gauth.ClientLoginToken.__new__(gdata.gauth.ClientLoginToken,
auth_token).AndReturn('TokenObj')
mocked_itclient.get_issues('chromium-os', query=mox.IgnoreArg())
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._ValidateTrackerToken(mocked_insidechroot)
self.mox.VerifyAll()
self.assertTrue(result, '_ValidateTrackerToken should have passed')
def testValidateTrackerTokenFail(self):
mocked_insidechroot = self._MockInsideChroot()
mocked_itclient = mocked_insidechroot.it_client
self.mox.StubOutWithMock(gdata.gauth.ClientLoginToken, '__new__')
# Create replay script.
auth_token = 'SomeToken'
mocked_insidechroot.creds.tracker_auth_token = auth_token
gdata.gauth.ClientLoginToken.__new__(gdata.gauth.ClientLoginToken,
auth_token).AndReturn('TokenObj')
mocked_itclient.get_issues('chromium-os', query=mox.IgnoreArg()
).AndRaise(gdata.client.Error())
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._ValidateTrackerToken(mocked_insidechroot)
self.assertFalse(result, '_ValidateTrackerToken should have failed')
self.mox.VerifyAll()
def testGenerateDocsTokenOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_creds = mocked_insidechroot.creds
mocked_gdclient = mocked_insidechroot.gd_client
mocked_creds.user = 'joe@chromium.org'
mocked_creds.password = 'shhh'
auth_token = 'SomeToken'
mocked_creds.LoadCreds(cgt.CRED_FILE)
mocked_gdclient.ProgrammaticLogin()
mocked_gdclient.GetClientLoginToken().AndReturn(auth_token)
mocked_creds.SetDocsAuthToken(auth_token)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._GenerateDocsToken(mocked_insidechroot)
self.assertTrue(result, '_GenerateDocsToken should have passed')
self.mox.VerifyAll()
def testGenerateDocsTokenFail(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
mocked_creds = mocked_insidechroot.creds
mocked_gdclient = mocked_insidechroot.gd_client
mocked_creds.user = 'joe@chromium.org'
mocked_creds.password = 'shhh'
mocked_creds.LoadCreds(cgt.CRED_FILE)
mocked_gdclient.ProgrammaticLogin(
).AndRaise(gdata.service.BadAuthentication())
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._GenerateDocsToken(mocked_insidechroot)
self.assertFalse(result, '_GenerateTrackerToken should have failed')
self.mox.VerifyAll()
self.AssertOutputContainsError()
def testValidateDocsTokenOK(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
auth_token = 'SomeToken'
mocked_insidechroot.creds.docs_auth_token = auth_token
mocked_insidechroot.gd_client.SetClientLoginToken(auth_token)
mocked_insidechroot.gd_client.GetSpreadsheetsFeed()
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._ValidateDocsToken(mocked_insidechroot)
self.assertTrue(result, '_ValidateDocsToken should have passed')
self.mox.VerifyAll()
def testValidateDocsTokenFail(self):
mocked_insidechroot = self._MockInsideChroot()
# Create replay script.
auth_token = 'SomeToken'
mocked_insidechroot.creds.docs_auth_token = auth_token
mocked_insidechroot.gd_client.SetClientLoginToken(auth_token)
expired_error = gdata.service.RequestError({'reason': 'Token expired'})
mocked_insidechroot.gd_client.GetSpreadsheetsFeed().AndRaise(expired_error)
self.mox.ReplayAll()
# Run test verification.
with self.OutputCapturer():
result = cgt.InsideChroot._ValidateDocsToken(mocked_insidechroot)
self.assertFalse(result, '_ValidateDocsToken should have failed')
self.mox.VerifyAll()
if __name__ == '__main__':
cros_test_lib.main()
| bsd-3-clause | -3,875,011,395,666,599,400 | 8,095,199,291,081,777,000 | 33.883239 | 80 | 0.699671 | false |
shubhdev/edx-platform | pavelib/servers.py | 30 | 6182 | """
Run and manage servers for local development.
"""
from __future__ import print_function
import sys
import argparse
from paver.easy import *
from .utils.cmd import django_cmd
from .utils.process import run_process, run_multi_processes
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
DEFAULT_SETTINGS = 'devstack'
def run_server(system, settings=None, port=None, skip_assets=False, contracts=False):
"""
Start the server for the specified `system` (lms or studio).
`settings` is the Django settings module to use; if not provided, use the default.
`port` is the port to run the server on; if not provided, use the default port for the system.
If `skip_assets` is True, skip the asset compilation step.
"""
if system not in ['lms', 'studio']:
print("System must be either lms or studio", file=sys.stderr)
exit(1)
if not settings:
settings = DEFAULT_SETTINGS
if not skip_assets:
# Local dev settings use staticfiles to serve assets, so we can skip the collecstatic step
args = [system, '--settings={}'.format(settings), '--skip-collect', '--watch']
call_task('pavelib.assets.update_assets', args=args)
if port is None:
port = DEFAULT_PORT[system]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', '0.0.0.0:{}'.format(port)]
if contracts:
args.append("--contracts")
run_process(django_cmd(system, *args))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets")
])
def lms(options):
"""
Run the LMS server.
"""
settings = getattr(options, 'settings', None)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server('lms', settings=settings, port=port, skip_assets=fast)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets")
])
def studio(options):
"""
Run the Studio server.
"""
settings = getattr(options, 'settings', None)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server('studio', settings=settings, port=port, skip_assets=fast)
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@no_help
def devstack(args):
"""
Start the devstack lms or studio server
"""
parser = argparse.ArgumentParser(prog='paver devstack')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
parser.add_argument(
'--no-contracts',
action='store_true',
default=False,
help="Disable contracts. By default, they're enabled in devstack."
)
args = parser.parse_args(args)
run_server(args.system[0], settings='devstack', skip_assets=args.fast, contracts=(not args.no_contracts))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
])
def celery(options):
"""
Runs Celery workers.
"""
settings = getattr(options, 'settings', 'dev_with_worker')
run_process(django_cmd('lms', settings, 'celery', 'worker', '--loglevel=INFO', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings for both LMS and Studio"),
("worker_settings=", "w", "Celery worker Django settings"),
("fast", "f", "Skip updating assets"),
("settings_lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
("settings_cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
])
def run_all_servers(options):
"""
Runs Celery workers, Studio, and LMS.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
settings_lms = getattr(options, 'settings_lms', settings)
settings_cms = getattr(options, 'settings_cms', settings)
worker_settings = getattr(options, 'worker_settings', 'dev_with_worker')
fast = getattr(options, 'fast', False)
if not fast:
args = ['lms', '--settings={}'.format(settings_lms), '--skip-collect']
call_task('pavelib.assets.update_assets', args=args)
args = ['studio', '--settings={}'.format(settings_cms), '--skip-collect']
call_task('pavelib.assets.update_assets', args=args)
call_task('pavelib.assets.watch_assets', options={'background': True})
run_multi_processes([
django_cmd('lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', "0.0.0.0:{}".format(DEFAULT_PORT['lms'])),
django_cmd('studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', "0.0.0.0:{}".format(DEFAULT_PORT['studio'])),
django_cmd('lms', worker_settings, 'celery', 'worker', '--loglevel=INFO', '--pythonpath=.')
])
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
])
def update_db():
"""
Runs syncdb and then migrate.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
for system in ('lms', 'cms'):
sh(django_cmd(system, settings, 'syncdb', '--migrate', '--traceback', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def check_settings(args):
"""
Checks settings files.
"""
parser = argparse.ArgumentParser(prog='paver check_settings')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('settings', type=str, nargs=1, help='Django settings')
args = parser.parse_args(args)
system = args.system[0]
settings = args.settings[0]
try:
import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
except:
print("Failed to import settings", file=sys.stderr)
| agpl-3.0 | -4,279,587,121,030,844,000 | 4,174,256,675,274,754,000 | 32.597826 | 134 | 0.641055 | false |
rembo10/headphones | lib/pygazelle/inbox.py | 26 | 3948 | class MailboxMessage(object):
def __init__(self, api, message):
self.id = message['convId']
self.conv = Conversation(api, self.id)
self.subject = message['subject']
self.unread = message['unread']
self.sticky = message['sticky']
self.fwd_id = message['forwardedId']
self.fwd_name = message['forwardedName']
self.sender_id = message['senderId']
self.username = message['username']
self.donor = message['donor']
self.warned = message['warned']
self.enabled = message['enabled']
self.date = message['date']
def __repr__(self):
return "MailboxMessage ID %s - %s %s %s" % (self.id, self.subject, self.sender_id, self.username)
class ConversationMessage(object):
    """A single message inside a conversation, built from an API response dict."""

    # (JSON key, attribute name) pairs for the fields we expose.
    _FIELDS = (
        ('messageId', 'id'),
        ('senderId', 'sender_id'),
        ('senderName', 'sender_name'),
        ('sentDate', 'sent_date'),
        ('bbBody', 'bb_body'),
        ('body', 'body'),
    )

    def __init__(self, msg_resp):
        for json_key, attr_name in self._FIELDS:
            setattr(self, attr_name, msg_resp[json_key])

    def __repr__(self):
        return "ConversationMessage ID %s - %s %s" % (
            self.id, self.sender_name, self.sent_date)
class Conversation(object):
    """A private-message conversation thread, fetched lazily from the API."""

    def __init__(self, api, conv_id):
        self.id = conv_id
        self.parent_api = api
        # Populated by set_conv_data() / update_conv_data().
        self.subject = None
        self.sticky = None
        self.messages = []

    def set_conv_data(self, conv_resp):
        """Populate this object from a parsed 'viewconv' API response."""
        assert self.id == conv_resp['convId']
        self.subject = conv_resp['subject']
        self.sticky = conv_resp['sticky']
        self.messages = [ConversationMessage(msg) for msg in conv_resp['messages']]

    def update_conv_data(self):
        """Fetch the latest conversation data from the API and apply it."""
        response = self.parent_api.request(
            action='inbox', type='viewconv', id=self.id)
        self.set_conv_data(response)

    def __repr__(self):
        return "Conversation ID %s - %s" % (self.id, self.subject)
class Mailbox(object):
    """
    This class represents the logged in user's inbox/sentbox.

    Page numbers are kept as strings, matching what the API returns.
    `total_pages` stays None until update_mbox_data()/set_mbox_data() runs;
    pagination raises ValueError before that.
    """
    def __init__(self, parent_api, boxtype='inbox', page='1', sort='unread'):
        self.parent_api = parent_api
        self.boxtype = boxtype
        self.current_page = page
        self.total_pages = None
        self.sort = sort
        self.messages = None

    def set_mbox_data(self, mbox_resp):
        """
        Takes parsed JSON response from 'inbox' action on api
        and updates the available subset of mailbox information.
        """
        self.current_page = mbox_resp['currentPage']
        self.total_pages = mbox_resp['pages']
        self.messages = \
            [MailboxMessage(self.parent_api, m) for m in mbox_resp['messages']]

    def update_mbox_data(self):
        """Fetch the current page of this mailbox from the API."""
        response = self.parent_api.request(action='inbox',
            type=self.boxtype, page=self.current_page, sort=self.sort)
        self.set_mbox_data(response)

    def _page_bounds(self):
        """Return (current_page, total_pages) as ints; raise if data not loaded."""
        if not self.total_pages:
            raise ValueError("call update_mbox_data() first")
        return int(self.current_page), int(self.total_pages)

    def _adjacent_page(self, new_page):
        """Build a Mailbox for `new_page` with the same box type and sort order."""
        return Mailbox(self.parent_api, self.boxtype, str(new_page), self.sort)

    def next_page(self):
        """Return a Mailbox for the next page, or raise ValueError at the end."""
        cur_page, total_pages = self._page_bounds()
        if cur_page < total_pages:
            return self._adjacent_page(cur_page + 1)
        raise ValueError("Already at page %d/%d" % (cur_page, total_pages))

    def prev_page(self):
        """Return a Mailbox for the previous page, or raise ValueError at page 1."""
        cur_page, total_pages = self._page_bounds()
        if cur_page > 1:
            return self._adjacent_page(cur_page - 1)
        raise ValueError("Already at page %d/%d" % (cur_page, total_pages))

    def __repr__(self):
        return "Mailbox: %s %s Page %s/%s" \
            % (self.boxtype, self.sort,
               self.current_page, self.total_pages)
| gpl-3.0 | -6,934,703,569,647,166,000 | -4,086,905,143,407,700,000 | 35.897196 | 105 | 0.578267 | false |
tyagiarpit/servo | tests/wpt/harness/wptrunner/update/sync.py | 116 | 6508 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import uuid
from .. import testloader
from base import Step, StepRunner
from tree import Commit
here = os.path.abspath(os.path.split(__file__)[0])
bsd_license = """W3C 3-clause BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of works must retain the original copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the W3C nor the names of its contributors may be
used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def copy_wpt_tree(tree, dest):
    """Copy the working copy of a Tree to a destination directory.

    :param tree: The Tree to copy.
    :param dest: The destination directory"""
    if os.path.exists(dest):
        assert os.path.isdir(dest)
    # NOTE(review): rmtree runs unconditionally; it will raise if `dest`
    # does not already exist -- confirm callers guarantee existence.
    shutil.rmtree(dest)
    os.mkdir(dest)
    for tree_path in tree.paths():
        source_path = os.path.join(tree.root, tree_path)
        dest_path = os.path.join(dest, tree_path)
        dest_dir = os.path.split(dest_path)[0]
        # Only files are copied explicitly; directories are created as
        # needed for the files they contain.
        if not os.path.isdir(source_path):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            shutil.copy2(source_path, dest_path)
    # Overlay the local test-runner harness files on top of the copied tree.
    for source, destination in [("testharness_runner.html", ""),
                                ("testharnessreport.js", "resources/")]:
        source_path = os.path.join(here, os.pardir, source)
        dest_path = os.path.join(dest, destination, os.path.split(source)[1])
        shutil.copy2(source_path, dest_path)
    # The copied tests are W3C BSD-licensed; ship the license text with them.
    add_license(dest)
def add_license(dest):
    """Write the bsd license string to a LICENSE file.

    :param dest: Directory in which to place the LICENSE file."""
    license_path = os.path.join(dest, "LICENSE")
    with open(license_path, "w") as license_file:
        license_file.write(bsd_license)
class UpdateCheckout(Step):
    """Pull changes from upstream into the local sync tree."""
    provides = ["local_branch"]
    def create(self, state):
        sync_tree = state.sync_tree
        # Random branch name so repeated syncs never collide with an
        # existing local branch.
        state.local_branch = uuid.uuid4().hex
        sync_tree.update(state.sync["remote_url"],
                         state.sync["branch"],
                         state.local_branch)
        # Make the synced tree importable (update.setup_paths registers it).
        sync_path = os.path.abspath(sync_tree.root)
        if not sync_path in sys.path:
            from update import setup_paths
            setup_paths(sync_path)
    def restore(self, state):
        # When resuming, the sync path must already be on sys.path.
        assert os.path.abspath(state.sync_tree.root) in sys.path
        Step.restore(self, state)
class GetSyncTargetCommit(Step):
    """Find the commit that we will sync to."""
    provides = ["sync_commit"]
    def create(self, state):
        if state.target_rev is None:
            #Use upstream branch HEAD as the base commit
            state.sync_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
                                                                state.sync["branch"])
        else:
            # NOTE(review): this branch reads state.rev even though the
            # condition tested state.target_rev -- confirm which attribute
            # actually carries the requested revision.
            state.sync_commit = Commit(state.sync_tree, state.rev)
        # Check out the chosen commit on the throwaway local branch.
        state.sync_tree.checkout(state.sync_commit.sha1, state.local_branch, force=True)
        self.logger.debug("New base commit is %s" % state.sync_commit.sha1)
class LoadManifest(Step):
    """Load the test manifest"""
    provides = ["manifest_path", "test_manifest", "old_manifest"]
    def create(self, state):
        from manifest import manifest
        state.manifest_path = os.path.join(state.metadata_path, "MANIFEST.json")
        # Conservatively always rebuild the manifest when doing a sync
        state.old_manifest = manifest.load(state.tests_path, state.manifest_path)
        # Fresh, empty manifest that UpdateManifest will populate.
        state.test_manifest = manifest.Manifest(None, "/")
class UpdateManifest(Step):
    """Update the manifest to match the tests in the sync tree checkout"""
    def create(self, state):
        from manifest import manifest, update
        # Rebuild the manifest from the synced tree, then persist it.
        update.update(state.sync["path"], "/", state.test_manifest)
        manifest.write(state.test_manifest, state.manifest_path)
class CopyWorkTree(Step):
    """Copy the sync tree over to the destination in the local tree"""
    def create(self, state):
        # Replaces the local tests directory wholesale; see copy_wpt_tree.
        copy_wpt_tree(state.sync_tree,
                      state.tests_path)
class CreateSyncPatch(Step):
    """Add the updated test files to a commit/patch in the local tree."""
    def create(self, state):
        # Honour the --no-patch flag: leave the working copy uncommitted.
        if state.no_patch:
            return
        local_tree = state.local_tree
        sync_tree = state.sync_tree
        local_tree.create_patch("web-platform-tests_update_%s" % sync_tree.rev,
                                "Update %s to revision %s" % (state.suite_name, sync_tree.rev))
        # Stage files newly added under the tests path before updating.
        local_tree.add_new(os.path.relpath(state.tests_path,
                                           local_tree.root))
        updated = local_tree.update_patch(include=[state.tests_path,
                                                   state.metadata_path])
        # commit_patch() runs even when nothing changed; only the log differs.
        local_tree.commit_patch()
        if not updated:
            self.logger.info("Nothing to sync")
class SyncFromUpstreamRunner(StepRunner):
    """(Sub)Runner for doing an upstream sync"""
    # Steps run in order: fetch upstream, pick the target commit, rebuild
    # the manifest, copy the tree into place, then commit a local patch.
    steps = [UpdateCheckout,
             GetSyncTargetCommit,
             LoadManifest,
             UpdateManifest,
             CopyWorkTree,
             CreateSyncPatch]
| mpl-2.0 | -5,355,499,812,521,907,000 | 1,962,737,816,244,061,700 | 34.562842 | 95 | 0.655655 | false |
bartsidee/bartsidee-boxee | sources/tv/system/linux/lxml/cssselect.py | 35 | 28750 | """CSS Selectors based on XPath.
This module supports selecting XML/HTML tags based on CSS selectors.
See the `CSSSelector` class for details.
"""
import re
from lxml import etree
__all__ = ['SelectorSyntaxError', 'ExpressionError',
'CSSSelector']
# String base type for isinstance checks: `basestring` on Python 2,
# `str` on Python 3.
try:
    _basestring = basestring
except NameError:
    _basestring = str
class SelectorSyntaxError(SyntaxError):
    """Raised when a CSS selector cannot be tokenized or parsed."""
    pass
class ExpressionError(RuntimeError):
    """Raised when a parsed selector cannot be translated to XPath."""
    pass
class CSSSelector(etree.XPath):
    """A CSS selector.

    Usage::
        >>> from lxml import etree, cssselect
        >>> select = cssselect.CSSSelector("a tag > child")
        >>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
        >>> [ el.tag for el in select(root) ]
        ['child']
    """
    def __init__(self, css):
        # Translate once at construction; etree.XPath compiles the result,
        # so calling the selector repeatedly does not re-parse the CSS.
        path = css_to_xpath(css)
        etree.XPath.__init__(self, path)
        self.css = css
    def __repr__(self):
        return '<%s %s for %r>' % (
            self.__class__.__name__,
            hex(abs(id(self)))[2:],
            self.css)
##############################
## Token objects:
# Text type alias: `unicode` on Python 2, `str` on Python 3.
try:
    _unicode = unicode
except NameError:
    # Python 3
    _unicode = str
class _UniToken(_unicode):
    """A text token that also records its position in the source string."""
    def __new__(cls, contents, pos):
        token = _unicode.__new__(cls, contents)
        token.pos = pos
        return token
    def __repr__(self):
        return '%s(%s, %r)' % (
            type(self).__name__,
            _unicode.__repr__(self),
            self.pos)
class Symbol(_UniToken):
    """An identifier-ish token (element names, pseudo-class names, counts)."""
    pass
class String(_UniToken):
    """A quoted-string token with quotes stripped and escapes decoded."""
    pass
class Token(_UniToken):
    """A punctuation/operator token such as '>', '~=', '::' or ','."""
    pass
############################################################
## Parsing
############################################################
##############################
## Syntax objects:
class Class(object):
    """
    Represents selector.class_name
    """
    def __init__(self, selector, class_name):
        self.selector = selector
        self.class_name = class_name
    def __repr__(self):
        return '%s[%r.%s]' % (
            self.__class__.__name__,
            self.selector,
            self.class_name)
    def xpath(self):
        sel_xpath = self.selector.xpath()
        # Pad @class with spaces so the class name matches only as a whole
        # word, wherever it appears in the attribute value.
        sel_xpath.add_condition(
            "contains(concat(' ', normalize-space(@class), ' '), %s)" % xpath_repr(' '+self.class_name+' '))
        return sel_xpath
class Function(object):
    """
    Represents selector:name(expr) -- a functional pseudo-class such as
    :nth-child(2n+1) or :not(...).
    """
    # Functional pseudo-classes with no static XPath equivalent.
    unsupported = [
        'target', 'lang', 'enabled', 'disabled',]
    def __init__(self, selector, type, name, expr):
        self.selector = selector
        self.type = type
        self.name = name
        self.expr = expr
    def __repr__(self):
        return '%s[%r%s%s(%r)]' % (
            self.__class__.__name__,
            self.selector,
            self.type, self.name, self.expr)
    def xpath(self):
        sel_path = self.selector.xpath()
        if self.name in self.unsupported:
            # Fixed typo in the error message: "psuedo" -> "pseudo".
            raise ExpressionError(
                "The pseudo-class %r is not supported" % self.name)
        # Dispatch to the matching _xpath_<name> translator method.
        method = '_xpath_' + self.name.replace('-', '_')
        if not hasattr(self, method):
            raise ExpressionError(
                "The pseudo-class %r is unknown" % self.name)
        method = getattr(self, method)
        return method(sel_path, self.expr)
    def _xpath_nth_child(self, xpath, expr, last=False,
                         add_name_test=True):
        """Translate :nth-child(an+b) and its variants into position() tests."""
        a, b = parse_series(expr)
        if not a and not b and not last:
            # a=0 means nothing is returned...
            xpath.add_condition('false() and position() = 0')
            return xpath
        if add_name_test:
            xpath.add_name_test()
        xpath.add_star_prefix()
        if a == 0:
            if last:
                b = 'last() - %s' % b
            xpath.add_condition('position() = %s' % b)
            return xpath
        if last:
            # FIXME: I'm not sure if this is right
            a = -a
            b = -b
        if b > 0:
            b_neg = str(-b)
        else:
            b_neg = '+%s' % (-b)
        if a != 1:
            expr = ['(position() %s) mod %s = 0' % (b_neg, a)]
        else:
            expr = []
        if b >= 0:
            expr.append('position() >= %s' % b)
        elif b < 0 and last:
            expr.append('position() < (last() %s)' % b)
        expr = ' and '.join(expr)
        if expr:
            xpath.add_condition(expr)
        return xpath
    # FIXME: handle an+b, odd, even
    # an+b means every-a, plus b, e.g., 2n+1 means odd
    # 0n+b means b
    # n+0 means a=1, i.e., all elements
    # an means every a elements, i.e., 2n means even
    # -n means -1n
    # -1n+6 means elements 6 and previous
    def _xpath_nth_last_child(self, xpath, expr):
        return self._xpath_nth_child(xpath, expr, last=True)
    def _xpath_nth_of_type(self, xpath, expr):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:nth-of-type() is not implemented")
        return self._xpath_nth_child(xpath, expr, add_name_test=False)
    def _xpath_nth_last_of_type(self, xpath, expr):
        return self._xpath_nth_child(xpath, expr, last=True, add_name_test=False)
    def _xpath_contains(self, xpath, expr):
        # text content, minus tags, must contain expr
        if isinstance(expr, Element):
            expr = expr._format_element()
        xpath.add_condition('contains(css:lower-case(string(.)), %s)'
                            % xpath_repr(expr.lower()))
        # FIXME: Currently case insensitive matching doesn't seem to be happening
        return xpath
    def _xpath_not(self, xpath, expr):
        # everything for which not expr applies
        expr = expr.xpath()
        cond = expr.condition
        # FIXME: should I do something about element_path?
        xpath.add_condition('not(%s)' % cond)
        return xpath
def _make_lower_case(context, s):
    """XPath extension function body: lower-case a string via str.lower()."""
    return s.lower()
# Register the helper so generated XPath can call css:lower-case().
ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
ns.prefix = 'css'
ns['lower-case'] = _make_lower_case
class Pseudo(object):
    """
    Represents selector:ident -- a non-functional pseudo-class such as
    :empty or :first-child.
    """
    # Pseudo-classes that depend on UA state/rendering and so have no
    # XPath equivalent.
    unsupported = ['indeterminate', 'first-line', 'first-letter',
                   'selection', 'before', 'after', 'link', 'visited',
                   'active', 'focus', 'hover']
    def __init__(self, element, type, ident):
        self.element = element
        assert type in (':', '::')
        self.type = type
        self.ident = ident
    def __repr__(self):
        return '%s[%r%s%s]' % (
            self.__class__.__name__,
            self.element,
            self.type, self.ident)
    def xpath(self):
        el_xpath = self.element.xpath()
        if self.ident in self.unsupported:
            # Fixed typo in the error message: "psuedo" -> "pseudo".
            raise ExpressionError(
                "The pseudo-class %r is unsupported" % self.ident)
        # Dispatch to the matching _xpath_<ident> translator method.
        method = '_xpath_' + self.ident.replace('-', '_')
        if not hasattr(self, method):
            raise ExpressionError(
                "The pseudo-class %r is unknown" % self.ident)
        method = getattr(self, method)
        el_xpath = method(el_xpath)
        return el_xpath
    def _xpath_checked(self, xpath):
        # FIXME: is this really all the elements?
        xpath.add_condition("(@selected or @checked) and (name(.) = 'input' or name(.) = 'option')")
        return xpath
    def _xpath_root(self, xpath):
        # if this element is the root element
        raise NotImplementedError
    def _xpath_first_child(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        xpath.add_condition('position() = 1')
        return xpath
    def _xpath_last_child(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        xpath.add_condition('position() = last()')
        return xpath
    def _xpath_first_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:first-of-type is not implemented")
        xpath.add_star_prefix()
        xpath.add_condition('position() = 1')
        return xpath
    def _xpath_last_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:last-of-type is not implemented")
        xpath.add_star_prefix()
        xpath.add_condition('position() = last()')
        return xpath
    def _xpath_only_child(self, xpath):
        xpath.add_name_test()
        xpath.add_star_prefix()
        xpath.add_condition('last() = 1')
        return xpath
    def _xpath_only_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:only-of-type is not implemented")
        xpath.add_condition('last() = 1')
        return xpath
    def _xpath_empty(self, xpath):
        xpath.add_condition("not(*) and not(normalize-space())")
        return xpath
class Attrib(object):
    """
    Represents selector[namespace|attrib operator value]

    `operator` is one of the CSS attribute operators ('=', '~=', '|=',
    '^=', '$=', '*=', '!=') or the sentinel 'exists' for a bare [attr].
    """
    def __init__(self, selector, namespace, attrib, operator, value):
        self.selector = selector
        self.namespace = namespace
        self.attrib = attrib
        self.operator = operator
        self.value = value
    def __repr__(self):
        if self.operator == 'exists':
            return '%s[%r[%s]]' % (
                self.__class__.__name__,
                self.selector,
                self._format_attrib())
        else:
            return '%s[%r[%s %s %r]]' % (
                self.__class__.__name__,
                self.selector,
                self._format_attrib(),
                self.operator,
                self.value)
    def _format_attrib(self):
        # '*' is the wildcard namespace and is omitted from the display form.
        if self.namespace == '*':
            return self.attrib
        else:
            return '%s|%s' % (self.namespace, self.attrib)
    def _xpath_attrib(self):
        # FIXME: if attrib is *?
        if self.namespace == '*':
            return '@' + self.attrib
        else:
            return '@%s:%s' % (self.namespace, self.attrib)
    def xpath(self):
        """Append this attribute test as an XPath predicate on the selector."""
        path = self.selector.xpath()
        attrib = self._xpath_attrib()
        value = self.value
        if self.operator == 'exists':
            assert not value
            path.add_condition(attrib)
        elif self.operator == '=':
            path.add_condition('%s = %s' % (attrib,
                                            xpath_repr(value)))
        elif self.operator == '!=':
            # FIXME: this seems like a weird hack...
            if value:
                path.add_condition('not(%s) or %s != %s'
                                   % (attrib, attrib, xpath_repr(value)))
            else:
                path.add_condition('%s != %s'
                                   % (attrib, xpath_repr(value)))
            #path.add_condition('%s != %s' % (attrib, xpath_repr(value)))
        elif self.operator == '~=':
            # Whole-word match within a space-separated attribute value.
            path.add_condition("contains(concat(' ', normalize-space(%s), ' '), %s)" % (attrib, xpath_repr(' '+value+' ')))
        elif self.operator == '|=':
            # Weird, but true...
            path.add_condition('%s = %s or starts-with(%s, %s)' % (
                attrib, xpath_repr(value),
                attrib, xpath_repr(value + '-')))
        elif self.operator == '^=':
            path.add_condition('starts-with(%s, %s)' % (
                attrib, xpath_repr(value)))
        elif self.operator == '$=':
            # Oddly there is a starts-with in XPath 1.0, but not ends-with
            path.add_condition('substring(%s, string-length(%s)-%s) = %s'
                               % (attrib, attrib, len(value)-1, xpath_repr(value)))
        elif self.operator == '*=':
            # FIXME: case sensitive?
            path.add_condition('contains(%s, %s)' % (
                attrib, xpath_repr(value)))
        else:
            assert 0, ("Unknown operator: %r" % self.operator)
        return path
class Element(object):
    """A type selector, optionally namespaced: ``namespace|element``."""
    def __init__(self, namespace, element):
        self.namespace = namespace
        self.element = element
    def _format_element(self):
        # '*' is the wildcard namespace and is omitted from the display form.
        if self.namespace != '*':
            return '%s|%s' % (self.namespace, self.element)
        return self.element
    def __repr__(self):
        return '%s[%s]' % (type(self).__name__, self._format_element())
    def xpath(self):
        if self.namespace != '*':
            # FIXME: Should we lowercase here?
            qualified = '%s:%s' % (self.namespace, self.element)
            return XPathExpr(element=qualified)
        return XPathExpr(element=self.element.lower())
class Hash(object):
    """An ID selector: ``selector#id``."""
    def __init__(self, selector, id):
        self.selector = selector
        self.id = id
    def __repr__(self):
        return '%s[%r#%s]' % (type(self).__name__, self.selector, self.id)
    def xpath(self):
        path = self.selector.xpath()
        path.add_condition('@id = %s' % xpath_repr(self.id))
        return path
class Or(object):
    """A comma-separated selector group; the union of its members."""
    def __init__(self, items):
        self.items = items
    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.items)
    def xpath(self):
        member_paths = [item.xpath() for item in self.items]
        return XPathExprOr(member_paths)
class CombinedSelector(object):
    """Two selectors joined by a CSS combinator (' ', '>', '+' or '~')."""
    # Combinator character -> _xpath_<name> translator suffix.
    _method_mapping = {
        ' ': 'descendant',
        '>': 'child',
        '+': 'direct_adjacent',
        '~': 'indirect_adjacent',
    }
    def __init__(self, selector, combinator, subselector):
        assert selector is not None
        self.selector = selector
        self.combinator = combinator
        self.subselector = subselector
    def __repr__(self):
        if self.combinator == ' ':
            comb = '<followed>'
        else:
            comb = self.combinator
        return '%s[%r %s %r]' % (
            self.__class__.__name__,
            self.selector,
            comb,
            self.subselector)
    def xpath(self):
        if self.combinator not in self._method_mapping:
            raise ExpressionError(
                "Unknown combinator: %r" % self.combinator)
        method = '_xpath_' + self._method_mapping[self.combinator]
        method = getattr(self, method)
        path = self.selector.xpath()
        return method(path, self.subselector)
    def _xpath_descendant(self, xpath, sub):
        # when sub is a descendant in any way of xpath
        xpath.join('/descendant::', sub.xpath())
        return xpath
    def _xpath_child(self, xpath, sub):
        # when sub is an immediate child of xpath
        xpath.join('/', sub.xpath())
        return xpath
    def _xpath_direct_adjacent(self, xpath, sub):
        # when sub immediately follows xpath
        xpath.join('/following-sibling::', sub.xpath())
        xpath.add_name_test()
        xpath.add_condition('position() = 1')
        return xpath
    def _xpath_indirect_adjacent(self, xpath, sub):
        # when sub comes somewhere after xpath as a sibling
        xpath.join('/following-sibling::', sub.xpath())
        return xpath
##############################
## XPathExpr objects:
# Fast-path patterns for the most common trivial selectors.
_el_re = re.compile(r'^\w+\s*$')
_id_re = re.compile(r'^(\w*)#(\w+)\s*$')
_class_re = re.compile(r'^(\w*)\.(\w+)\s*$')
def css_to_xpath(css_expr, prefix='descendant-or-self::'):
    """Translate a CSS selector (string or parsed tree) into an XPath string."""
    if isinstance(css_expr, _basestring):
        # Bare element, '#id' and '.class' selectors are translated
        # directly without invoking the full parser.
        el_match = _el_re.search(css_expr)
        if el_match is not None:
            return '%s%s' % (prefix, el_match.group(0).strip())
        id_match = _id_re.search(css_expr)
        if id_match is not None:
            return "%s%s[@id = '%s']" % (
                prefix, id_match.group(1) or '*', id_match.group(2))
        class_match = _class_re.search(css_expr)
        if class_match is not None:
            return "%s%s[contains(concat(' ', normalize-space(@class), ' '), ' %s ')]" % (
                prefix, class_match.group(1) or '*', class_match.group(2))
        css_expr = parse(css_expr)
    expr = css_expr.xpath()
    assert expr is not None, (
        "Got None for xpath expression from %s" % repr(css_expr))
    if prefix:
        expr.add_prefix(prefix)
    return str(expr)
class XPathExpr(object):
    """Mutable builder for a single XPath location path.

    Tracks an axis prefix, an accumulated path, the current element test
    and an optional predicate condition; str() renders the expression.
    """
    def __init__(self, prefix=None, path=None, element='*', condition=None,
                 star_prefix=False):
        self.prefix = prefix
        self.path = path
        self.element = element
        self.condition = condition
        self.star_prefix = star_prefix
    def __str__(self):
        parts = []
        if self.prefix is not None:
            parts.append(str(self.prefix))
        if self.path is not None:
            parts.append(str(self.path))
        parts.append(str(self.element))
        if self.condition:
            parts.append('[%s]' % self.condition)
        return ''.join(parts)
    def __repr__(self):
        return '%s[%s]' % (type(self).__name__, self)
    def add_condition(self, condition):
        # Conditions accumulate conjunctively.
        if not self.condition:
            self.condition = condition
        else:
            self.condition = '%s and (%s)' % (self.condition, condition)
    def add_path(self, part):
        # Fold the current element test into the path, then start a new step.
        self.path = self.element if self.path is None else self.path + self.element
        self.element = part
    def add_prefix(self, prefix):
        self.prefix = prefix + self.prefix if self.prefix else prefix
    def add_name_test(self):
        if self.element == '*':
            # Already matching any element; nothing to test.
            return
        self.add_condition("name() = %s" % xpath_repr(self.element))
        self.element = '*'
    def add_star_prefix(self):
        """
        Adds a /* prefix if there is no prefix. This is when you need
        to keep context's constrained to a single parent.
        """
        self.path = (self.path or '') + '*/'
        self.star_prefix = True
    def join(self, combiner, other):
        joined_prefix = str(self) + combiner
        other_path = (other.prefix or '') + (other.path or '')
        # A bare star prefix on `other` is redundant once joined; drop it.
        if other.star_prefix and other_path == '*/':
            other_path = ''
        self.prefix = joined_prefix
        self.path = other_path
        self.element = other.element
        self.condition = other.condition
class XPathExprOr(XPathExpr):
    """
    Represents |'d expressions.  Note that unfortunately it isn't
    the union, it's the sum, so duplicate elements will appear.
    """
    def __init__(self, items, prefix=None):
        for item in items:
            assert item is not None
        self.items = items
        self.prefix = prefix
    def __str__(self):
        prefix = self.prefix or ''
        return ' | '.join(prefix + str(item) for item in self.items)
def xpath_repr(s):
    """Quote `s` for embedding as a literal in a generated XPath string."""
    # FIXME: I don't think this is right, but lacking any reasonable
    # specification on what XPath literals look like (which doesn't seem
    # to be in the XPath specification) it is hard to do 'right'
    if isinstance(s, Element):
        # This is probably a symbol that looks like an expression...
        s = s._format_element()
    return repr(str(s))
##############################
## Parsing functions
def parse(string):
    """Parse a CSS selector string into a selector object tree."""
    stream = TokenStream(tokenize(string))
    stream.source = string
    try:
        return parse_selector_group(stream)
    except SelectorSyntaxError:
        # Enrich the error with the tokens consumed so far and those
        # remaining, then re-raise it.
        import sys
        e = sys.exc_info()[1]
        e.args = tuple(["%s at %s -> %s" % (
            e, stream.used, list(stream))])
        raise
def parse_selector_group(stream):
    """Parse a comma-separated group of selectors from `stream`.

    Returns the single selector directly, or an Or() wrapping several.
    """
    selectors = [parse_selector(stream)]
    while stream.peek() == ',':
        stream.next()
        selectors.append(parse_selector(stream))
    if len(selectors) == 1:
        return selectors[0]
    return Or(selectors)
def parse_selector(stream):
    """Parse one (possibly combined) selector from `stream`.

    Folds successive simple selectors into a left-nested chain of
    CombinedSelector nodes; stops at a ',' (group separator) or at the
    end of input.
    """
    result = parse_simple_selector(stream)
    while 1:
        peek = stream.peek()
        if peek == ',' or peek is None:
            return result
        elif peek in ('+', '>', '~'):
            # An explicit combinator token.
            combinator = stream.next()
        else:
            # No token between selectors means the descendant combinator.
            combinator = ' '
        next_selector = parse_simple_selector(stream)
        result = CombinedSelector(result, combinator, next_selector)
    # (Removed an unreachable trailing `return result`: the loop above can
    # only exit via its own `return`.)
def parse_simple_selector(stream):
    """Parse one simple selector: an optional (namespaced) element followed
    by any number of '#id', '.class', '[attr...]' and pseudo-class suffixes.
    """
    peek = stream.peek()
    if peek != '*' and not isinstance(peek, Symbol):
        # No explicit element: implicit universal selector.
        element = namespace = '*'
    else:
        next = stream.next()
        if next != '*' and not isinstance(next, Symbol):
            raise SelectorSyntaxError(
                "Expected symbol, got %r" % next)
        if stream.peek() == '|':
            # namespace|element form.
            namespace = next
            stream.next()
            element = stream.next()
            # Fixed: this check previously re-tested `next` (the namespace,
            # already validated above) instead of the freshly-read element,
            # so it wrongly rejected '*|element' and never validated the
            # element token itself.
            if element != '*' and not isinstance(element, Symbol):
                raise SelectorSyntaxError(
                    "Expected symbol, got %r" % element)
        else:
            namespace = '*'
            element = next
    result = Element(namespace, element)
    has_hash = False
    while 1:
        peek = stream.peek()
        if peek == '#':
            if has_hash:
                # You can't have two hashes
                # (FIXME: is there some more general rule I'm missing?)
                break
            stream.next()
            result = Hash(result, stream.next())
            has_hash = True
            continue
        elif peek == '.':
            stream.next()
            result = Class(result, stream.next())
            continue
        elif peek == '[':
            stream.next()
            result = parse_attrib(result, stream)
            next = stream.next()
            if not next == ']':
                raise SelectorSyntaxError(
                    "] expected, got %r" % next)
            continue
        elif peek == ':' or peek == '::':
            type = stream.next()
            ident = stream.next()
            if not isinstance(ident, Symbol):
                raise SelectorSyntaxError(
                    "Expected symbol, got %r" % ident)
            if stream.peek() == '(':
                # Functional pseudo-class: :name(expr)
                stream.next()
                peek = stream.peek()
                if isinstance(peek, String):
                    selector = stream.next()
                elif isinstance(peek, Symbol) and is_int(peek):
                    selector = int(stream.next())
                else:
                    # FIXME: parse_simple_selector, or selector, or...?
                    selector = parse_simple_selector(stream)
                next = stream.next()
                if not next == ')':
                    raise SelectorSyntaxError(
                        "Expected ), got %r and %r"
                        % (next, selector))
                result = Function(result, type, ident, selector)
            else:
                result = Pseudo(result, type, ident)
            continue
        else:
            if peek == ' ':
                stream.next()
            break
        # FIXME: not sure what "negation" is
    return result
def is_int(v):
    """Return True if `v` parses as an integer, False on ValueError."""
    try:
        int(v)
    except ValueError:
        return False
    return True
def parse_attrib(selector, stream):
    """Parse the inside of an attribute selector '[...]'.

    The opening '[' has already been consumed; the closing ']' is left in
    the stream for the caller to consume and validate.
    """
    attrib = stream.next()
    if stream.peek() == '|':
        # namespace|attrib form.
        namespace = attrib
        stream.next()
        attrib = stream.next()
    else:
        namespace = '*'
    if stream.peek() == ']':
        # Bare [attr]: existence test, no operator or value.
        return Attrib(selector, namespace, attrib, 'exists', None)
    op = stream.next()
    if not op in ('^=', '$=', '*=', '=', '~=', '|=', '!='):
        raise SelectorSyntaxError(
            "Operator expected, got %r" % op)
    value = stream.next()
    if not isinstance(value, (Symbol, String)):
        raise SelectorSyntaxError(
            "Expected string or symbol, got %r" % value)
    return Attrib(selector, namespace, attrib, op, value)
def parse_series(s):
    """
    Parses things like '1n+2', or 'an+b' generally, returning (a, b)
    """
    if isinstance(s, Element):
        s = s._format_element()
    if not s or s == '*':
        # Happens when there's nothing, which the CSS parser thinks of as *
        return (0, 0)
    if isinstance(s, int):
        # Happens when you just get a number
        return (0, s)
    keywords = {'odd': (2, 1), 'even': (2, 0), 'n': (1, 0)}
    if s in keywords:
        return keywords[s]
    if 'n' not in s:
        # Just a b
        return (0, int(s))
    a_part, b_part = s.split('n', 1)
    def to_coefficient(text, default):
        # Empty -> default; a bare sign means +/-1; otherwise parse as int.
        if not text:
            return default
        if text == '-' or text == '+':
            return int(text + '1')
        return int(text)
    return (to_coefficient(a_part, 1), to_coefficient(b_part, 0))
############################################################
## Tokenizing
############################################################
_whitespace_re = re.compile(r'\s+')
_comment_re = re.compile(r'/\*.*?\*/', re.S)
# Matches an+b counts such as '2n+1', '-n', 'n-3'.
_count_re = re.compile(r'[+-]?\d*n(?:[+-]\d+)?')
def tokenize(s):
    """Yield Symbol/String/Token objects for the selector string `s`."""
    pos = 0
    # CSS comments are stripped before tokenizing.
    s = _comment_re.sub('', s)
    while 1:
        match = _whitespace_re.match(s, pos=pos)
        if match:
            # Remember where the skipped whitespace started: '.' and '#'
            # preceded by whitespace need a synthetic descendant token.
            preceding_whitespace_pos = pos
            pos = match.end()
        else:
            preceding_whitespace_pos = 0
        if pos >= len(s):
            return
        # an+b counts first, so e.g. '2n+1' is not split at the '+'.
        match = _count_re.match(s, pos=pos)
        if match and match.group() != 'n':
            sym = s[pos:match.end()]
            yield Symbol(sym, pos)
            pos = match.end()
            continue
        c = s[pos]
        c2 = s[pos:pos+2]
        # Two-character operators take precedence over single characters.
        if c2 in ('~=', '|=', '^=', '$=', '*=', '::', '!='):
            yield Token(c2, pos)
            pos += 2
            continue
        if c in '>+~,.*=[]()|:#':
            if c in '.#' and preceding_whitespace_pos > 0:
                # Whitespace before '.'/'#' separates two simple selectors.
                yield Token(' ', preceding_whitespace_pos)
            yield Token(c, pos)
            pos += 1
            continue
        if c == '"' or c == "'":
            # Quoted string
            old_pos = pos
            sym, pos = tokenize_escaped_string(s, pos)
            yield String(sym, old_pos)
            continue
        old_pos = pos
        sym, pos = tokenize_symbol(s, pos)
        yield Symbol(sym, old_pos)
        continue
def tokenize_escaped_string(s, pos):
    """Consume a quoted string starting at s[pos]; return (text, new_pos).

    new_pos points just past the closing quote.  Backslash escapes are
    decoded; when decoding fails (e.g. a quote escaped by a trailing
    backslash), the search continues past that quote.
    """
    quote = s[pos]
    assert quote in ('"', "'")
    pos = pos+1
    start = pos
    while 1:
        next = s.find(quote, pos)
        if next == -1:
            raise SelectorSyntaxError(
                "Expected closing %s for string in: %r"
                % (quote, s[start:]))
        result = s[start:next]
        try:
            result = result.encode('ASCII', 'backslashreplace').decode('unicode_escape')
        except UnicodeDecodeError:
            # Probably a hanging \
            pos = next+1
        else:
            return result, next+1
# Any character that cannot appear inside a symbol token.
_illegal_symbol = re.compile(r'[^\w\\-]', re.UNICODE)
def tokenize_symbol(s, pos):
    """Consume a symbol starting at s[pos]; return (symbol, new_pos).

    A symbol runs until the first character outside word chars, '-' and
    backslash.  Backslash escapes are decoded before returning.
    """
    start = pos
    match = _illegal_symbol.search(s, pos=pos)
    if not match:
        # Symbol extends to the end of the input.
        return s[start:], len(s)
    if match.start() == pos:
        assert 0, (
            "Unexpected symbol: %r at %s" % (s[pos], pos))
    # (The original re-tested `match` here; that branch was dead code --
    # a falsy match had already returned above.)
    result = s[start:match.start()]
    pos = match.start()
    try:
        result = result.encode('ASCII', 'backslashreplace').decode('unicode_escape')
    except UnicodeDecodeError:
        import sys
        e = sys.exc_info()[1]
        raise SelectorSyntaxError(
            "Bad symbol %r: %s" % (result, e))
    return result, pos
class TokenStream(object):
    """Token iterator with single-token lookahead via peek().

    next() returns None (rather than raising) once the stream is
    exhausted; every token handed out is recorded in `used`.
    """
    def __init__(self, tokens, source=None):
        self.used = []
        self.tokens = iter(tokens)
        self.source = source
        self.peeked = None
        self._peeking = False
        try:
            # Python 2 iterators expose .next()
            self.next_token = self.tokens.next
        except AttributeError:
            # Python 3
            self.next_token = self.tokens.__next__
    def next(self):
        if self._peeking:
            # Hand back the token peek() already pulled from the iterator.
            self._peeking = False
            self.used.append(self.peeked)
            return self.peeked
        try:
            token = self.next_token()
        except StopIteration:
            return None
        self.used.append(token)
        return token
    def __iter__(self):
        # Iterate until next() signals exhaustion with None.
        return iter(self.next, None)
    def peek(self):
        if not self._peeking:
            try:
                self.peeked = self.next_token()
            except StopIteration:
                return None
            self._peeking = True
        return self.peeked
| gpl-3.0 | -5,560,944,423,791,474,000 | 3,923,412,649,199,573,000 | 29.359029 | 123 | 0.506191 | false |
jar398/tryphy | tests/test_sl_eol_get_links.py | 1 | 2532 | # 10. sl/eol/get_links
# Parameter: list of species
# Result:
# input_species - repeats input (issue: flush this)
# message, status_code as usual
# meta_data - not very useful
# species - list of blobs about species
# eol_id
# matched_name - contains authority
# searched_name - presumably what was provided
import sys, unittest, json
# Make the local test harness importable regardless of the working directory.
sys.path.append('./')
sys.path.append('../')
import webapp
# Shared handle to the service under test, used by the test classes and
# the canned example request below.
service = webapp.get_service(5004, 'sl/eol/get_links')
class SlEolGetLinksTester(webapp.WebappTestCase):
    """Shared test cases for the sl/eol/get_links service; subclasses
    supply the concrete service handle and HTTP method."""
    def test_no_parameter(self):
        """What if no parameters are supplied? (Hoping for 400.)"""
        m = self.__class__.http_method()
        service = self.__class__.get_service()
        x = service.get_request(m, None).exchange()
        self.assert_response_status(x, 400)
        # tbd: check for informativeness
    def test_bad_parameter(self):
        """What if the supplied parameter has the wrong name? (Hoping for 400.)"""
        m = self.__class__.http_method()
        service = self.__class__.get_service()
        x = service.get_request(m, {u'bad_parameter': u'Nosuchtaxonia notatall'}).exchange()
        self.assert_response_status(x, 400)
        # Check for informativeness
        # The error message should mention the missing 'species' parameter.
        mess = x.json()[u'message']
        self.assertTrue(u'species' in mess, mess)
    def test_bad_species(self):
        """What if the species name is unknown?"""
        m = self.__class__.http_method()
        service = self.__class__.get_service()
        x = service.get_request(m, {u'species': u'Nosuchtaxonia notatall'}).exchange()
        # json.dump(x.to_dict(), sys.stdout, indent=2)
        # TBD: Issue: Not documented what happens in this case.
        self.assert_success(x)
        # An unknown species yields an empty matched_name, not an error.
        self.assertEqual(x.json()[u'species'][0][u'matched_name'], '')
    # Insert here: edge case tests
    # Insert here: inputs out of range, leading to error or long delay
    # Insert here: error-generating conditions
    # (See ../README.md)
class TestSlEolGetLinks(SlEolGetLinksTester):
@classmethod
def get_service(self):
return service
@classmethod
def http_method(self):
return 'GET'
def test_example_23(self):
x = self.start_request_tests(example_23)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
null=None; false=False; true=True
example_23 = service.get_request('GET', {u'species': u'Panthera leo|Panthera onca|Panthera pardus'})
if __name__ == '__main__':
webapp.main()
| bsd-2-clause | -1,572,123,198,718,528,000 | 5,837,135,606,132,128,000 | 31.883117 | 100 | 0.636256 | false |
rspavel/spack | var/spack/repos/builtin/packages/r-fitdistrplus/package.py | 5 | 1434 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFitdistrplus(RPackage):
"""Extends the fitdistr() function (of the MASS package) with several
functions to help the fit of a parametric distribution to non-censored or
censored data. Censored data may contain left censored, right censored and
interval censored values, with several lower and upper bounds. In addition
to maximum likelihood estimation (MLE), the package provides moment
matching (MME), quantile matching (QME) and maximum goodness-of-fit
estimation (MGE) methods (available only for non-censored data). Weighted
versions of MLE, MME and QME are available. See e.g. Casella & Berger
(2002). Statistical inference. Pacific Grove."""
homepage = "https://lbbe.univ-lyon1.fr/fitdistrplus.html"
url = "https://cloud.r-project.org/src/contrib/fitdistrplus_1.0-14.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/fitdistrplus"
version('1.0-14', sha256='85082590f62aa08d99048ea3414c5cc1e5b780d97b3779d2397c6cb435470083')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-npsurv', type=('build', 'run'))
| lgpl-2.1 | -4,356,243,191,562,059,300 | -742,215,034,589,309,300 | 48.448276 | 96 | 0.721757 | false |
VitalPet/c2c-rd-addons | sale_order_webkit_chricar/sale.py | 4 | 6140 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
import openerp.addons.one2many_sorted as one2many_sorted
class sale_order(osv.osv):
_inherit = "sale.order"
def _print_uom(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_uom = False
if order.order_line:
for line in order.order_line:
if not line.product_uos or (line.product_uos and line.product_uom.id == line.product_uos.id):
print_uom = True
res[order.id] = print_uom
return res
def _print_uos(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_uos = False
if order.order_line:
for line in order.order_line:
if line.product_uos and line.product_uos_qty != line.product_uom_qty :
print_uos = True
res[order.id] = print_uos
return res
def _print_packing(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_packing = False
if order.order_line:
for line in order.order_line:
if line.product_packaging:
print_packing = True
res[order.id] = print_packing
return res
def _print_ean(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_ean = False
if order.order_line and order.company_id.print_ean:
for line in order.order_line:
if line.product_packaging.ean or line.product_id.ean13 :
print_ean = True
res[order.id] = print_ean
return res
def _print_discount(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_discount = False
if order.order_line:
for line in order.order_line:
if line.discount :
print_discount = True
res[order.id] = print_discount
return res
def _print_code(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
print_code = False
if order.order_line and order.company_id.print_code:
for line in order.order_line:
if line.product_id.default_code:
print_code = True
res[order.id] = print_code
return res
def _get_cols(self, cr, uid, ids, name, args, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
cols = 2
if order.print_uom:
cols += 2
if order.print_uos:
cols += 2
if order.print_packing:
cols += 2
if order.print_ean:
cols += 1
if order.print_discount:
cols += 1
if order.print_code:
cols += 1
res[order.id] = cols
return res
_columns = {
'notes': fields.text('Notes'),
'print_uom': fields.function(_print_uom, method=True, type='boolean', string='Print UoM if different from UoS',),
'print_uos': fields.function(_print_uos, method=True, type='boolean', string='Print UoS if exists',),
'print_packing': fields.function(_print_packing, method=True, type='boolean', string='Print Packing Info if available',),
'print_ean': fields.function(_print_ean, method=True, type='boolean', string='Print EAN if available',),
'print_discount': fields.function(_print_discount, method=True, type='boolean', string='Print Discount if available',),
'print_code': fields.function(_print_code, method=True, type='boolean', string='Print code if available',),
'cols': fields.function(_get_cols, method=True, type='integer', string='No of columns before totals',),
'order_line_sorted' : one2many_sorted.one2many_sorted
( 'sale.order.line'
, 'order_id'
, 'Order Lines Sorted'
, states={'draft': [('readonly', False)]}
, order = 'product_id.name,name'
),
# 'order_line' : one2many_sorted.one2many_sorted
# ( 'sale.order.line'
# , 'order_id'
# , 'Order Lines Sorted'
# , states={'draft': [('readonly', False)]}
# , order = 'product_id.name,name'
# ),
}
sale_order()
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
_columns = {
'notes': fields.text('Notes'),
}
sale_order_line()
| agpl-3.0 | -434,823,811,301,121,100 | 1,269,267,621,458,618,400 | 38.87013 | 135 | 0.548534 | false |
Huluzai/DoonSketch | inkscape-0.48.5/share/extensions/export_gimp_palette.py | 3 | 1454 | #!/usr/bin/env python
'''
Author: Jos Hirth, kaioa.com
License: GNU General Public License - http://www.gnu.org/licenses/gpl.html
Warranty: see above
'''
DOCNAME='sodipodi:docname'
import sys, simplestyle
try:
from xml.dom.minidom import parse
except:
sys.exit(_('The export_gpl.py module requires PyXML. Please download the latest version from http://pyxml.sourceforge.net/.'))
colortags=(u'fill',u'stroke',u'stop-color',u'flood-color',u'lighting-color')
colors={}
def walk(node):
checkStyle(node)
if node.hasChildNodes():
childs=node.childNodes
for child in childs:
walk(child)
def checkStyle(node):
if hasattr(node,"hasAttributes") and node.hasAttributes():
sa=node.getAttribute('style')
if sa!='':
styles=simplestyle.parseStyle(sa)
for c in range(len(colortags)):
if colortags[c] in styles.keys():
addColor(styles[colortags[c]])
def addColor(col):
if simplestyle.isColor(col):
c=simplestyle.parseColor(col)
colors['%3i %3i %3i ' % (c[0],c[1],c[2])]=simplestyle.formatColoria(c).upper()
stream = open(sys.argv[-1:][0],'r')
dom = parse(stream)
stream.close()
walk(dom)
print 'GIMP Palette\nName: %s\n#' % (dom.getElementsByTagName('svg')[0].getAttribute(DOCNAME).split('.')[0])
for k,v in sorted(colors.items()):
print k+v
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8
| gpl-2.0 | -7,774,191,800,499,293,000 | -243,340,120,270,069,250 | 28.08 | 131 | 0.654058 | false |
weechat/weechat.org | weechat/about/views.py | 1 | 5180 | #
# Copyright (C) 2003-2021 Sébastien Helleu <flashcode@flashtux.org>
#
# This file is part of WeeChat.org.
#
# WeeChat.org is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat.org. If not, see <https://www.gnu.org/licenses/>.
#
"""Views for "about" menu."""
import os
from sys import version as python_version
from django import __version__ as django_version
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum
from django.shortcuts import render
from django.utils.translation import ugettext
from weechat.about.models import (
Screenshot,
Keydate,
Sponsor,
SPONSOR_TYPE_CHOICES,
SPONSOR_TYPE_SVG,
)
from weechat.common.path import media_path_join
from weechat.download.models import Release
def screenshots(request, app='weechat', filename=''):
"""
Page with one screenshot (if filename given),
or all screenshots as thumbnails.
"""
if filename:
try:
screenshot = Screenshot.objects.get(app=app, filename=filename)
except ObjectDoesNotExist:
screenshot = None
return render(
request,
'about/screenshots.html',
{
'app': app,
'filename': filename,
'screenshot': screenshot,
},
)
screenshot_list = Screenshot.objects.filter(app=app).order_by('priority')
return render(
request,
'about/screenshots.html',
{
'app': app,
'screenshot_list': screenshot_list,
},
)
def history(request):
"""Page with WeeChat history, including key dates."""
release_list = (Release.objects.all().exclude(version='devel')
.order_by('-date'))
releases = []
for release in release_list:
name = f'weechat-{release.version}.png'
if os.path.exists(media_path_join('images', 'story', name)):
releases.append((release.version, release.date))
return render(
request,
'about/history.html',
{
'releases': releases,
'keydate_list': Keydate.objects.all().order_by('date'),
},
)
def about(request, extra_info=False):
"""About WeeChat.org."""
context = {}
if extra_info:
context.update({
'extra_info': {
'django': django_version,
'python': python_version,
},
})
return render(request, 'about/weechat.org.html', context)
def donate(request, sort_key='date', view_key=''):
"""Page with link for donation and list of sponsors."""
sort_key_top = 'top10'
sort_key_top_count = 10
sort_count = 0
if sort_key.startswith('top'):
sort_key_top = sort_key
sort_count = max(int(sort_key[3:]), 1)
sort_key_top_count = sort_count
sort_key = 'top'
if sort_key == 'type':
sponsor_list = (Sponsor.objects.values('sponsortype')
.annotate(amount=Sum('amount'))
.order_by('-amount'))
total = sum(sponsor['amount'] for sponsor in sponsor_list)
for sponsor in sponsor_list:
sponsor['sponsortype_i18n'] = ugettext(
dict(SPONSOR_TYPE_CHOICES)[sponsor['sponsortype']])
sponsor['sponsortype_svg'] = \
SPONSOR_TYPE_SVG[sponsor['sponsortype']]
elif sort_key == 'top':
sponsor_list = (Sponsor.objects.values('sponsortype', 'name')
.annotate(amount=Sum('amount'))
.order_by('-amount')[:sort_count])
total = sum(sponsor['amount'] for sponsor in sponsor_list)
for sponsor in sponsor_list:
sponsor['sponsortype_i18n'] = ugettext(
dict(SPONSOR_TYPE_CHOICES)[sponsor['sponsortype']])
sponsor['sponsortype_svg'] = \
SPONSOR_TYPE_SVG[sponsor['sponsortype']]
else:
# by default: sort by date
sponsor_list = Sponsor.objects.all().order_by('-date', '-id')
total = sum(sponsor.amount for sponsor in sponsor_list)
view_amount = False
try:
if view_key and view_key == settings.KEY_VIEWAMOUNT:
view_amount = True
except AttributeError:
pass
return render(
request,
'donate.html',
{
'sponsor_list': sponsor_list,
'sort_key': sort_key,
'sort_count': sort_count,
'sort_key_top': sort_key_top,
'sort_key_top_count': sort_key_top_count,
'view_amount': view_amount,
'total': total,
},
)
| gpl-3.0 | -8,913,954,795,790,136,000 | 3,650,845,733,421,954,000 | 31.36875 | 77 | 0.595289 | false |
museomix/2013_Quebec_thermoscope | raspberry/pygame-1.9.1release/test/run_tests__tests/exclude/magic_tag_test.py | 10 | 1141 | __tags__ = ['magic']
if __name__ == '__main__':
import sys
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import unittest
else:
from test import test_utils
from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
def test_get_focused(self):
self.assert_(True)
def test_get_mods(self):
self.assert_(True)
def test_get_pressed(self):
self.assert_(True)
def test_name(self):
self.assert_(True)
def test_set_mods(self):
self.assert_(True)
if __name__ == '__main__':
unittest.main()
| mit | -5,932,176,877,909,794,000 | 9,056,193,190,661,864,000 | 25.166667 | 65 | 0.54163 | false |
mvaled/OpenUpgrade | addons/project_issue/project_issue.py | 217 | 29319 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
    """ Version of the software/project an issue applies to.

    Simple referential model used by ``project.issue`` (its ``version_id``
    field) to tag issues with a version number. Ordered by version number
    descending so the most recent version comes first.
    """
    _name = "project.issue.version"
    _order = "name desc"
    _columns = {
        'name': fields.char('Version Number', required=True),
        'active': fields.boolean('Active', required=False),
    }
    _defaults = {
        'active': 1,
    }
class project_issue(osv.Model):
    """ Issue (bug/support ticket) tracked inside a project.

    Inherits the mail thread and needaction mixins: issues carry a message
    thread and can notify followers on tracked field changes (see ``_track``).
    """
    _name = "project.issue"
    _description = "Project Issue"
    _order = "priority desc, create_date desc"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    # followers with plain read access may still post messages on the thread
    _mail_post_access = 'read'
    # mapping: mail subtype xml id -> predicate deciding when that subtype
    # is used to notify followers about a change of the tracked field
    _track = {
        'stage_id': {
            # this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
            'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
            'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
        },
        'user_id': {
            'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
        },
        'kanban_state': {
            'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
            'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
        },
    }
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
    def _compute_day(self, cr, uid, ids, fields, args, context=None):
        """ Function field computing the delay/duration indicators of issues.

        Computed values (per issue):
        - day_open / day_close: calendar days between creation and
          assignation / closing
        - working_hours_open / working_hours_close: same intervals counted
          in working hours of the project's resource calendar
        - days_since_creation: age of the issue in days
        - inactivity_days: days since the last recorded activity

        :param fields: list of field names actually requested; only those
            are returned in the result dict
        :return: {issue_id: {field_name: value}}
        """
        Calendar = self.pool['resource.calendar']

        res = dict((res_id, {}) for res_id in ids)
        for issue in self.browse(cr, uid, ids, context=context):
            values = {
                'day_open': 0.0, 'day_close': 0.0,
                'working_hours_open': 0.0, 'working_hours_close': 0.0,
                'days_since_creation': 0.0, 'inactivity_days': 0.0,
            }
            # if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
            calendar_id = None
            if issue.project_id and issue.project_id.resource_calendar_id:
                calendar_id = issue.project_id.resource_calendar_id.id

            dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)

            if issue.date_open:
                dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
                # calendar days expressed as a float number of days
                values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
                values['working_hours_open'] = Calendar._interval_hours_get(
                    cr, uid, calendar_id, dt_create_date, dt_date_open,
                    timezone_from_uid=issue.user_id.id or uid,
                    exclude_leaves=False, context=context)

            if issue.date_closed:
                dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
                values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
                values['working_hours_close'] = Calendar._interval_hours_get(
                    cr, uid, calendar_id, dt_create_date, dt_date_closed,
                    timezone_from_uid=issue.user_id.id or uid,
                    exclude_leaves=False, context=context)

            days_since_creation = datetime.today() - dt_create_date
            values['days_since_creation'] = days_since_creation.days
            # last activity: last action if any, else last stage change,
            # else the creation itself
            if issue.date_action_last:
                inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
            elif issue.date_last_stage_update:
                inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
            else:
                inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
            values['inactivity_days'] = inactive_days.days

            # filter only required values
            for field in fields:
                res[issue.id][field] = values[field]

        return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '0',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, [id], ['name'], context=context)[0]
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default, context=context)
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
vals['date_last_stage_update'] = fields.datetime.now()
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_start
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
""" This function returns value of partner email address based on partner
:param part: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
if stage.fold:
return {'value': {'date_closed': fields.datetime.now()}}
return {'value': {'date_closed': False}}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_escalate(self, cr, uid, ids, context=None):  # FIXME rename this method to issue_escalate
    """Escalate each issue to its project's escalation project, resetting
    the assignee to the escalation project's manager (when set) and moving
    any linked task along with it."""
    for issue in self.browse(cr, uid, ids, context=context):
        esc_proj = issue.project_id.project_escalation_id
        if not esc_proj:
            raise osv.except_osv(
                _('Warning!'),
                _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
        vals = {'project_id': esc_proj.id}
        if esc_proj.user_id:
            vals['user_id'] = esc_proj.user_id.id
        issue.write(vals)
        if issue.task_id:
            # The related task follows the issue; its assignee is cleared.
            issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
    return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
    """ Override to get the reply_to of the parent project. """
    issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
    project_ids = {issue.project_id.id for issue in issues if issue.project_id}
    aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
    result = {}
    for issue in issues:
        key = issue.project_id.id if issue.project_id else 0
        result[issue.id] = aliases.get(key, False)
    return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
    """Suggest the issue's customer as an additional recipient: the linked
    partner record when set, otherwise the raw ``email_from`` address."""
    recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
    try:
        for issue in self.browse(cr, uid, ids, context=context):
            if issue.partner_id:
                self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
            elif issue.email_from:
                self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
    except (osv.except_osv, orm.except_orm):
        # no read access rights -> just ignore suggested recipients because this imply modifying followers
        pass
    return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
    """ Overrides mail_thread message_new that is called by the mailgateway
        through message_process.
        This override updates the document according to the email.
    """
    ctx = dict(context or {}, state_to='draft')
    defaults = {
        'name': msg.get('subject') or _("No Subject"),
        'email_from': msg.get('from'),
        'email_cc': msg.get('cc'),
        'partner_id': msg.get('author_id', False),
        'user_id': False,
    }
    # Caller-provided values win over the email-derived defaults.
    if custom_values:
        defaults.update(custom_values)
    return super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=ctx)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
    """ Overrides mail_thread message_post so that we can set the date of last action field when
        a new message is posted on the issue.
    """
    if context is None:
        context = {}
    res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
    if thread_id and subtype:
        # Written with SUPERUSER_ID, presumably so the bookkeeping update
        # succeeds regardless of the poster's write rights -- TODO confirm.
        self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
    return res
class project(osv.Model):
    _inherit = "project.project"

    def _get_alias_models(self, cr, uid, context=None):
        """Models selectable as target of the project's mail alias."""
        return [('project.task', "Tasks"), ("project.issue", "Issues")]

    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: number of issues in an unfolded
        (i.e. open) stage, per project."""
        Issue = self.pool['project.issue']
        return {
            project_id: Issue.search_count(cr, uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
            for project_id in ids
        }

    _columns = {
        'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
            help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
            states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
        'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
        'issue_ids': fields.one2many('project.issue', 'project_id',
                                     domain=[('stage_id.fold', '=', False)])
    }

    def _check_escalation(self, cr, uid, ids, context=None):
        """Constraint: a project may not be its own escalation project.

        Bug fix: the previous implementation only inspected ``ids[0]``,
        so a multi-record write could bypass validation for every record
        after the first.  All records are now checked.
        """
        for project_obj in self.browse(cr, uid, ids, context=context):
            if project_obj.project_escalation_id and \
                    project_obj.project_escalation_id.id == project_obj.id:
                return False
        return True

    _constraints = [
        (_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
    ]
class account_analytic_account(osv.Model):
    """Extend analytic accounts with an 'issues' toggle used by projects."""
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'

    _columns = {
        'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
    }

    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Propagate the template's ``use_issues`` flag to the new account."""
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        if template_id and 'value' in res:
            template = self.browse(cr, uid, template_id, context=context)
            res['value']['use_issues'] = template.use_issues
        return res

    def _trigger_project_creation(self, cr, uid, vals, context=None):
        """Also trigger project creation when issues are enabled, unless a
        project creation is already under way (guard in the context)."""
        if context is None:
            context = {}
        res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
        return res or (vals.get('use_issues') and 'project_creation_in_progress' not in context)
class project_project(osv.Model):
    """Keep a project's mail-alias model in sync with its task/issue use."""
    _inherit = 'project.project'

    _defaults = {
        'use_issues': True
    }

    def _check_create_write_values(self, cr, uid, vals, context=None):
        """ Perform some check on values given to create or write. """
        # Handle use_tasks / use_issues: if only one is checked, alias should take the same model
        use_tasks = vals.get('use_tasks')
        use_issues = vals.get('use_issues')
        if use_tasks and not use_issues:
            vals['alias_model'] = 'project.task'
        elif use_issues and not use_tasks:
            vals['alias_model'] = 'project.issue'

    def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
        """Form-view mirror of _check_create_write_values."""
        values = {}
        if use_tasks and not use_issues:
            values['alias_model'] = 'project.task'
        elif use_issues and not use_tasks:
            values['alias_model'] = 'project.issue'
        return {'value': values}

    def create(self, cr, uid, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
    """ Inherits partner and adds Issue information in the partner form """
    # Fixes: the docstring above was previously a dead string statement
    # placed after a method (so __doc__ was never set); it is now the real
    # class docstring.  search_count now also receives the context, for
    # consistency with the analogous project._issue_count helper.
    _inherit = 'res.partner'

    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field helper: number of issues per partner."""
        Issue = self.pool['project.issue']
        return {
            partner_id: Issue.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
            for partner_id in ids
        }

    _columns = {
        'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,061,646,706,463,998,700 | 2,245,259,935,225,885,400 | 51.161922 | 238 | 0.590994 | false |
bop/rango | lib/python2.7/site-packages/django/contrib/gis/tests/geo3d/tests.py | 100 | 11815 | from __future__ import absolute_import, unicode_literals
import os
import re
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.contrib.gis.utils import LayerMapping, LayerMapError
from django.test import TestCase
from django.utils._os import upath
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
# Paths to the shared GIS fixture files used by the layer-mapping tests.
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')

# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)

# Reference mapping of city name to its altitude (Z value).
city_dict = dict((name, coords) for name, coords in city_data)

# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
# Each entry is (name, 3D WKT linestring, expected tuple of Z values).
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
      15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
      15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
      15.435),
     ),
)

# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
    'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))',
    (21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DTest(TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango.  For more information, see the PostGIS docs
    on the routines that support 3D:

    http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions
    """

    def _load_interstate_data(self):
        # Interstate (2D / 3D and Geographic/Projected variants)
        for name, line, exp_z in interstate_data:
            line_3d = GEOSGeometry(line, srid=4269)
            # Drop the Z coordinate to obtain the 2D counterpart.
            line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)

            # Creating a geographic and projected version of the
            # interstate in both 2D and 3D.
            Interstate3D.objects.create(name=name, line=line_3d)
            InterstateProj3D.objects.create(name=name, line=line_3d)
            Interstate2D.objects.create(name=name, line=line_2d)
            InterstateProj2D.objects.create(name=name, line=line_2d)

    def _load_city_data(self):
        # Each pnt_data is (lon, lat, altitude-in-meters); see city_data.
        for name, pnt_data in city_data:
            City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))

    def _load_polygon_data(self):
        bbox_wkt, bbox_z = bbox_data
        bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
        # Zip each exterior-ring vertex with its elevation to build the 3D ring.
        bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
        Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
        Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)

    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))

        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            z = pnt_data[2]
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)

    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)

    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}

        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())

        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)

        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())

        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())

    def test_kml(self):
        """
        Test GeoQuerySet.kml() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    def test_geojson(self):
        """
        Test GeoQuerySet.geojson() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        self.assertEqual(ref_union, union)

    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()

        def check_extent3d(extent3d, tol=6):
            # Compare component-wise with a decimal-places tolerance.
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)

        for e3d in [extent1, extent2]:
            check_extent3d(e3d)

    def test_perimeter(self):
        """
        Testing GeoQuerySet.perimeter() on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)

    def test_length(self):
        """
        Testing GeoQuerySet.length() on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        #    FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)

        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)

    def test_scale(self):
        """
        Testing GeoQuerySet.scale() on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                # X/Y are scaled by 1.0 (unchanged); only Z is multiplied.
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    def test_translate(self):
        """
        Testing GeoQuerySet.translate() on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                # X/Y offsets are 0; only Z is shifted.
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| gpl-2.0 | 5,722,929,895,130,421,000 | -3,428,059,395,534,405,600 | 42.921933 | 676 | 0.616504 | false |
Zentyal/samba | python/samba/tests/registry.py | 49 | 1772 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.registry."""
import os
from samba import registry
import samba.tests
class HelperTests(samba.tests.TestCase):
    """Tests for the module-level registry helper functions."""

    def test_predef_to_name(self):
        # 0x80000002 is the well-known predefined handle for HKLM.
        name = registry.get_predef_name(0x80000002)
        self.assertEquals("HKEY_LOCAL_MACHINE", name)

    def test_str_regtype(self):
        # Registry value type 4 is a 32-bit number.
        self.assertEquals("REG_DWORD", registry.str_regtype(4))
class HiveTests(samba.tests.TestCaseInTempDir):
    """Tests operating on an LDB-backed registry hive in a temp directory."""

    def setUp(self):
        super(HiveTests, self).setUp()
        self.hive_path = os.path.join(self.tempdir, "ldb_new.ldb")
        self.hive = registry.open_ldb(self.hive_path)

    def tearDown(self):
        # Drop the hive reference before removing its backing file.
        del self.hive
        os.unlink(self.hive_path)
        super(HiveTests, self).tearDown()

    def test_ldb_new(self):
        self.assertTrue(self.hive is not None)

    # Disabled test cases kept for reference:
    #def test_flush(self):
    #    self.hive.flush()

    #def test_del_value(self):
    #    self.hive.del_value("FOO")
class RegistryTests(samba.tests.TestCase):
    """Tests for constructing the Registry object itself."""

    def test_new(self):
        # Constructing a Registry must succeed without raising.
        self.registry = registry.Registry()
| gpl-3.0 | 5,816,106,713,040,879,000 | -4,214,272,316,531,474,400 | 28.533333 | 71 | 0.690181 | false |