repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
thaole16/Boids | boids/boids.py | 1 | 4866 | """
A refactored implementation of
[Boids](http://dl.acm.org/citation.cfm?doid=37401.37406), starting from a
deliberately bad implementation: an exercise for class.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
class Boids(object):
def __init__(self,
boid_count=50,
x_positions=[-450, 50.0],
y_positions=[300.0, 600.0],
x_velocities=[0, 10.0],
y_velocities=[-20.0, 20.0],
move_to_middle_strength=0.01,
alert_distance=100,
formation_flying_distance=10000,
formation_flying_strength=0.125):
self.boid_count = boid_count
self.move_to_middle_strength = move_to_middle_strength
self.alert_distance = alert_distance
self.formation_flying_distance = formation_flying_distance
self.formation_flying_strength = formation_flying_strength
        self.boids_x = np.random.uniform(*x_positions, size=boid_count)
        self.boids_y = np.random.uniform(*y_positions, size=boid_count)
self.positions = np.stack((self.boids_x, self.boids_y))
        self.boid_x_velocities = np.random.uniform(*x_velocities, size=boid_count)
        self.boid_y_velocities = np.random.uniform(*y_velocities, size=boid_count)
self.velocities = np.stack((self.boid_x_velocities, self.boid_y_velocities))
self.boids = (self.positions, self.velocities)
def fly_towards_the_middle(self, boids, move_to_middle_strength=0.01):
(positions, velocities) = boids
middle = np.mean(positions, 1)
move_to_middle = (middle[:, np.newaxis] - positions) * move_to_middle_strength
velocities += move_to_middle
def separation(self, coords):
separations = np.array(coords)[:, np.newaxis, :] - np.array(coords)[:, :, np.newaxis]
separation_distance_squared = separations[0, :, :] ** 2 + separations[1, :, :] ** 2
return separations, separation_distance_squared
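    # Note (illustrative, follows from the broadcasting above): for N boids,
    # `separations` has shape (2, N, N) and separations[:, i, j] is the vector
    # from boid i to boid j; separation_distance_squared[i, j] is its squared
    # length. Callers compare that squared distance directly against
    # alert_distance / formation_flying_distance, so those parameters act as
    # squared radii.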
def fly_away_from_nearby_boids(self, boids, alert_distance=100):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_alert = separation_distance_squared > alert_distance
close_separations = np.copy(separations)
close_separations[0, :, :][birds_outside_alert] = 0 # x positions
close_separations[1, :, :][birds_outside_alert] = 0 # y positions
velocities += np.sum(close_separations, 1)
def match_speed_with_nearby_boids(self, boids,
formation_flying_distance=10000,
formation_flying_strength=0.125):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_formation = separation_distance_squared > formation_flying_distance
velocity_difference = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
close_formation = np.copy(velocity_difference)
close_formation[0, :, :][birds_outside_formation] = 0
close_formation[1, :, :][birds_outside_formation] = 0
velocities += -1 * np.mean(close_formation, 1) * formation_flying_strength
def update_boids(self, boids):
(positions, velocities) = boids
# Fly towards the middle
self.fly_towards_the_middle(boids, self.move_to_middle_strength)
# Fly away from nearby boids
self.fly_away_from_nearby_boids(boids, self.alert_distance)
# Try to match speed with nearby boids
self.match_speed_with_nearby_boids(boids, self.formation_flying_distance, self.formation_flying_strength)
# Update positions
positions += velocities
def _animate(self, frame):
self.update_boids(self.boids)
(positions, velocities) = self.boids
self.scatter.set_offsets(np.transpose(positions))
def model(self, xlim=(-500, 1500), ylim=(-500, 1500), frames=50, interval=50, savefile=None):
colors = np.random.rand(self.boid_count)
boidsize = np.pi * (2 * np.random.rand(self.boid_count) + 2) ** 2
figure = plt.figure()
axes = plt.axes(xlim=xlim, ylim=ylim)
self.scatter = axes.scatter(self.boids_x, self.boids_y,
s=boidsize, c=colors, alpha=0.5, edgecolors=None)
anim = animation.FuncAnimation(figure, self._animate,
frames=frames, interval=interval)
plt.xlabel('x (arbitrary units)')
plt.ylabel('y (arbitrary units)')
plt.title("Boids a'Flocking")
        if savefile is not None:
anim.save(savefile)
plt.show()
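# Example (illustrative sketch, not part of the original script): the flock can
# also be stepped without the matplotlib animation, e.g. to inspect positions
# after a number of updates:
#
#     flock = Boids(boid_count=10)
#     for _ in range(100):
#         flock.update_boids(flock.boids)
#     print(flock.positions.mean(axis=1))  # centre of the flock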
if __name__ == "__main__":
boidsobject = Boids()
boidsobject.model()
| mit | -4,863,632,825,812,648,000 | 42.446429 | 113 | 0.622894 | false |
Eric-Zhong/odoo | addons/resource/faces/utils.py | 433 | 3231 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import observer
import os.path
import sys
_call_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
def get_installation_path():
try:
if sys.frozen:
path = _call_dir
else:
raise AttributeError()
except AttributeError:
path = os.path.abspath(observer.__file__)
path = os.path.split(path)[0]
path = os.path.normcase(path)
return path
def get_resource_path():
try:
if sys.frozen:
path = _call_dir
path = os.path.join(path, "resources", "faces", "gui")
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "gui", "resources")
path = os.path.normcase(path)
return path
def get_template_path():
try:
if sys.frozen:
path = _call_dir
path = os.path.join(path, "resources", "faces", "templates")
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "templates")
path = os.path.normcase(path)
return path
def get_howtos_path():
try:
if sys.frozen:
path = _call_dir
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "howtos")
path = os.path.normcase(path)
return path
def flatten(items):
if isinstance(items, tuple):
items = list(items)
if not isinstance(items, list):
yield items
stack = [iter(items)]
while stack:
for item in stack[-1]:
if isinstance(item, tuple):
item = list(item)
if isinstance(item, list):
stack.append(iter(item))
break
yield item
else:
stack.pop()
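# Example (illustrative, not part of the original module): flatten() lazily
# yields the leaves of arbitrarily nested lists/tuples, e.g.
#
#     list(flatten([1, (2, [3, 4]), 5]))   # -> [1, 2, 3, 4, 5]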
def do_yield():
pass
def progress_start(title, maximum, message=""):
pass
def progress_update(value, message=""):
pass
def progress_end():
pass
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,170,183,780,738,484 | 24.642857 | 76 | 0.561746 | false |
fhaoquan/kbengine | kbe/src/lib/python/Lib/difflib.py | 72 | 81679 | """
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
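# For example (illustrative): _calculate_ratio(3, 8) == 0.75, i.e. 3 matching
# elements out of 8 total elements across both sequences.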
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
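    # Illustrative example of the resulting map: for b = "abcab" with no junk
    # and no popular elements, self.b2j ends up as
    # {'a': [0, 3], 'b': [1, 4], 'c': [2]}.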
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
        # (`queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writelines()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
yield from g
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
yield from g
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
yield from g
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
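# For example (illustrative): _format_range_unified(3, 6) -> '4,3',
# _format_range_unified(3, 4) -> '4', and _format_range_unified(3, 3) -> '3,0'
# (an empty range is reported against the line just before it).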
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
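# For example (illustrative): _format_range_context(3, 6) -> '4,6' and
# _format_range_context(3, 4) -> '4'; the context format reports start,end
# line numbers rather than start,length.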
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
    change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(next(diff_lines_iterator))
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
                # in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
                return  # no more lines; ending the generator avoids an explicit StopIteration
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
        until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
                try:
                    from_line, to_line, found_diff = next(line_iterator)
                except StopIteration:
                    return
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
        yield from line_pair_iterator
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    return
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    return
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&").replace(">",">").replace("<","<")
        # make spaces non-breakable so they don't get compressed or line wrapped
text = text.replace(' ',' ').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td> Empty File </td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
                # mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t',' ')
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| lgpl-3.0 | 848,720,548,558,049,800 | 39.058362 | 83 | 0.565739 | false |
Bionetbook/bionetbook | bnbapp/bionetbook/protocols/forms/verbs/resuspend.py | 2 | 1489 | from protocols.forms import forms
from core.utils import VOLUME_UNITS, CONCENTRATION_UNITS, TIME_UNITS
class ResuspendForm(forms.VerbForm):
name = "Resuspend"
slug = "resuspend"
# has_component = True
has_manual = True
layers = ['item_to_act', 'reagent', 'settify']
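    # Each measurement below follows the same pattern: a min/max value pair, a
    # units choice field, and a free-text comment (concentration, volume, time).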
item_to_act = forms.CharField(required=False, help_text='what are you resuspending?', label='item to resuspend')
reagent = forms.CharField(required=False, help_text='where are you washing it with')
min_conc = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_conc = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
conc_units = forms.ChoiceField(required=False, choices=CONCENTRATION_UNITS )
conc_comment = forms.CharField(required=False)
min_vol = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_vol = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
vol_units = forms.ChoiceField(required=False, choices=VOLUME_UNITS )
vol_comment = forms.CharField(required=False)
min_time = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_time = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
time_units = forms.ChoiceField(required=False, choices=TIME_UNITS, help_text='in seconds', initial = 'sec' )
time_comment = forms.CharField(required=False)
| mit | -441,155,153,899,768,770 | 56.269231 | 116 | 0.723304 | false |
EKiefer/edge-starter | py34env/Lib/site-packages/authtools/views.py | 4 | 11578 | """
Mostly equivalent to the views from django.contrib.auth.views, but
implemented as class-based views.
"""
from __future__ import unicode_literals
import warnings
from django.conf import settings
from django.contrib.auth import get_user_model, REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (AuthenticationForm, SetPasswordForm,
PasswordChangeForm, PasswordResetForm)
from django.contrib.auth.tokens import default_token_generator
from django.contrib import auth
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError:
from django.contrib.sites.models import get_current_site # Django < 1.7
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect, resolve_url
from django.utils.functional import lazy
from django.utils.http import base36_to_int, is_safe_url
from django.utils import six
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, TemplateView, RedirectView
try:
from django.contrib.auth import update_session_auth_hash
except ImportError:
# Django < 1.7
def update_session_auth_hash(request, user):
pass
User = get_user_model()
def _safe_resolve_url(url):
"""
Previously, resolve_url_lazy would fail if the url was a unicode object.
See <https://github.com/fusionbox/django-authtools/issues/13> for more
information.
Thanks to GitHub user alanwj for pointing out the problem and providing
this solution.
"""
return six.text_type(resolve_url(url))
resolve_url_lazy = lazy(_safe_resolve_url, six.text_type)
class WithCurrentSiteMixin(object):
def get_current_site(self):
return get_current_site(self.request)
def get_context_data(self, **kwargs):
kwargs = super(WithCurrentSiteMixin, self).get_context_data(**kwargs)
current_site = self.get_current_site()
kwargs.update({
'site': current_site,
'site_name': current_site.name,
})
return kwargs
class WithNextUrlMixin(object):
redirect_field_name = REDIRECT_FIELD_NAME
success_url = None
def get_next_url(self):
request = self.request
redirect_to = request.POST.get(self.redirect_field_name,
request.GET.get(self.redirect_field_name, ''))
if not redirect_to:
return
if is_safe_url(redirect_to, host=self.request.get_host()):
return redirect_to
# This mixin can be mixed with FormViews and RedirectViews. They
# each use a different method to get the URL to redirect to, so we
# need to provide both methods.
def get_success_url(self):
return self.get_next_url() or super(WithNextUrlMixin, self).get_success_url()
def get_redirect_url(self, **kwargs):
return self.get_next_url() or super(WithNextUrlMixin, self).get_redirect_url(**kwargs)
def DecoratorMixin(decorator):
"""
Converts a decorator written for a function view into a mixin for a
class-based view.
::
LoginRequiredMixin = DecoratorMixin(login_required)
class MyView(LoginRequiredMixin):
pass
class SomeView(DecoratorMixin(some_decorator),
DecoratorMixin(something_else)):
pass
"""
class Mixin(object):
__doc__ = decorator.__doc__
@classmethod
def as_view(cls, *args, **kwargs):
view = super(Mixin, cls).as_view(*args, **kwargs)
return decorator(view)
Mixin.__name__ = str('DecoratorMixin(%s)' % decorator.__name__)
return Mixin
NeverCacheMixin = DecoratorMixin(never_cache)
CsrfProtectMixin = DecoratorMixin(csrf_protect)
LoginRequiredMixin = DecoratorMixin(login_required)
SensitivePostParametersMixin = DecoratorMixin(
sensitive_post_parameters('password', 'old_password', 'password1',
'password2', 'new_password1', 'new_password2')
)
class AuthDecoratorsMixin(NeverCacheMixin, CsrfProtectMixin, SensitivePostParametersMixin):
pass
class LoginView(AuthDecoratorsMixin, WithCurrentSiteMixin, WithNextUrlMixin, FormView):
form_class = AuthenticationForm
template_name = 'registration/login.html'
allow_authenticated = True
success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)
# BBB: This is deprecated (See LoginView.get_allow_authenticated)
disallow_authenticated = None
def get_allow_authenticated(self):
if self.disallow_authenticated is not None:
warnings.warn("disallow_authenticated is deprecated. Please use allow_authenticated",
DeprecationWarning)
return not self.disallow_authenticated
else:
return self.allow_authenticated
def dispatch(self, *args, **kwargs):
allow_authenticated = self.get_allow_authenticated()
if not allow_authenticated and self.request.user.is_authenticated():
return redirect(self.get_success_url())
return super(LoginView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
auth.login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
def get_context_data(self, **kwargs):
kwargs = super(LoginView, self).get_context_data(**kwargs)
kwargs.update({
self.redirect_field_name: self.request.GET.get(
self.redirect_field_name, '',
),
})
return kwargs
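# Illustrative URLconf sketch (an assumed project urls.py, not part of this
# module; the URL patterns and names are placeholders):
#
#   from django.conf.urls import url
#   from authtools import views
#
#   urlpatterns = [
#       url(r'^login/$', views.LoginView.as_view(), name='login'),
#       url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
#   ]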
class LogoutView(NeverCacheMixin, WithCurrentSiteMixin, WithNextUrlMixin, TemplateView, RedirectView):
template_name = 'registration/logged_out.html'
permanent = False
def get(self, *args, **kwargs):
auth.logout(self.request)
# If we have a url to redirect to, do it. Otherwise render the logged-out template.
if self.get_redirect_url(**kwargs):
return RedirectView.get(self, *args, **kwargs)
else:
return TemplateView.get(self, *args, **kwargs)
class PasswordChangeView(LoginRequiredMixin, WithNextUrlMixin, AuthDecoratorsMixin, FormView):
template_name = 'registration/password_change_form.html'
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs['user'] = self.get_user()
return kwargs
def get_user(self):
return self.request.user
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one if
# django.contrib.auth.middleware.SessionAuthenticationMiddleware
# is enabled.
update_session_auth_hash(self.request, form.user)
return super(PasswordChangeView, self).form_valid(form)
class PasswordChangeDoneView(LoginRequiredMixin, TemplateView):
template_name = 'registration/password_change_done.html'
# 4 views for password reset:
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordResetView(CsrfProtectMixin, FormView):
template_name = 'registration/password_reset_form.html'
token_generator = default_token_generator
success_url = reverse_lazy('password_reset_done')
domain_override = None
subject_template_name = 'registration/password_reset_subject.txt'
email_template_name = 'registration/password_reset_email.html'
html_email_template_name = None
from_email = None
form_class = PasswordResetForm
def form_valid(self, form):
form.save(
domain_override=self.domain_override,
subject_template_name=self.subject_template_name,
email_template_name=self.email_template_name,
token_generator=self.token_generator,
from_email=self.from_email,
request=self.request,
use_https=self.request.is_secure(),
html_email_template_name=self.html_email_template_name,
)
return super(PasswordResetView, self).form_valid(form)
class PasswordResetDoneView(TemplateView):
template_name = 'registration/password_reset_done.html'
class PasswordResetConfirmView(AuthDecoratorsMixin, FormView):
template_name = 'registration/password_reset_confirm.html'
token_generator = default_token_generator
form_class = SetPasswordForm
success_url = reverse_lazy('password_reset_complete')
def dispatch(self, *args, **kwargs):
assert self.kwargs.get('token') is not None
self.user = self.get_user()
return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return User._default_manager.all()
def get_user(self):
# django 1.5 uses uidb36, django 1.6 uses uidb64
uidb36 = self.kwargs.get('uidb36')
uidb64 = self.kwargs.get('uidb64')
assert bool(uidb36) ^ bool(uidb64)
try:
if uidb36:
uid = base36_to_int(uidb36)
else:
# urlsafe_base64_decode is not available in django 1.5
from django.utils.http import urlsafe_base64_decode
uid = urlsafe_base64_decode(uidb64)
return self.get_queryset().get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return None
def valid_link(self):
user = self.user
return user is not None and self.token_generator.check_token(user, self.kwargs.get('token'))
def get_form_kwargs(self):
kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def get_context_data(self, **kwargs):
kwargs = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
if self.valid_link():
kwargs['validlink'] = True
else:
kwargs['validlink'] = False
kwargs['form'] = None
return kwargs
def form_valid(self, form):
if not self.valid_link():
return self.form_invalid(form)
self.save_form(form)
return super(PasswordResetConfirmView, self).form_valid(form)
def save_form(self, form):
return form.save()
class PasswordResetConfirmAndLoginView(PasswordResetConfirmView):
success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)
def save_form(self, form):
ret = super(PasswordResetConfirmAndLoginView, self).save_form(form)
user = auth.authenticate(username=self.user.get_username(),
password=form.cleaned_data['new_password1'])
auth.login(self.request, user)
return ret
class PasswordResetCompleteView(TemplateView):
template_name = 'registration/password_reset_complete.html'
login_url = settings.LOGIN_URL
def get_login_url(self):
return resolve_url(self.login_url)
def get_context_data(self, **kwargs):
kwargs = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
kwargs['login_url'] = self.get_login_url()
return kwargs
| mit | -81,026,986,573,845,260 | 34.406728 | 102 | 0.67516 | false |
larsmans/numpy | numpy/random/tests/test_random.py | 9 | 31521 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be a unsigned 32 bit integers
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be a unsigned 32 bit integers
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandomDist(TestCase):
    # Make sure the random distributions return the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays, and multidimensional versions of both:
for conv in [lambda x: x,
np.asarray,
lambda x: [(i, i) for i in x],
lambda x: np.asarray([(i, i) for i in x])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_flexible(self):
# gh-4270
arr = [(0, 1), (2, 3)]
dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)])
nparr = np.array(arr, dtype=dt)
a, b = nparr[0].copy(), nparr[1].copy()
for i in range(50):
np.random.shuffle(nparr)
assert_(a in nparr)
assert_(b in nparr)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
ma = np.ma.count_masked(a)
mb = np.ma.count_masked(b)
for i in range(50):
np.random.shuffle(a)
self.assertEqual(ma, np.ma.count_masked(a))
np.random.shuffle(b)
self.assertEqual(mb, np.ma.count_masked(b))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -7,220,779,066,877,924,000 | 42.962343 | 87 | 0.575172 | false |
jhseu/tensorflow | tensorflow/python/kernel_tests/summary_ops_test.py | 6 | 46187 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
def testWrite(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write('tag', 42, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write('tag', 42, step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_metadata(self):
logdir = self.get_temp_dir()
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = 'foo'
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('obj', 0, 0, metadata=metadata)
summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
m = constant_op.constant(metadata.SerializeToString())
summary_ops.write('string_tensor', 0, 0, metadata=m)
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(metadata, events[1].summary.value[0].metadata)
self.assertEqual(metadata, events[2].summary.value[0].metadata)
self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
def testWrite_ndarray(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
def testWrite_tensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
t = constant_op.constant([[1, 2], [3, 4]])
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', t, step=12)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_tensor_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f(t):
with writer.as_default():
summary_ops.write('tag', t, step=12)
t = constant_op.constant([[1, 2], [3, 4]])
f(t)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_stringTensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [b'foo', b'bar'], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
@test_util.run_gpu_only
def testWrite_gpuDeviceContext(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
with ops.device('/GPU:0'):
value = constant_op.constant(42.0)
step = constant_op.constant(12, dtype=dtypes.int64)
summary_ops.write('tag', value, step=step).numpy()
empty_metadata = summary_pb2.SummaryMetadata()
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertEqual(42, to_numpy(events[1].summary.value[0]))
self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
@test_util.also_run_as_tf_function
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
def testWrite_usingDefaultStep(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
summary_ops.set_step(1)
summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
summary_ops.write('tag', 1.0)
mystep = variables.Variable(10, dtype=dtypes.int64)
summary_ops.set_step(mystep)
summary_ops.write('tag', 1.0)
mystep.assign_add(1)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertEqual(5, len(events))
self.assertEqual(1, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(10, events[3].step)
self.assertEqual(11, events[4].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
summary_ops.set_step(1)
f()
summary_ops.set_step(2)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the function was first traced.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
f()
mystep.assign_add(1)
f()
mystep.assign(10)
f()
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
summary_ops.set_step(1)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(write_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the graph was constructed.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
first_assign_op = mystep.assign_add(1)
second_assign_op = mystep.assign(10)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(mystep.initializer)
sess.run(write_op)
sess.run(first_assign_op)
sess.run(write_op)
sess.run(second_assign_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_recordIf_constant(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
self.assertTrue(summary_ops.write('default', 1, step=0))
with summary_ops.record_if(True):
self.assertTrue(summary_ops.write('set_on', 1, step=0))
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('set_off', 1, step=0))
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_constant_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
# Use assertAllEqual instead of assertTrue since it works in a defun.
self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
with summary_ops.record_if(True):
self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
with summary_ops.record_if(False):
self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_callable(self):
logdir = self.get_temp_dir()
with context.eager_mode():
step = variables.Variable(-1, dtype=dtypes.int64)
def record_fn():
step.assign_add(1)
return int(step % 2) == 0
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(record_fn):
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_callable_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
step = variables.Variable(-1, dtype=dtypes.int64)
@def_function.function
def record_fn():
step.assign_add(1)
return math_ops.equal(step % 2, 0)
@def_function.function
def f():
with writer.as_default():
with summary_ops.record_if(record_fn):
return [
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step)]
self.assertAllEqual(f(), [True, False, True])
self.assertAllEqual(f(), [False, True, False])
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_tensorInput_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
def f(step):
with writer.as_default():
with summary_ops.record_if(math_ops.equal(step % 2, 0)):
return summary_ops.write('tag', 1, step=step)
self.assertTrue(f(0))
self.assertFalse(f(1))
self.assertTrue(f(2))
self.assertFalse(f(3))
self.assertTrue(f(4))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWriteRawPb(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_fromFunction(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_multipleValues(self):
logdir = self.get_temp_dir()
pb1 = summary_pb2.Summary()
pb1.value.add().simple_value = 1.0
pb1.value.add().simple_value = 2.0
pb2 = summary_pb2.Summary()
pb2.value.add().simple_value = 3.0
pb3 = summary_pb2.Summary()
pb3.value.add().simple_value = 4.0
pb3.value.add().simple_value = 5.0
pb3.value.add().simple_value = 6.0
pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pbs, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
expected_pb = summary_pb2.Summary()
for i in range(6):
expected_pb.value.add().simple_value = i + 1.0
self.assertProtoEquals(expected_pb, events[1].summary)
def testWriteRawPb_invalidValue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(
errors.DataLossError,
'Bad tf.compat.v1.Summary binary proto tensor string'):
summary_ops.write_raw_pb('notaproto', step=12)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable(self):
with context.eager_mode():
try:
mystep = variables.Variable(0)
summary_ops.set_step(mystep)
self.assertAllEqual(0, summary_ops.get_step().read_value())
mystep.assign_add(1)
self.assertAllEqual(1, summary_ops.get_step().read_value())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(1, summary_ops.get_step().read_value())
summary_ops.get_step().assign_add(1)
self.assertAllEqual(2, summary_ops.get_step().read_value())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable_fromFunction(self):
with context.eager_mode():
try:
@def_function.function
def set_step(step):
summary_ops.set_step(step)
return summary_ops.get_step()
@def_function.function
def get_and_increment():
summary_ops.get_step().assign_add(1)
return summary_ops.get_step()
mystep = variables.Variable(0)
self.assertAllEqual(0, set_step(mystep))
self.assertAllEqual(0, summary_ops.get_step().read_value())
self.assertAllEqual(1, get_and_increment())
self.assertAllEqual(2, get_and_increment())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(3, get_and_increment())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None, skip_on_eager=False):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
# If all characters aren't legal for a scope name, use default name.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_nameNotUniquifiedForTag(self):
constant_op.constant(0, name='foo')
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with ops.name_scope('with', skip_on_eager=False):
constant_op.constant(0, name='slash')
with summary_ops.summary_scope('with/slash') as (tag, _):
self.assertEqual('with/slash', tag)
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
class SummaryWriterTest(test_util.TensorFlowTestCase):
def testCreate_withInitAndClose(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
get_total = lambda: len(events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
def testCreate_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
# Returned SummaryWriter must be stored in a non-local variable so it
# lives throughout the function execution.
if not hasattr(f, 'writer'):
f.writer = summary_ops.create_file_writer_v2(logdir)
with context.eager_mode():
f()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
summary_ops._summary_state.writer = None # pylint: disable=protected-access
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.keras_model(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@test_util.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
summary_ops.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@test_util.run_v2_only
def testKerasModel_otherExceptions(self):
model = Sequential()
with test.mock.patch.object(model, 'to_json') as mock_to_json:
with test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring... oops')
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
# Content of function_graphs is large and, for instance, device can change.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegexpMatches(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with six.assertRaisesRegex(self, ValueError,
'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args),
'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegexpMatches(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Expected exactly one event file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
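# A minimal usage sketch of the helpers above (illustrative only; `logdir` is a
# placeholder and the calls assume an eager context, as in the tests):
#
#   with summary_ops.create_file_writer_v2(logdir).as_default():
#     summary_ops.write('loss', 0.5, step=1)
#   events = events_from_logdir(logdir)    # events[0] is the file_version event
#   value = events[1].summary.value[0]     # the written summary value
#   loss = to_numpy(value)                 # back to a numpy scalar (0.5)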
if __name__ == '__main__':
test.main()
| apache-2.0 | -6,128,232,882,227,762,000 | 36.127814 | 85 | 0.65421 | false |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
# quoting must be in the range [0, 3]
msg = 'bad "quoting" value'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
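# For reference, the integer "quoting" values exercised in these tests map to
# the csv module constants, which is why only values in [0, 3] are accepted:
#   csv.QUOTE_MINIMAL == 0, csv.QUOTE_ALL == 1,
#   csv.QUOTE_NONNUMERIC == 2 (unquoted fields are cast to float on read),
#   csv.QUOTE_NONE == 3 (quote characters receive no special handling).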
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
tm.assert_frame_equal(result, expected)
| gpl-2.0 | -1,652,748,299,100,699,400 | 36.993464 | 76 | 0.537244 | false |
bailey1234/hyeri7846 | lib/werkzeug/wsgi.py | 312 | 37386 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL for the current
request or parts of it. Here an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
hostname = _normalize(hostname)
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
ref = _normalize(ref)
if ref == hostname:
return True
if suffix_match and hostname.endswith('.' + ref):
return True
return False
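# Rough behaviour sketch for host_is_trusted (hostnames are illustrative):
#
#   host_is_trusted('example.com', ['example.com'])       # True
#   host_is_trusted('example.com:443', ['example.com'])   # True, port is stripped
#   host_is_trusted('sub.example.com', ['.example.com'])  # True, a leading dot
#                                                         # matches subdomains
#   host_is_trusted('evil.com', ['example.com'])          # False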
def get_host(environ, trusted_hosts=None):
"""Return the real host for the given WSGI environment. This takes care
of the `X-Forwarded-Host` header. Optionally it verifies that the host
is in a list of trusted hosts. If the host is not in there it will raise
a :exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the host of.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
if 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
elif 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
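# Rough behaviour sketch for get_host (values are illustrative):
#
#   get_host({'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com'})
#   # -> 'example.com'                      (Host header wins if present)
#   get_host({'wsgi.url_scheme': 'http', 'SERVER_NAME': 'example.com',
#             'SERVER_PORT': '8080'})
#   # -> 'example.com:8080'                 (non-default port is appended)
#   get_host({'wsgi.url_scheme': 'https', 'SERVER_NAME': 'example.com',
#             'SERVER_PORT': '443'})
#   # -> 'example.com'                      (default port is dropped)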
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
integer. If it's not available `None` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: indicates whether the function should use an empty
stream as a safe fallback or just return the original
WSGI input stream if it can't wrap it safely. The
default is to return an empty stream in those cases.
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
# that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If we don't have a content length we fall back to an empty stream
# in case of a safe fallback, otherwise we return the stream unchanged.
# The non-safe fallback is not recommended but might be useful in
# some situations.
if content_length is None:
return safe_fallback and _empty_stream or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
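# Rough behaviour sketch for get_input_stream (`stream` stands in for the raw
# WSGI input object):
#
#   get_input_stream({'wsgi.input': stream, 'CONTENT_LENGTH': '42'})
#   # -> LimitedStream(stream, 42)
#   get_input_stream({'wsgi.input': stream})
#   # -> empty stream (safe fallback when no Content-Length is given)
#   get_input_stream({'wsgi.input': stream}, safe_fallback=False)
#   # -> the raw stream, unchanged
#   get_input_stream({'wsgi.input': stream, 'wsgi.input_terminated': True})
#   # -> the raw stream, unchanged (input is known to be terminated)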
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
on Python 3 environments. if the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
on Python 3 environments. if the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
If there are empty segments (``'/foo//bar'``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and \
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
Currently the middleware does not support non-ASCII filenames. If the
encoding on the file system happens to be the encoding of the URI it may
work but this could also be by accident. We strongly suggest using ASCII
only file names for static files.
The middleware will guess the mimetype using the Python `mimetype`
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = {}
self.cache = cache
self.cache_timeout = cache_timeout
for key, value in iteritems(exports):
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports[key] = loader
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
return basename, lambda: (
provider.get_resource_stream(manager, path),
loadtime,
0
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(sys.getfilesystemencoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non-Unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/'.join([''] + [x for x in cleaned_path.split('/')
if x and x != '..'])
file_loader = None
for search_path, loader in iteritems(self.exports):
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
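# Illustrative sketch (not part of the original module): how SharedDataMiddleware
# is typically wrapped around an application. The wrapped app and the export
# path below are hypothetical; only the constructor arguments defined in
# ``__init__`` above are assumed.
def _example_shared_data_middleware():
    def app(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        return [b'Not Found']
    # A package export such as {'/static': ('myapplication', 'shared_files')}
    # would serve package data instead; it requires that package to be installed.
    return SharedDataMiddleware(app, {'/shared': '/path/to/shared'},
                                cache_timeout=3600)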
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
items = script.split('/')
script = '/'.join(items[:-1])
path_info = '/%s%s' % (items[-1], path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
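# Illustrative sketch (not part of the original module): shows how
# DispatcherMiddleware rewrites SCRIPT_NAME/PATH_INFO while dispatching. The
# applications and the mount point are hypothetical.
def _example_dispatcher_middleware():
    def make_app(name):
        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            line = '%s: SCRIPT_NAME=%r PATH_INFO=%r' % (
                name, environ.get('SCRIPT_NAME'), environ.get('PATH_INFO'))
            return [line.encode('utf-8')]
        return app
    # A request for '/app2/users' is handled by app2 with SCRIPT_NAME='/app2'
    # and PATH_INFO='/users'; everything else falls through to the root app.
    return DispatcherMiddleware(make_app('root'), {'/app2': make_app('app2')})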
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator. Because it is useful to add
    another close action to a returned iterator, and because writing a custom
    iterator class for that is tedious, this class can be used instead::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
    and finishes the processing once the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterator, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
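# Illustrative sketch (not part of the original module): the cleanup callbacks
# run when ``close()`` is called on the iterator, as a WSGI server would do.
def _example_closing_iterator():
    log = []
    body = ClosingIterator(iter([b'chunk-1', b'chunk-2']),
                           [lambda: log.append('cleanup ran')])
    chunks = list(body)   # iterate the response body
    body.close()          # the server calls this when the response is done
    return chunks, log    # ([b'chunk-1', b'chunk-2'], ['cleanup ran'])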
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
    More information about file wrappers is available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
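# Illustrative sketch (not part of the original module): iterating a
# FileWrapper yields ``buffer_size`` blocks until the file is exhausted.
def _example_file_wrapper():
    from io import BytesIO
    wrapper = FileWrapper(BytesIO(b'x' * 10000), buffer_size=4096)
    chunks = list(wrapper)   # blocks of 4096, 4096 and 1808 bytes
    wrapper.close()
    return [len(chunk) for chunk in chunks]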
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
    to the :meth:`~file.readline` method, which is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
    :param limit: the limit in bytes for the stream (usually the
                  content length).  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
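# Illustrative sketch (not part of the original module): line iteration over a
# raw stream with an explicit limit, as recommended instead of ``readline``.
# The payload is made up.
def _example_make_line_iter():
    from io import BytesIO
    data = b'first line\nsecond line\r\nlast line without newline'
    # yields b'first line\n', b'second line\r\n', b'last line without newline'
    return list(make_line_iter(BytesIO(data), limit=len(data)))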
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream (usually the
                  content length).  Not necessary if the `stream`
                  is otherwise already limited.
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
else:
new_buf.append(item)
buffer = new_buf
if buffer:
yield _join(buffer)
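# Illustrative sketch (not part of the original module): splitting a stream on
# an arbitrary separator; the separator and payload are made up.
def _example_make_chunk_iter():
    from io import BytesIO
    data = b'alpha|beta|gamma'
    # yields b'alpha', b'beta', b'gamma'
    return list(make_chunk_iter(BytesIO(data), separator=b'|', limit=len(data)))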
@implements_iterator
class LimitedStream(object):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because they pass a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
        :param chunk_size: the size for a chunk.  It will read chunks of
                           this size until the stream is exhausted and
                           throw away the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
        :param size: the number of bytes to read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistency with file
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
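# Illustrative sketch (not part of the original module): reads stop at the
# configured limit even though the underlying stream holds more data. The
# payload is made up.
def _example_limited_stream():
    from io import BytesIO
    stream = LimitedStream(BytesIO(b'0123456789 extra bytes'), limit=10)
    first = stream.read(4)   # b'0123'
    rest = stream.read()     # b'456789' -- stops at the limit
    return first, rest, stream.is_exhausted   # (..., ..., True)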
| apache-2.0 | -6,134,742,769,974,321,000 | 35.017341 | 81 | 0.604478 | false |
cjayb/mne-python | mne/channels/montage.py | 1 | 41279 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Marijn van Vliet <[email protected]>
# Jona Sassenhagen <[email protected]>
# Teon Brooks <[email protected]>
# Christian Brodbeck <[email protected]>
# Stefan Appelhoff <[email protected]>
# Joan Massich <[email protected]>
#
# License: Simplified BSD
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import os.path as op
import re
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT
from ..viz import plot_montage
from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart,
_topo_to_sph, _frame_to_str, Transform,
_verbose_frames, _fit_matched_points,
_quat_to_affine)
from ..io._digitization import (_count_points_by_type,
_get_dig_eeg, _make_dig_points, write_dig,
_read_dig_fif, _format_dig_points,
_get_fid_coords, _coord_frame_const)
from ..io.meas_info import create_info
from ..io.open import fiff_open
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..utils import (warn, copy_function_doc_to_method_doc, _pl,
_check_option, _validate_type, _check_fname, _on_missing,
fill_doc, deprecated)
from ._dig_montage_utils import _read_dig_montage_egi
from ._dig_montage_utils import _parse_brainvision_dig_montage
_BUILT_IN_MONTAGES = [
'EGI_256',
'GSN-HydroCel-128', 'GSN-HydroCel-129', 'GSN-HydroCel-256',
'GSN-HydroCel-257', 'GSN-HydroCel-32', 'GSN-HydroCel-64_1.0',
'GSN-HydroCel-65_1.0',
'biosemi128', 'biosemi16', 'biosemi160', 'biosemi256',
'biosemi32', 'biosemi64',
'easycap-M1', 'easycap-M10',
'mgh60', 'mgh70',
'standard_1005', 'standard_1020', 'standard_alphabetic',
'standard_postfixed', 'standard_prefixed', 'standard_primed'
]
def _check_get_coord_frame(dig):
_MSG = 'Only single coordinate frame in dig is supported'
dig_coord_frames = set([d['coord_frame'] for d in dig])
assert len(dig_coord_frames) <= 1, _MSG
return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None
def get_builtin_montages():
"""Get a list of all builtin montages.
Returns
-------
montages : list
Names of all builtin montages that can be used by
:func:`make_standard_montage`.
"""
return _BUILT_IN_MONTAGES
def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None,
hsp=None, hpi=None, coord_frame='unknown'):
r"""Make montage from arrays.
Parameters
----------
ch_pos : dict
Dictionary of channel positions. Keys are channel names and values
are 3D coordinates - array of shape (3,) - in native digitizer space
in m.
nasion : None | array, shape (3,)
The position of the nasion fiducial point.
This point is assumed to be in the native digitizer space in m.
lpa : None | array, shape (3,)
The position of the left periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
rpa : None | array, shape (3,)
The position of the right periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
hsp : None | array, shape (n_points, 3)
This corresponds to an array of positions of the headshape points in
3d. These points are assumed to be in the native digitizer space in m.
hpi : None | array, shape (n_hpi, 3)
This corresponds to an array of HPI points in the native digitizer
        space. They are only necessary if the device-to-head transform is
        going to be computed (see ``compute_dev_head_t``).
coord_frame : str
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
"""
if ch_pos is None:
ch_names = None
else:
ch_names = list(ch_pos)
dig = _make_dig_points(
nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp,
dig_ch_pos=ch_pos, coord_frame=coord_frame
)
return DigMontage(dig=dig, ch_names=ch_names)
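# Illustrative sketch (not part of the original module): building a montage
# from in-memory arrays. Channel names and coordinates (in meters, native
# digitizer space) are made up for demonstration only.
def _example_make_dig_montage():
    ch_pos = {'EEG001': np.array([0.02, 0.08, 0.04]),
              'EEG002': np.array([-0.02, 0.08, 0.04])}
    return make_dig_montage(ch_pos=ch_pos,
                            nasion=np.array([0., 0.10, 0.]),
                            lpa=np.array([-0.08, 0., 0.]),
                            rpa=np.array([0.08, 0., 0.]))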
class DigMontage(object):
"""Montage for digitized electrode and headshape position data.
.. warning:: Montages are typically created using one of the helper
functions in the ``See Also`` section below instead of
instantiating this class directly.
Parameters
----------
dev_head_t : array, shape (4, 4)
A Device-to-Head transformation matrix.
dig : list of dict
The object containing all the dig points.
ch_names : list of str
The names of the EEG channels.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, dev_head_t=None, dig=None, ch_names=None):
# XXX: dev_head_t now is np.array, we should add dev_head_transform
# (being instance of Transformation) and move the parameter to the
# end of the call.
dig = list() if dig is None else dig
_validate_type(item=dig, types=list, item_name='dig')
ch_names = list() if ch_names is None else ch_names
n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG])
if n_eeg != len(ch_names):
raise ValueError(
'The number of EEG channels (%d) does not match the number'
' of channel names provided (%d)' % (n_eeg, len(ch_names))
)
self.dev_head_t = dev_head_t
self.dig = dig
self.ch_names = ch_names
def __repr__(self):
"""Return string representation."""
n_points = _count_points_by_type(self.dig)
return ('<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs,'
' {fid:d} fiducials, {eeg:d} channels>').format(**n_points)
@copy_function_doc_to_method_doc(plot_montage)
def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True,
sphere=None):
return plot_montage(self, scale_factor=scale_factor,
show_names=show_names, kind=kind, show=show,
sphere=sphere)
def rename_channels(self, mapping):
"""Rename the channels.
Parameters
----------
%(rename_channels_mapping)s
Returns
-------
inst : instance of DigMontage
The instance. Operates in-place.
"""
from .channels import rename_channels
temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg')
rename_channels(temp_info, mapping)
self.ch_names = temp_info['ch_names']
def save(self, fname):
"""Save digitization points to FIF.
Parameters
----------
fname : str
The filename to use. Should end in .fif or .fif.gz.
"""
if _check_get_coord_frame(self.dig) != 'head':
raise RuntimeError('Can only write out digitization points in '
'head coordinates.')
write_dig(fname, self.dig)
def __iadd__(self, other):
"""Add two DigMontages in place.
Notes
-----
        Two DigMontages can only be added if there are no duplicated ch_names
        and, if fiducials are present, they must share the same coordinate
        system and location values.
"""
def is_fid_defined(fid):
return not(
fid.nasion is None and fid.lpa is None and fid.rpa is None
)
# Check for none duplicated ch_names
ch_names_intersection = set(self.ch_names).intersection(other.ch_names)
if ch_names_intersection:
raise RuntimeError((
"Cannot add two DigMontage objects if they contain duplicated"
" channel names. Duplicated channel(s) found: {}."
).format(
', '.join(['%r' % v for v in sorted(ch_names_intersection)])
))
# Check for unique matching fiducials
self_fid, self_coord = _get_fid_coords(self.dig)
other_fid, other_coord = _get_fid_coords(other.dig)
if is_fid_defined(self_fid) and is_fid_defined(other_fid):
if self_coord != other_coord:
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations are not in the same '
'coordinate system.')
for kk in self_fid:
if not np.array_equal(self_fid[kk], other_fid[kk]):
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations do not match '
'(%s)' % kk)
# keep self
self.dig = _format_dig_points(
self.dig + [d for d in other.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
)
else:
self.dig = _format_dig_points(self.dig + other.dig)
self.ch_names += other.ch_names
return self
def copy(self):
"""Copy the DigMontage object.
Returns
-------
dig : instance of DigMontage
The copied DigMontage instance.
"""
return deepcopy(self)
def __add__(self, other):
"""Add two DigMontages."""
out = self.copy()
out += other
return out
def _get_ch_pos(self):
pos = [d['r'] for d in _get_dig_eeg(self.dig)]
assert len(self.ch_names) == len(pos)
return OrderedDict(zip(self.ch_names, pos))
def _get_dig_names(self):
NAMED_KIND = (FIFF.FIFFV_POINT_EEG,)
is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig])
assert len(self.ch_names) == is_eeg.sum()
dig_names = [None] * len(self.dig)
for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]):
dig_names[dig_idx] = self.ch_names[ch_name_idx]
return dig_names
VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1)
def _check_unit_and_get_scaling(unit):
_check_option('unit', unit, sorted(VALID_SCALES.keys()))
return VALID_SCALES[unit]
def transform_to_head(montage):
"""Transform a DigMontage object into head coordinate.
    It requires that the LPA, RPA and Nasion fiducial
    points are available. It requires that all fiducial
    points are in the same coordinate system, e.g. 'unknown',
    and it will convert all the points in this coordinate
    system to the Neuromag head coordinate system.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
montage : instance of DigMontage
The montage after transforming the points to head
coordinate system.
"""
# Get fiducial points and their coord_frame
native_head_t = compute_native_head_t(montage)
montage = montage.copy() # to avoid inplace modification
if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD:
for d in montage.dig:
if d['coord_frame'] == native_head_t['from']:
d['r'] = apply_trans(native_head_t, d['r'])
d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return montage
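# Illustrative sketch (not part of the original module): a montage digitized in
# an arbitrary ('unknown') frame is converted to Neuromag head coordinates once
# the three fiducials are present. All coordinates are made up.
def _example_transform_to_head():
    montage = make_dig_montage(
        ch_pos={'EEG001': np.array([0.02, 0.08, 0.04])},
        nasion=np.array([0., 0.10, 0.]),
        lpa=np.array([-0.08, 0., 0.]),
        rpa=np.array([0.08, 0., 0.]),
        coord_frame='unknown')
    return transform_to_head(montage)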
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif len(items) != 5:
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
def read_dig_fif(fname):
r"""Read digitized points from a .fif file.
Note that electrode names are not present in the .fif file so
they are here defined with the convention from VectorView
systems (EEG001, EEG002, etc.)
Parameters
----------
fname : path-like
FIF file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_captrak
read_dig_polhemus_isotrak
read_dig_hpts
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
# Load the dig data
f, tree = fiff_open(fname)[:2]
with f as fid:
dig = _read_dig_fif(fid, tree)
ch_names = []
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_EEG:
ch_names.append('EEG%03d' % d['ident'])
montage = DigMontage(dig=dig, ch_names=ch_names)
return montage
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : str
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
    The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
        in millimeters. If your points are in [m] then the ``unit`` parameter
        can be changed.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
from ._standard_montage_utils import _str_names, _str
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)
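# Illustrative sketch (not part of the original module): writing a tiny file in
# the documented hpts format and reading it back. The coordinates are the ones
# quoted in the docstring above; the temporary file name is arbitrary.
def _example_read_dig_hpts():
    import tempfile
    content = ("cardinal 2 -5.6729 -12.3873 -30.3671\n"
               "cardinal 1 -37.6782 -10.4957 91.5228\n"
               "cardinal 3 -131.3127 9.3976 -22.2363\n"
               "eeg FP1 3.8676 -77.0439 -13.0212\n")
    with tempfile.NamedTemporaryFile('w', suffix='.hpts', delete=False) as f:
        f.write(content)
    return read_dig_hpts(f.name, unit='mm')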
def read_dig_egi(fname):
"""Read electrode locations from EGI system.
Parameters
----------
fname : path-like
EGI MFF XML coordinates file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _read_dig_montage_egi(
fname=fname,
_scaling=1.,
_all_data_kwargs_are_none=True
)
# XXX: to change to the new naming in v.0.20 (all this block should go)
data.pop('point_names')
data['hpi'] = data.pop('elp')
data['ch_pos'] = data.pop('dig_ch_pos')
return make_dig_montage(**data)
def read_dig_captrak(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname, scale=1e-3)
return make_dig_montage(**data)
@deprecated('read_dig_captrack is deprecated and will be removed in 0.22; '
'please use read_dig_captrak instead '
'(note the spelling correction: captraCK -> captraK).')
def read_dig_captrack(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
return read_dig_captrak(fname)
def _get_montage_in_head(montage):
coords = set([d['coord_frame'] for d in montage.dig])
if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:
return montage
else:
return transform_to_head(montage.copy())
@fill_doc
def _set_montage(info, montage, match_case=True, on_missing='raise'):
"""Apply montage to data.
With a DigMontage, this function will replace the digitizer info with
the values specified for the particular montage.
Usually, a montage is expected to contain the positions of all EEG
electrodes and a warning is raised when this is not the case.
Parameters
----------
info : instance of Info
The measurement info to update.
%(montage)s
%(match_case)s
%(on_missing_montage)s
Notes
-----
This function will change the info variable in place.
"""
_validate_type(montage, types=(DigMontage, type(None), str),
item_name='montage')
if isinstance(montage, str): # load builtin montage
_check_option('montage', montage, _BUILT_IN_MONTAGES)
montage = make_standard_montage(montage)
if isinstance(montage, DigMontage):
mnt_head = _get_montage_in_head(montage)
def _backcompat_value(pos, ref_pos):
if any(np.isnan(pos)):
return np.full(6, np.nan)
else:
return np.concatenate((pos, ref_pos))
ch_pos = mnt_head._get_ch_pos()
refs = set(ch_pos) & {'EEG000', 'REF'}
assert len(refs) <= 1
eeg_ref_pos = np.zeros(3) if not(refs) else ch_pos.pop(refs.pop())
# This raises based on info being subset/superset of montage
_pick_chs = partial(
pick_types, exclude=[], eeg=True, seeg=True, ecog=True, meg=False,
)
info_names = [info['ch_names'][ii] for ii in _pick_chs(info)]
dig_names = mnt_head._get_dig_names()
ref_names = [None, 'EEG000', 'REF']
if match_case:
ch_pos_use = ch_pos
info_names_use = info_names
dig_names_use = dig_names
else:
ch_pos_use = OrderedDict(
(name.lower(), pos) for name, pos in ch_pos.items())
info_names_use = [name.lower() for name in info_names]
dig_names_use = [name.lower() if name is not None else name
for name in dig_names]
ref_names = [name.lower() if name is not None else name
for name in ref_names]
n_dup = len(ch_pos) - len(ch_pos_use)
if n_dup:
raise ValueError('Cannot use match_case=False as %s montage '
'name(s) require case sensitivity' % n_dup)
n_dup = len(info_names_use) - len(set(info_names_use))
if n_dup:
raise ValueError('Cannot use match_case=False as %s channel '
'name(s) require case sensitivity' % n_dup)
# warn user if there is not a full overlap of montage with info_chs
not_in_montage = [name for name, use in zip(info_names, info_names_use)
if use not in ch_pos_use]
if len(not_in_montage): # DigMontage is subset of info
missing_coord_msg = (
'DigMontage is only a subset of info. There are '
f'{len(not_in_montage)} channel position{_pl(not_in_montage)} '
'not present in the DigMontage. The required channels are:\n\n'
f'{not_in_montage}.\n\nConsider using inst.set_channel_types '
'if these are not EEG channels, or use the on_missing '
'parameter if the channel positions are allowed to be unknown '
'in your analyses.'
)
_on_missing(on_missing, missing_coord_msg)
# set ch coordinates and names from digmontage or nan coords
ch_pos_use = dict(
(name, ch_pos_use.get(name, [np.nan] * 3))
for name in info_names) # order does not matter here
for name, use in zip(info_names, info_names_use):
_loc_view = info['chs'][info['ch_names'].index(name)]['loc']
_loc_view[:6] = _backcompat_value(ch_pos_use[use], eeg_ref_pos)
del ch_pos_use
# XXX this is probably wrong as it uses the order from the montage
# rather than the order of our info['ch_names'] ...
info['dig'] = _format_dig_points([
mnt_head.dig[ii] for ii, name in enumerate(dig_names_use)
if name in (info_names_use + ref_names)])
if mnt_head.dev_head_t is not None:
info['dev_head_t'] = Transform('meg', 'head', mnt_head.dev_head_t)
else: # None case
info['dig'] = None
for ch in info['chs']:
ch['loc'] = np.full(12, np.nan)
def _read_isotrak_elp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.elp`` file.
Parameters
----------
fname : str
The filepath of .elp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
value_pattern = r"\-?\d+\.?\d*e?\-?\d*"
coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern)
with open(fname) as fid:
file_str = fid.read()
points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,
re.MULTILINE)]
points = np.array(points_str, dtype=float)
return {
'nasion': points[0], 'lpa': points[1], 'rpa': points[2],
'points': points[3:]
}
def _read_isotrak_hsp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.hsp`` file.
Parameters
----------
fname : str
The filepath of .hsp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
def get_hsp_fiducial(line):
return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t')
with open(fname) as ff:
for line in ff:
if 'position of fiducials' in line.lower():
break
nasion = get_hsp_fiducial(ff.readline())
lpa = get_hsp_fiducial(ff.readline())
rpa = get_hsp_fiducial(ff.readline())
_ = ff.readline()
line = ff.readline()
if line:
n_points, n_cols = np.fromstring(line, dtype=int, sep='\t')
points = np.fromstring(
string=ff.read(), dtype=float, sep='\t',
).reshape(-1, n_cols)
assert points.shape[0] == n_points
else:
points = np.empty((0, 3))
return {
'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points
}
def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):
"""Read Polhemus digitizer data from a file.
Parameters
----------
fname : str
The filepath of Polhemus ISOTrak formatted file.
File extension is expected to be '.hsp', '.elp' or '.eeg'.
ch_names : None | list of str
The names of the points. This will make the points
considered as EEG channels. If None, channels will be assumed
to be HPI if the extension is ``'.elp'``, and extra headshape
points otherwise.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus ISOTrak systems data is usually
        exported in meters. Defaults to 'm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_polhemus_fastscan
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
"""
VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if ext == '.elp':
data = _read_isotrak_elp_points(fname)
else:
        # Default case: read points as hsp since it is the most likely scenario
data = _read_isotrak_hsp_points(fname)
if _scale != 1:
data = {key: val * _scale for key, val in data.items()}
else:
pass # noqa
if ch_names is None:
keyword = 'hpi' if ext == '.elp' else 'hsp'
data[keyword] = data.pop('points')
else:
points = data.pop('points')
if points.shape[0] == len(ch_names):
data['ch_pos'] = OrderedDict(zip(ch_names, points))
else:
raise ValueError((
"Length of ``ch_names`` does not match the number of points"
" in {fname}. Expected ``ch_names`` length {n_points:d},"
" given {n_chnames:d}"
).format(
fname=fname, n_points=points.shape[0], n_chnames=len(ch_names)
))
return make_dig_montage(**data)
def _is_polhemus_fastscan(fname):
header = ''
with open(fname, 'r') as fid:
for line in fid:
if not line.startswith('%'):
break
header += line
return 'FastSCAN' in header
def read_polhemus_fastscan(fname, unit='mm'):
"""Read Polhemus FastSCAN digitizer data from a ``.txt`` file.
Parameters
----------
fname : str
The filepath of .txt Polhemus FastSCAN file.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus FastSCAN systems data is usually
        exported in millimeters. Defaults to 'mm'.
Returns
-------
points : array, shape (n_points, 3)
The digitization points in digitizer coordinates.
See Also
--------
read_dig_polhemus_isotrak
make_dig_montage
"""
VALID_FILE_EXT = ['.txt']
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if not _is_polhemus_fastscan(fname):
raise ValueError(
"%s does not contain Polhemus FastSCAN header" % fname)
points = _scale * np.loadtxt(fname, comments='%', ndmin=2)
return points
def _read_eeglab_locations(fname):
ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()
topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
sph = _topo_to_sph(topo)
pos = _sph_to_cart(sph)
pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
return ch_names, pos
def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None):
"""Read a montage from a file.
Parameters
----------
fname : str
File extension is expected to be:
'.loc' or '.locs' or '.eloc' (for EEGLAB files),
'.sfp' (BESA/EGI files), '.csd',
'.elc', '.txt', '.csd', '.elp' (BESA spherical),
'.bvef' (BrainVision files).
head_size : float | None
The size of the head (radius, in [m]). If ``None``, returns the values
read from the montage file with no modification. Defaults to 0.095m.
coord_frame : str | None
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to None, which is "unknown" for
most readers but "head" for EEGLAB.
.. versionadded:: 0.20
Returns
-------
montage : instance of DigMontage
The montage.
Notes
-----
    The function is a helper to read electrode positions you may have
    in various formats. Most of these formats are weakly specified
    in terms of units and coordinate systems. This implies that setting
    a montage using a DigMontage produced by this function may
    be problematic. If you use a standard/template montage (e.g. 10/20,
    10/10 or 10/05) we recommend you use :func:`make_standard_montage`.
    If you have positions in memory you can also use
    :func:`make_dig_montage`, which takes arrays as input.
See Also
--------
make_dig_montage
make_standard_montage
"""
from ._standard_montage_utils import (
_read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc,
_read_elp_besa, _read_brainvision
)
SUPPORTED_FILE_EXT = {
'eeglab': ('.loc', '.locs', '.eloc', ),
'hydrocel': ('.sfp', ),
'matlab': ('.csd', ),
'asa electrode': ('.elc', ),
'generic (Theta-phi in degrees)': ('.txt', ),
'standard BESA spherical': ('.elp', ), # XXX: not same as polhemus elp
'brainvision': ('.bvef', ),
}
_, ext = op.splitext(fname)
_check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ())))
if ext in SUPPORTED_FILE_EXT['eeglab']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
ch_names, pos = _read_eeglab_locations(fname)
scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
pos *= scale
montage = make_dig_montage(
ch_pos=OrderedDict(zip(ch_names, pos)),
coord_frame='head',
)
elif ext in SUPPORTED_FILE_EXT['hydrocel']:
montage = _read_sfp(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['matlab']:
montage = _read_csd(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['asa electrode']:
montage = _read_elc(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
montage = _read_theta_phi_in_degrees(fname, head_size=head_size,
fid_names=('Nz', 'LPA', 'RPA'))
elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']:
montage = _read_elp_besa(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['brainvision']:
montage = _read_brainvision(fname, head_size)
if coord_frame is not None:
coord_frame = _coord_frame_const(coord_frame)
for d in montage.dig:
d['coord_frame'] = coord_frame
return montage
def compute_dev_head_t(montage):
"""Compute device to head transform from a DigMontage.
Parameters
----------
montage : instance of DigMontage
The DigMontage must contain the fiducials in head
coordinate system and hpi points in both head and
meg device coordinate system.
Returns
-------
dev_head_t : instance of Transform
A Device-to-Head transformation matrix.
"""
_, coord_frame = _get_fid_coords(montage.dig)
if coord_frame != FIFF.FIFFV_COORD_HEAD:
raise ValueError('montage should have been set to head coordinate '
'system with transform_to_head function.')
hpi_head = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float)
hpi_dev = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float)
if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0):
raise ValueError((
"To compute Device-to-Head transformation, the same number of HPI"
" points in device and head coordinates is required. (Got {dev}"
" points in device and {head} points in head coordinate systems)"
).format(dev=len(hpi_dev), head=len(hpi_head)))
trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0])
return Transform(fro='meg', to='head', trans=trans)
def compute_native_head_t(montage):
"""Compute the native-to-head transformation for a montage.
    This uses the fiducials in the native space to compute the
    transform to the head coordinate frame.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
native_head_t : instance of Transform
A native-to-head transformation matrix.
"""
# Get fiducial points and their coord_frame
fid_coords, coord_frame = _get_fid_coords(montage.dig)
if coord_frame is None:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
if coord_frame == FIFF.FIFFV_COORD_HEAD:
native_head_t = np.eye(4)
else:
fid_keys = ('nasion', 'lpa', 'rpa')
for key in fid_keys:
if fid_coords[key] is None:
warn('Fiducial point %s not found, assuming identity %s to '
'head transformation'
% (key, _verbose_frames[coord_frame],))
native_head_t = np.eye(4)
break
else:
native_head_t = get_ras_to_neuromag_trans(
*[fid_coords[key] for key in fid_keys])
return Transform(coord_frame, 'head', native_head_t)
def make_standard_montage(kind, head_size=HEAD_SIZE_DEFAULT):
"""Read a generic (built-in) montage.
Parameters
----------
kind : str
The name of the montage to use. See notes for valid kinds.
head_size : float
The head size (radius, in meters) to use for spherical montages.
Defaults to 95mm.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_custom_montage
Notes
-----
Individualized (digitized) electrode positions should be read in using
:func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`,
:func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`,
:func:`read_dig_hpts` or made with :func:`make_dig_montage`.
Valid ``kind`` arguments are:
=================== =====================================================
Kind Description
=================== =====================================================
standard_1005 Electrodes are named and positioned according to the
international 10-05 system (343+3 locations)
standard_1020 Electrodes are named and positioned according to the
international 10-20 system (94+3 locations)
standard_alphabetic Electrodes are named with LETTER-NUMBER combinations
(A1, B2, F4, ...) (65+3 locations)
standard_postfixed Electrodes are named according to the international
10-20 system using postfixes for intermediate
positions (100+3 locations)
standard_prefixed Electrodes are named according to the international
10-20 system using prefixes for intermediate
positions (74+3 locations)
standard_primed Electrodes are named according to the international
10-20 system using prime marks (' and '') for
intermediate positions (100+3 locations)
biosemi16 BioSemi cap with 16 electrodes (16+3 locations)
biosemi32 BioSemi cap with 32 electrodes (32+3 locations)
biosemi64 BioSemi cap with 64 electrodes (64+3 locations)
biosemi128 BioSemi cap with 128 electrodes (128+3 locations)
biosemi160 BioSemi cap with 160 electrodes (160+3 locations)
biosemi256 BioSemi cap with 256 electrodes (256+3 locations)
easycap-M1 EasyCap with 10-05 electrode names (74 locations)
easycap-M10 EasyCap with numbered electrodes (61 locations)
EGI_256 Geodesic Sensor Net (256 locations)
GSN-HydroCel-32 HydroCel Geodesic Sensor Net and Cz (33+3 locations)
GSN-HydroCel-64_1.0 HydroCel Geodesic Sensor Net (64+3 locations)
GSN-HydroCel-65_1.0 HydroCel Geodesic Sensor Net and Cz (65+3 locations)
GSN-HydroCel-128 HydroCel Geodesic Sensor Net (128+3 locations)
GSN-HydroCel-129 HydroCel Geodesic Sensor Net and Cz (129+3 locations)
GSN-HydroCel-256 HydroCel Geodesic Sensor Net (256+3 locations)
GSN-HydroCel-257 HydroCel Geodesic Sensor Net and Cz (257+3 locations)
mgh60 The (older) 60-channel cap used at
MGH (60+3 locations)
mgh70 The (newer) 70-channel BrainVision cap used at
MGH (70+3 locations)
=================== =====================================================
.. versionadded:: 0.19.0
"""
from ._standard_montage_utils import standard_montage_look_up_table
_check_option('kind', kind, _BUILT_IN_MONTAGES)
return standard_montage_look_up_table[kind](head_size=head_size)
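# Illustrative sketch (not part of the original module): loading one of the
# built-in template montages listed above. It assumes the montage files bundled
# with the package are available on disk.
def _example_make_standard_montage():
    return make_standard_montage('standard_1020')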
| bsd-3-clause | 7,078,422,319,586,289,000 | 32.752249 | 79 | 0.579496 | false |
kytvi2p/Sigil | 3rdparty/python/Lib/test/test_script_helper.py | 8 | 5187 | """Unittests for test.script_helper. Who tests the test helper?"""
import subprocess
import sys
from test import script_helper
import unittest
from unittest import mock
class TestScriptHelper(unittest.TestCase):
def test_assert_python_expect_success(self):
t = script_helper._assert_python(True, '-c', 'import sys; sys.exit(0)')
self.assertEqual(0, t[0], 'return code was not 0')
def test_assert_python_expect_failure(self):
# I didn't import the sys module so this child will fail.
rc, out, err = script_helper._assert_python(False, '-c', 'sys.exit(0)')
self.assertNotEqual(0, rc, 'return code should not be 0')
def test_assert_python_raises_expect_success(self):
# I didn't import the sys module so this child will fail.
with self.assertRaises(AssertionError) as error_context:
script_helper._assert_python(True, '-c', 'sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('command line was:', error_msg)
self.assertIn('sys.exit(0)', error_msg, msg='unexpected command line')
def test_assert_python_raises_expect_failure(self):
with self.assertRaises(AssertionError) as error_context:
script_helper._assert_python(False, '-c', 'import sys; sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('Process return code is 0,', error_msg)
self.assertIn('import sys; sys.exit(0)', error_msg,
msg='unexpected command line.')
@mock.patch('subprocess.Popen')
def test_assert_python_isolated_when_env_not_required(self, mock_popen):
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=False) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
self.assertEqual(1, mock_popen.call_count)
self.assertEqual(1, mock_ire_func.call_count)
popen_command = mock_popen.call_args[0][0]
self.assertEqual(sys.executable, popen_command[0])
self.assertIn('None', popen_command)
self.assertIn('-I', popen_command)
self.assertNotIn('-E', popen_command) # -I overrides this
@mock.patch('subprocess.Popen')
def test_assert_python_not_isolated_when_env_is_required(self, mock_popen):
"""Ensure that -I is not passed when the environment is required."""
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=True) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
popen_command = mock_popen.call_args[0][0]
self.assertNotIn('-I', popen_command)
self.assertNotIn('-E', popen_command)
class TestScriptHelperEnvironment(unittest.TestCase):
"""Code coverage for _interpreter_requires_environment()."""
def setUp(self):
self.assertTrue(
hasattr(script_helper, '__cached_interp_requires_environment'))
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
def tearDown(self):
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_true(self, mock_check_call):
mock_check_call.side_effect = subprocess.CalledProcessError('', '')
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_false(self, mock_check_call):
# The mocked subprocess.check_call fakes a no-error process.
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_details(self, mock_check_call):
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
check_call_command = mock_check_call.call_args[0][0]
self.assertEqual(sys.executable, check_call_command[0])
self.assertIn('-E', check_call_command)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,734,193,920,613,768,000 | 46.587156 | 80 | 0.644881 | false |
0asa/scikit-learn | sklearn/feature_selection/univariate_selection.py | 5 | 18520 | """Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
    smallest value of the scores' dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
    # scipy.stats.fprob was removed from scipy; special.fdtrc computes the
    # same F-distribution survival function.
    prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the Anova F-value for the provided sample
Parameters
----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
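# Illustrative sketch (not part of the original module): ANOVA F-scores on a
# tiny, made-up two-class dataset.
def _example_f_classif():
    X = np.array([[1., 10.], [2., 12.], [3., 11.],
                  [8., 10.5], [9., 11.5], [10., 12.5]])
    y = np.array([0, 0, 0, 1, 1, 1])
    F, pval = f_classif(X, y)   # the first feature separates the classes well
    return F, pval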
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared statistic for each class/feature combination.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain booleans or frequencies (e.g., term counts in document
classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
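# Illustrative usage sketch (exposition only; not part of the original module).
# The count matrix below is made up and stands in for e.g. per-document term
# counts, with one row per sample and one column per feature.
def _example_chi2():
    import numpy as np
    X = np.array([[4, 0, 1],
                  [3, 1, 0],
                  [0, 5, 2],
                  [1, 4, 3]])
    y = np.array([0, 0, 1, 1])
    scores, pvalues = chi2(X, y)  # one chi-squared statistic and p-value per feature
    return scores, pvalues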
def f_regression(X, y, center=True):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
    1. The regressor of interest and the data are orthogonalized
       with respect to constant regressors.
    2. The cross correlation between data and regressors is computed.
    3. It is converted to an F score, then to a p-value.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target data vector.
    center : bool, optional (default=True)
        If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
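# Illustrative usage sketch (exposition only; not part of the original module).
# A made-up regression problem where only the first column is informative.
def _example_f_regression():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 2)
    y = 2.0 * X[:, 0] + 0.1 * rng.randn(20)
    F, pval = f_regression(X, y, center=True)  # F should be much larger for column 0
    return F, pval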
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = len(scores) * self.percentile // 100
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
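# Illustrative usage sketch (exposition only; not part of the original module).
# Keeps the top 50% of features of a tiny made-up classification problem.
def _example_select_percentile():
    import numpy as np
    X = np.array([[1.0, 0.1, 5.0, 0.0],
                  [1.2, 0.2, 4.8, 0.1],
                  [5.0, 0.1, 1.0, 0.0],
                  [5.2, 0.3, 1.1, 0.1]])
    y = np.array([0, 0, 1, 1])
    selector = SelectPercentile(score_func=f_classif, percentile=50)
    X_reduced = selector.fit(X, y).transform(X)
    return X_reduced.shape  # expected (4, 2) for these made-up values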
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
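# Illustrative usage sketch (exposition only; not part of the original module).
# Selects the single best feature of a tiny made-up count matrix using the
# chi2 score function defined above.
def _example_select_k_best():
    import numpy as np
    X = np.array([[4, 0], [3, 1], [0, 5], [1, 4]])
    y = np.array([0, 0, 1, 1])
    selector = SelectKBest(score_func=chi2, k=1).fit(X, y)
    return selector.get_support()  # boolean mask with exactly one True entry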
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is the target false
discovery rate.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
alpha = self.alpha
sv = np.sort(self.pvalues_)
threshold = sv[sv < alpha * np.arange(len(self.pvalues_))].max()
return self.pvalues_ <= threshold
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
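# Illustrative usage sketch (exposition only; not part of the original module).
# The generic selector dispatching to the 'k_best' strategy defined above.
def _example_generic_univariate_select():
    import numpy as np
    X = np.array([[1.0, 0.0, 3.0],
                  [1.1, 0.1, 2.9],
                  [4.0, 0.0, 1.0],
                  [4.1, 0.1, 0.9]])
    y = np.array([0, 0, 1, 1])
    transformer = GenericUnivariateSelect(score_func=f_classif, mode='k_best', param=2)
    return transformer.fit_transform(X, y).shape  # expected (4, 2) for these values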
| bsd-3-clause | 9,161,403,944,473,077,000 | 29.815308 | 79 | 0.591361 | false |
ric2b/Vivaldi-browser | chromium/tools/binary_size/libsupersize/concurrent_test.py | 2 | 5418 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import threading
import unittest
import concurrent
def _ForkTestHelper(arg1, arg2, pickle_me_not, test_instance, parent_pid):
_ = pickle_me_not # Suppress lint warning.
test_instance.assertNotEquals(os.getpid(), parent_pid)
return arg1 + arg2
class Unpicklable(object):
"""Ensures that pickle() is not called on parameters."""
def __getstate__(self):
raise AssertionError('Tried to pickle')
class ConcurrentTest(unittest.TestCase):
def testEncodeDictOfLists_Empty(self):
test_dict = {}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_EmptyValue(self):
test_dict = {'foo': []}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_AllStrings(self):
test_dict = {'foo': ['a', 'b', 'c'], 'foo2': ['a', 'b']}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_KeyTransform(self):
test_dict = {0: ['a', 'b', 'c'], 9: ['a', 'b']}
encoded = concurrent.EncodeDictOfLists(test_dict, key_transform=str)
decoded = concurrent.DecodeDictOfLists(encoded, key_transform=int)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_ValueTransform(self):
test_dict = {'a': ['0', '1', '2'], 'b': ['3', '4']}
expected = {'a': [0, 1, 2], 'b': [3, 4]}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded, value_transform=int)
self.assertEquals(expected, decoded)
def testEncodeDictOfLists_Join_Empty(self):
test_dict1 = {}
test_dict2 = {}
expected = {}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded2 = concurrent.EncodeDictOfLists(test_dict2)
encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(expected, decoded)
  def testEncodeDictOfLists_Join_Single(self):
test_dict1 = {'key1': ['a']}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded = concurrent.JoinEncodedDictOfLists([encoded1])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict1, decoded)
def testEncodeDictOfLists_JoinMultiple(self):
test_dict1 = {'key1': ['a']}
test_dict2 = {'key2': ['b']}
expected = {'key1': ['a'], 'key2': ['b']}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded2 = concurrent.EncodeDictOfLists({})
encoded3 = concurrent.EncodeDictOfLists(test_dict2)
encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2, encoded3])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(expected, decoded)
def testCallOnThread(self):
main_thread = threading.current_thread()
def callback(arg1, arg2):
self.assertEquals(1, arg1)
self.assertEquals(2, arg2)
my_thread = threading.current_thread()
self.assertNotEquals(my_thread, main_thread)
return 3
result = concurrent.CallOnThread(callback, 1, arg2=2)
self.assertEquals(3, result.get())
def testForkAndCall_normal(self):
parent_pid = os.getpid()
result = concurrent.ForkAndCall(
_ForkTestHelper, (1, 2, Unpicklable(), self, parent_pid))
self.assertEquals(3, result.get())
def testForkAndCall_exception(self):
parent_pid = os.getpid()
result = concurrent.ForkAndCall(
_ForkTestHelper, (1, 'a', None, self, parent_pid))
self.assertRaises(TypeError, result.get)
def testBulkForkAndCall_none(self):
results = concurrent.BulkForkAndCall(_ForkTestHelper, [])
self.assertEquals([], list(results))
def testBulkForkAndCall_few(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper, [
(1, 2, Unpicklable(), self, parent_pid),
(3, 4, None, self, parent_pid)])
self.assertEquals({3, 7}, set(results))
def testBulkForkAndCall_few_kwargs(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper,
[(1, 2, Unpicklable()), (3, 4, None)],
test_instance=self, parent_pid=parent_pid)
self.assertEquals({3, 7}, set(results))
def testBulkForkAndCall_many(self):
parent_pid = os.getpid()
args = [(1, 2, Unpicklable(), self, parent_pid) for _ in xrange(100)]
results = concurrent.BulkForkAndCall(_ForkTestHelper, args)
self.assertEquals([3] * 100, list(results))
def testBulkForkAndCall_many_kwargs(self):
parent_pid = os.getpid()
args = [(1, 2) for _ in xrange(100)]
results = concurrent.BulkForkAndCall(
_ForkTestHelper, args, pickle_me_not=Unpicklable(), test_instance=self,
parent_pid=parent_pid)
self.assertEquals([3] * 100, list(results))
def testBulkForkAndCall_exception(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper, [
(1, 'a', self, parent_pid)])
self.assertRaises(TypeError, results.next)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,737,504,394,054,580,700 | 35.608108 | 79 | 0.691953 | false |
quarkslab/irma | probe/tests/probe/controllers/test_ftpctrl.py | 1 | 1780 | from unittest import TestCase
from mock import patch, MagicMock, call
import probe.controllers.ftpctrl as module
from irma.common.base.exceptions import IrmaFtpError
class TestFtpctrl(TestCase):
@patch("probe.controllers.ftpctrl.os.path.isdir")
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_upload_files(self, m_IrmaSFTPv2, m_isdir):
parent_filename = "parent_file"
filelist = ["file1", "file2"]
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
m_isdir.return_value = False
module.upload_files("frontend", "path", filelist, parent_filename)
m_isdir.assert_has_calls([call('path/file1'),
call('path/file2')])
m_ftp.upload_file.assert_has_calls([call('parent_file_0',
'path/file1'),
call('parent_file_1',
'path/file2')])
@patch("probe.controllers.ftpctrl.os.path.isdir")
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_upload_files_not_a_file(self, m_IrmaSFTPv2, m_isdir):
m_isdir.return_value = True
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
module.upload_files("frontend", "path", ["dir"], "parent_file")
m_ftp.upload_file.assert_not_called()
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_download_file(self, m_IrmaSFTPv2):
filename = "file4"
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
module.download_file("frontend", "srcname", filename)
m_ftp.download_file.assert_called_once_with(".", "srcname", filename)
| apache-2.0 | 2,803,622,803,782,412,000 | 43.5 | 77 | 0.596067 | false |
thanatoskira/cansina | core/payload.py | 2 | 3505 | import threading
import Queue
import time
from core.task import Task
def _populate_list_with_file(file_name):
""" Open a file, read its content and strips it. Returns a list with the content
additionally it filter and clean some splinters
"""
with open(file_name, 'r') as f:
tmp_list = f.readlines()
clean_list = []
for e in tmp_list:
# Delete leading and trailing spaces
e = e.strip()
# Skip commented lines in payload files
if e.startswith('#'):
continue
# Remove leading '/' characters
if e.startswith('/'):
e = e[1:]
clean_list.append(e.decode("utf-8", "replace"))
return clean_list
def _has_extension(res):
    # whether the last path segment contains a '.'
if res.rfind("/") == -1:
return "." in res
else:
return "." in res[res.rfind("/"):]
class Payload():
def __init__(self, target, payload_filename):
self.target = target
self.payload_filename = payload_filename
self.payload = _populate_list_with_file(payload_filename)
self.queue = Queue.Queue()
self.dead = False
self.extensions = None
self.length = len(self.payload)
self.banned_response_codes = None
self.unbanned_response_codes = None
self.content = None
self.remove_slash = False
self.uppercase = False
def set_remove_slash(self, remove_slash):
self.remove_slash = remove_slash
def set_banned_response_codes(self, banned_response_codes):
self.banned_response_codes = banned_response_codes
def set_unbanned_response_codes(self, unbanned_response_codes):
self.unbanned_response_codes = unbanned_response_codes
def set_extensions(self, extensions):
self.extensions = extensions
def set_content(self, content):
self.content = content
def get_length(self):
return self.length
def get_total_requests(self):
return self.length * len(self.extensions)
def kill(self):
self.dead = True
def is_finished(self):
return self.dead
def set_uppercase(self, uppercase):
self.uppercase = uppercase
def get_queue(self):
task_id = 0
for resource in self.payload:
if self.uppercase:
resource = resource.upper()
task_id += 1
# Useful when looking for files without extension instead of directories
if self.remove_slash and resource.endswith("/"):
resource = resource[:-1]
for extension in self.extensions:
                # If resource is a whole word and the user didn't provide an extension
# put a final /
if not extension and not _has_extension(resource) and not self.remove_slash:
resource += '/'
                # Put a '.' before the extension if the user didn't do it
if extension and not '.' in extension:
extension = '.' + extension
task = Task(task_id, self.target, resource, extension)
task.set_payload_filename(self.payload_filename)
task.set_payload_length(self.length)
task.set_banned_response_codes(self.banned_response_codes)
task.set_unbanned_response_codes(self.unbanned_response_codes)
task.set_content(self.content)
self.queue.put(task)
return self.queue
| gpl-3.0 | 1,638,703,656,410,932,500 | 30.576577 | 92 | 0.59515 | false |
willseward/cattle | tests/integration/cattletest/core/test_user_preferences.py | 8 | 2401 | from common_fixtures import * # NOQA
from gdapi import ApiError
@pytest.fixture(scope='module')
def user_client(context):
return context.user_client
def _user_preference(client, name=None):
if name is None:
name = random_str()
preference = client.wait_success(client.create_user_preference(
name=name, value=random_str()))
got_preference = client.by_id('userPreference', preference.id)
assert preference.id == got_preference.id
assert name == got_preference.name
assert preference.value == got_preference.value
return got_preference
def test_create_user_preference(user_client):
_user_preference(user_client)
def test_delete_user_preference(user_client):
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.deactivate())
preference = user_client.wait_success(preference.remove())
preference = user_client.wait_success(preference.purge())
preference = user_client.by_id('userPreference', preference.id)
assert preference.state == 'purged'
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.remove())
assert preference.state == 'removed'
preference = user_client.wait_success(preference.purge())
assert preference.state == 'purged'
def test_update_user_preference(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_update_user_preference_pass_name(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, name=preference.name, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_unique_user_preference(user_client, admin_user_client):
rand_str = random_str()
_user_preference(user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(user_client, name=rand_str)
assert e.value.error.status == 422
_user_preference(admin_user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(admin_user_client, name=rand_str)
assert e.value.error.status == 422
| apache-2.0 | -2,528,450,583,300,333,000 | 35.938462 | 73 | 0.718451 | false |
mdublin/Brightcove-Dynamic-Ingest-App | ENV/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py | 441 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.3"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| mit | -2,791,858,947,414,828,000 | 33.612903 | 74 | 0.680336 | false |
Enucatl/pilatus-experiments | scripts/time_series2csv.py | 1 | 1133 | """Read the time series and output a csv"""
import argparse
import h5py
import csv
import sys
import numpy as np
parser = argparse.ArgumentParser(
__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"file",
nargs=1,
help="hdf5 file"
)
if __name__ == '__main__':
args = parser.parse_args()
file_name = args.file[0]
hdf5_file = h5py.File(file_name, "r")
hdf5_group = hdf5_file["raw_images"]
writer = csv.writer(sys.stdout)
exposures = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]
n_files = [1000, 1000, 1000, 500, 500, 500, 200, 200, 200, 100]
writer.writerow(["exposure", "signal", "noise", "snr"])
datasets = np.array([dataset for dataset in hdf5_group.values()])
print(datasets.shape)
i = 0
for exposure, n in zip(exposures, n_files):
dataset = datasets[i:(i + n), 0, ...]
print(dataset.shape)
i += n
signal = np.mean(dataset, axis=0)
noise = np.std(dataset, axis=0)
snr = signal / noise
writer.writerow([exposure, signal, noise, snr])
hdf5_file.close()
| gpl-3.0 | 3,053,433,939,669,690,000 | 28.815789 | 73 | 0.60812 | false |
redhat-performance/tuned | tuned/plugins/plugin_irqbalance.py | 1 | 3151 | from . import base
from .decorators import command_custom
from tuned import consts
import tuned.logs
import errno
import perf
import re
log = tuned.logs.get()
class IrqbalancePlugin(base.Plugin):
"""
Plugin for irqbalance settings management.
"""
def __init__(self, *args, **kwargs):
super(IrqbalancePlugin, self).__init__(*args, **kwargs)
self._cpus = perf.cpu_map()
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
def _instance_cleanup(self, instance):
pass
@classmethod
def _get_config_options(cls):
return {
"banned_cpus": None,
}
def _read_irqbalance_sysconfig(self):
try:
with open(consts.IRQBALANCE_SYSCONFIG_FILE, "r") as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
log.warn("irqbalance sysconfig file is missing. Is irqbalance installed?")
else:
log.error("Failed to read irqbalance sysconfig file: %s" % e)
return None
def _write_irqbalance_sysconfig(self, content):
try:
with open(consts.IRQBALANCE_SYSCONFIG_FILE, "w") as f:
f.write(content)
return True
except IOError as e:
log.error("Failed to write irqbalance sysconfig file: %s" % e)
return False
def _write_banned_cpus(self, sysconfig, banned_cpumask):
return sysconfig + "IRQBALANCE_BANNED_CPUS=%s\n" % banned_cpumask
def _clear_banned_cpus(self, sysconfig):
lines = []
for line in sysconfig.split("\n"):
if not re.match(r"\s*IRQBALANCE_BANNED_CPUS=", line):
lines.append(line)
return "\n".join(lines)
def _restart_irqbalance(self):
# Exit code 5 means unit not found (see 'EXIT_NOTINSTALLED' in
# systemd.exec(5))
retcode, out = self._cmd.execute(
["systemctl", "try-restart", "irqbalance"],
no_errors=[5])
if retcode != 0:
log.warn("Failed to restart irqbalance. Is it installed?")
def _set_banned_cpus(self, banned_cpumask):
content = self._read_irqbalance_sysconfig()
if content is None:
return
content = self._clear_banned_cpus(content)
content = self._write_banned_cpus(content, banned_cpumask)
if self._write_irqbalance_sysconfig(content):
self._restart_irqbalance()
def _restore_banned_cpus(self):
content = self._read_irqbalance_sysconfig()
if content is None:
return
content = self._clear_banned_cpus(content)
if self._write_irqbalance_sysconfig(content):
self._restart_irqbalance()
@command_custom("banned_cpus", per_device=False)
def _banned_cpus(self, enabling, value, verify, ignore_missing):
banned_cpumask = None
if value is not None:
banned = set(self._cmd.cpulist_unpack(value))
present = set(self._cpus)
if banned.issubset(present):
banned_cpumask = self._cmd.cpulist2hex(list(banned))
else:
str_cpus = ",".join([str(x) for x in self._cpus])
log.error("Invalid banned_cpus specified, '%s' does not match available cores '%s'"
% (value, str_cpus))
if (enabling or verify) and banned_cpumask is None:
return None
if verify:
# Verification is currently not supported
return None
elif enabling:
self._set_banned_cpus(banned_cpumask)
else:
self._restore_banned_cpus()
| gpl-2.0 | -2,159,136,969,307,826,700 | 27.645455 | 87 | 0.695017 | false |
rggjan/gegl-global-matting | bindings/pygegl/Gegl/__init__.py | 7 | 1702 | # PyGEGL - Python bindings for the GEGL image processing library
# Copyright (C) 2007 Manish Singh
#
# __init__.py: initialization file for the Gegl package
#
# PyGEGL is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# PyGEGL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with PyGEGL; if not, see <http://www.gnu.org/licenses/>.
# dl tricks from GST python's __init__.py
import sys
def setdlopenflags():
oldflags = sys.getdlopenflags()
try:
from DLFCN import RTLD_GLOBAL, RTLD_LAZY
except ImportError:
RTLD_GLOBAL = -1
RTLD_LAZY = -1
import os
osname = os.uname()[0]
if osname == 'Linux' or osname == 'SunOS' or osname == 'FreeBSD':
RTLD_GLOBAL = 0x100
RTLD_LAZY = 0x1
elif osname == 'Darwin':
RTLD_GLOBAL = 0x8
RTLD_LAZY = 0x1
del os
except:
RTLD_GLOBAL = -1
RTLD_LAZY = -1
if RTLD_GLOBAL != -1 and RTLD_LAZY != -1:
sys.setdlopenflags(RTLD_LAZY | RTLD_GLOBAL)
return oldflags
oldflags = setdlopenflags()
from _gegl import *
sys.setdlopenflags(oldflags)
del sys, setdlopenflags
from fifthleg import *
import atexit
atexit.register(exit)
del exit, atexit
del _gegl
| gpl-3.0 | -7,241,721,958,495,886,000 | 27.366667 | 73 | 0.670975 | false |
Natim/sentry | tests/sentry/api/endpoints/test_project_group_index.py | 8 | 19043 | from __future__ import absolute_import
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from sentry.models import (
EventMapping, Group, GroupBookmark, GroupSeen, GroupStatus
)
from sentry.testutils import APITestCase
from sentry.testutils.helpers import parse_link_header
class GroupListTest(APITestCase):
def _parse_links(self, header):
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
links = {}
for url, attrs in parse_link_header(header).iteritems():
links[attrs['rel']] = attrs
attrs['href'] = url
return links
def test_simple_pagination(self):
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?sort_by=date&limit=1', format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['next']['cursor'])
response = self.client.get(links['next']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group1.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'true'
assert links['next']['results'] == 'false'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 0
group3 = self.create_group(
checksum='c' * 32,
last_seen=now + timedelta(seconds=1),
)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group3.id)
def test_stats_period(self):
# TODO(dcramer): this test really only checks if validation happens
# on statsPeriod
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?statsPeriod=24h', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=14d', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=48h', format='json')
assert response.status_code == 400
def test_auto_resolved(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(days=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
def test_lookup_by_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
EventMapping.objects.create(
event_id='c' * 32,
project=group.project,
group=group,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group.id)
def test_lookup_by_unknown_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 0
class GroupUpdateTest(APITestCase):
def test_global_resolve(self):
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.put(url + '?status=unresolved', data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200, response.data
assert response.data == {
'status': 'resolved',
}
# the previously resolved entry should not be included
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.status == GroupStatus.RESOLVED
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.status == GroupStatus.RESOLVED
assert new_group2.resolved_at is not None
# the muted entry should not be included
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.status == GroupStatus.MUTED
assert new_group3.resolved_at is None
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.status == GroupStatus.UNRESOLVED
assert new_group4.resolved_at is None
def test_selective_status_update(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200
assert response.data == {
'status': 'resolved',
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.resolved_at is not None
assert new_group2.status == GroupStatus.RESOLVED
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.resolved_at is None
assert new_group3.status == GroupStatus.MUTED
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.resolved_at is None
assert new_group4.status == GroupStatus.UNRESOLVED
def test_set_unresolved(self):
project = self.project
group = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group=group,
)
response = self.client.put(url, data={
'status': 'unresolved',
}, format='json')
assert response.status_code == 200
assert response.data == {
'status': 'unresolved',
}
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
def test_set_bookmarked(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'isBookmarked': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isBookmarked': True,
}
bookmark1 = GroupBookmark.objects.filter(group=group1, user=self.user)
assert bookmark1.exists()
bookmark2 = GroupBookmark.objects.filter(group=group2, user=self.user)
assert bookmark2.exists()
bookmark3 = GroupBookmark.objects.filter(group=group3, user=self.user)
assert not bookmark3.exists()
bookmark4 = GroupBookmark.objects.filter(group=group4, user=self.user)
assert not bookmark4.exists()
def test_set_public(self):
group1 = self.create_group(checksum='a' * 32, is_public=False)
group2 = self.create_group(checksum='b' * 32, is_public=False)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': True,
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.is_public
def test_set_private(self):
group1 = self.create_group(checksum='a' * 32, is_public=True)
group2 = self.create_group(checksum='b' * 32, is_public=True)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'false',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': False,
}
new_group1 = Group.objects.get(id=group1.id)
assert not new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert not new_group2.is_public
def test_set_has_seen(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'hasSeen': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'hasSeen': True,
}
r1 = GroupSeen.objects.filter(group=group1, user=self.user)
assert r1.exists()
r2 = GroupSeen.objects.filter(group=group2, user=self.user)
assert r2.exists()
r3 = GroupSeen.objects.filter(group=group3, user=self.user)
assert not r3.exists()
r4 = GroupSeen.objects.filter(group=group4, user=self.user)
assert not r4.exists()
@patch('sentry.api.endpoints.project_group_index.merge_group')
def test_merge(self, merge_group):
project = self.project
group1 = self.create_group(checksum='a' * 32, times_seen=1)
group2 = self.create_group(checksum='b' * 32, times_seen=50)
group3 = self.create_group(checksum='c' * 32, times_seen=2)
group4 = self.create_group(checksum='d' * 32)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&id={group3.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group3=group3,
)
response = self.client.put(url, data={
'merge': '1',
}, format='json')
assert response.status_code == 200
assert response.data['merge']['parent'] == str(group2.id)
assert sorted(response.data['merge']['children']) == [
str(group1.id),
str(group3.id),
]
assert len(merge_group.mock_calls) == 2
merge_group.delay.assert_any_call(from_object_id=group1.id, to_object_id=group2.id)
merge_group.delay.assert_any_call(from_object_id=group3.id, to_object_id=group2.id)
class GroupDeleteTest(APITestCase):
def test_global_is_forbidden(self):
project = self.project
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.delete(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 400
def test_delete_by_id(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
with self.tasks():
response = self.client.delete(url, format='json')
assert response.status_code == 204
new_group1 = Group.objects.filter(id=group1.id)
assert not new_group1.exists()
new_group2 = Group.objects.filter(id=group2.id)
assert not new_group2.exists()
new_group3 = Group.objects.filter(id=group3.id)
assert new_group3.exists()
new_group4 = Group.objects.filter(id=group4.id)
assert new_group4.exists()
| bsd-3-clause | -1,518,718,468,204,667,100 | 36.634387 | 91 | 0.58788 | false |
gigglesninja/senior-design | MAVProxy/MAVProxy/modules/lib/mp_menu.py | 4 | 10663 | #!/usr/bin/env python
'''
menu handling widgets for wx
Andrew Tridgell
November 2013
'''
import wx
from MAVProxy.modules.lib import mp_util
class MPMenuGeneric(object):
    '''base class for MP menu items'''
def __init__(self):
pass
def find_selected(self, event):
return None
def _append(self, menu):
'''append this menu item to a menu'''
pass
def __str__(self):
return "MPMenuGeneric()"
def __repr__(self):
return str(self.__str__())
class MPMenuSeparator(MPMenuGeneric):
'''a MP menu separator'''
def __init__(self):
MPMenuGeneric.__init__(self)
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendSeparator()
def __str__(self):
return "MPMenuSeparator()"
class MPMenuItem(MPMenuGeneric):
'''a MP menu item'''
def __init__(self, name, description='', returnkey=None, handler=None):
MPMenuGeneric.__init__(self)
self.name = name
self.description = description
self.returnkey = returnkey
self.handler = handler
self.handler_result = None
def find_selected(self, event):
'''find the selected menu item'''
if event.GetId() == self.id():
return self
return None
def call_handler(self):
'''optionally call a handler function'''
if self.handler is None:
return
call = getattr(self.handler, 'call', None)
if call is not None:
self.handler_result = call()
def id(self):
'''id used to identify the returned menu items
uses a 16 bit signed integer'''
# 0x7FFF is used as windows only allows for 16 bit IDs
return int(hash((self.name, self.returnkey)) & 0x7FFF)
def _append(self, menu):
'''append this menu item to a menu'''
menu.Append(self.id(), self.name, self.description)
def __str__(self):
return "MPMenuItem(%s,%s,%s)" % (self.name, self.description, self.returnkey)
class MPMenuCheckbox(MPMenuItem):
'''a MP menu item as a checkbox'''
def __init__(self, name, description='', returnkey=None, checked=False, handler=None):
MPMenuItem.__init__(self, name, description=description, returnkey=returnkey, handler=handler)
self.checked = checked
def find_selected(self, event):
'''find the selected menu item'''
if event.GetId() == self.id():
self.checked = event.IsChecked()
return self
return None
def IsChecked(self):
'''return true if item is checked'''
return self.checked
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendCheckItem(self.id(), self.name, self.description)
menu.Check(self.id(), self.checked)
def __str__(self):
return "MPMenuCheckbox(%s,%s,%s,%s)" % (self.name, self.description, self.returnkey, str(self.checked))
class MPMenuRadio(MPMenuItem):
'''a MP menu item as a radio item'''
def __init__(self, name, description='', returnkey=None, selected=None, items=[], handler=None):
MPMenuItem.__init__(self, name, description=description, returnkey=returnkey, handler=handler)
self.items = items
self.choice = 0
self.initial = selected
def set_choices(self, items):
'''set radio item choices'''
self.items = items
def get_choice(self):
'''return the chosen item'''
return self.items[self.choice]
def find_selected(self, event):
'''find the selected menu item'''
first = self.id()
last = first + len(self.items) - 1
evid = event.GetId()
if evid >= first and evid <= last:
self.choice = evid - first
return self
return None
def _append(self, menu):
'''append this menu item to a menu'''
submenu = wx.Menu()
for i in range(len(self.items)):
submenu.AppendRadioItem(self.id()+i, self.items[i], self.description)
if self.items[i] == self.initial:
submenu.Check(self.id()+i, True)
menu.AppendMenu(-1, self.name, submenu)
def __str__(self):
return "MPMenuRadio(%s,%s,%s,%s)" % (self.name, self.description, self.returnkey, self.get_choice())
class MPMenuSubMenu(MPMenuGeneric):
'''a MP menu item'''
def __init__(self, name, items):
MPMenuGeneric.__init__(self)
self.name = name
self.items = items
def add(self, items, addto=None):
'''add more items to a sub-menu'''
if not isinstance(items, list):
items = [items]
for m in items:
updated = False
for i in range(len(self.items)):
try:
if self.items[i].name == m.name:
self.items[i] = m
updated = True
except Exception:
pass
if not updated:
self.items.append(m)
def combine(self, submenu):
'''combine a new menu with an existing one'''
self.items.extend(submenu.items)
def wx_menu(self):
'''return a wx.Menu() for this menu'''
menu = wx.Menu()
for i in range(len(self.items)):
m = self.items[i]
m._append(menu)
return menu
def find_selected(self, event):
'''find the selected menu item'''
for m in self.items:
ret = m.find_selected(event)
if ret is not None:
return ret
return None
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendMenu(-1, self.name, self.wx_menu())
def __str__(self):
return "MPMenuSubMenu(%s)" % (self.name)
class MPMenuTop(object):
'''a MP top level menu'''
def __init__(self, items):
self.items = items
def add(self, items):
'''add a submenu'''
if not isinstance(items, list):
items = [items]
for m in items:
updated = False
for i in range(len(self.items)):
if self.items[i].name == m.name:
self.items[i] = m
updated = True
if not updated:
self.items.append(m)
def wx_menu(self):
'''return a wx.MenuBar() for the menu'''
menubar = wx.MenuBar()
for i in range(len(self.items)):
m = self.items[i]
menubar.Append(m.wx_menu(), m.name)
return menubar
def find_selected(self, event):
'''find the selected menu item'''
for i in range(len(self.items)):
m = self.items[i]
ret = m.find_selected(event)
if ret is not None:
return ret
return None
class MPMenuCallFileDialog(object):
'''used to create a file dialog callback'''
def __init__(self, flags=wx.FD_OPEN, title='Filename', wildcard='*.*'):
self.flags = flags
self.title = title
self.wildcard = wildcard
def call(self):
'''show a file dialog'''
dlg = wx.FileDialog(None, self.title, '', "", self.wildcard, self.flags)
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetPath()
class MPMenuCallTextDialog(object):
'''used to create a value dialog callback'''
def __init__(self, title='Enter Value', default=''):
self.title = title
self.default = default
def call(self):
'''show a value dialog'''
dlg = wx.TextEntryDialog(None, self.title, self.title, defaultValue=str(self.default))
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetValue()
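# Illustrative sketch (exposition only; not part of the original module):
# attaching a text-entry dialog to a menu item through the handler mechanism
# shown above. The menu label and return key below are made up.
def _example_text_dialog_item():
    item = MPMenuItem('Set Altitude', 'Set altitude', '# altitude',
                      handler=MPMenuCallTextDialog(title='Altitude (m)', default=100))
    # when the GUI reports this item as selected, item.call_handler() shows the
    # dialog and stores the entered value in item.handler_result
    return item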
class MPMenuChildMessageDialog(object):
'''used to create a message dialog in a child process'''
def __init__(self, title='Information', message='', font_size=18):
self.title = title
self.message = message
self.font_size = font_size
import multiprocessing
t = multiprocessing.Process(target=self.show)
t.start()
def show(self):
'''show the dialog as a child process'''
mp_util.child_close_fds()
from wx.lib.agw.genericmessagedialog import GenericMessageDialog
app = wx.PySimpleApp()
# note! font size change is not working. I don't know why yet
font = wx.Font(self.font_size, wx.MODERN, wx.NORMAL, wx.NORMAL)
dlg = GenericMessageDialog(None, self.message, self.title, wx.ICON_INFORMATION|wx.OK)
dlg.SetFont(font)
dlg.ShowModal()
app.MainLoop()
if __name__ == '__main__':
from MAVProxy.modules.lib.mp_image import MPImage
import time
im = MPImage(mouse_events=True,
key_events=True,
can_drag = False,
can_zoom = False,
auto_size = True)
menu = MPMenuTop([MPMenuSubMenu('&File',
items=[MPMenuItem('&Open\tCtrl+O'),
MPMenuItem('&Save\tCtrl+S'),
MPMenuItem('Close', 'Close'),
MPMenuItem('&Quit\tCtrl+Q', 'Quit')]),
MPMenuSubMenu('Edit',
items=[MPMenuSubMenu('Option',
items=[MPMenuItem('Foo'),
MPMenuItem('Bar'),
MPMenuSeparator(),
MPMenuCheckbox('&Grid\tCtrl+G')]),
MPMenuItem('Image', 'EditImage'),
MPMenuRadio('Colours',
items=['Red','Green','Blue']),
MPMenuRadio('Shapes',
items=['Circle','Square','Triangle'])])])
im.set_menu(menu)
popup = MPMenuSubMenu('A Popup',
items=[MPMenuItem('Sub1'),
MPMenuItem('Sub2'),
MPMenuItem('Sub3')])
im.set_popup_menu(popup)
while im.is_alive():
for event in im.events():
if isinstance(event, MPMenuItem):
print(event, getattr(event, 'popup_pos', None))
continue
else:
print(event)
time.sleep(0.1)
| gpl-2.0 | -5,167,212,183,719,611,000 | 32.012384 | 111 | 0.526775 | false |
eusi/MissionPlanerHM | Lib/site-packages/scipy/ndimage/interpolation.py | 55 | 25609 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
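# Illustrative usage sketch (exposition only; not part of the original module):
# spline-filtering the columns of a small made-up array along axis 0.
def _example_spline_filter1d():
    a = numpy.arange(12.).reshape((4, 3))
    return spline_filter1d(a, order=3, axis=0)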
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
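# Illustrative usage sketch (assumes the public import path ``scipy.ndimage``):
# prefiltering once with ``spline_filter`` and then passing ``prefilter=False``
# to the interpolation routines avoids refiltering the same data on every call.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.arange(12.).reshape((4, 3))
#   >>> filtered = ndimage.spline_filter(a, order=3)
#   >>> filtered.dtype
#   dtype('float64')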
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
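# Illustrative usage sketch (assumes the public import path ``scipy.ndimage``;
# values shown for order=1 with the default 'constant' boundary mode): a
# one-dimensional ``matrix`` is treated as the diagonal, so an identity
# diagonal combined with an integer offset simply relocates the array.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.arange(12.).reshape((4, 3))
#   >>> ndimage.affine_transform(a, [1, 1], offset=(1, 0), order=1)
#   array([[  3.,   4.,   5.],
#          [  6.,   7.,   8.],
#          [  9.,  10.,  11.],
#          [  0.,   0.,   0.]])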
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
    shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
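# Illustrative usage sketch (assumes the public import path ``scipy.ndimage``;
# values shown for order=1 with the default 'constant' boundary mode): a shift
# of one row moves the data towards higher indices and fills the vacated
# border with ``cval``.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.arange(12.).reshape((4, 3))
#   >>> ndimage.shift(a, (1, 0), order=1)
#   array([[ 0.,  0.,  0.],
#          [ 0.,  1.,  2.],
#          [ 3.,  4.,  5.],
#          [ 6.,  7.,  8.]])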
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
    zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
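# Illustrative usage sketch (assumes the public import path ``scipy.ndimage``):
# the output shape is the input shape scaled by the zoom factor and truncated
# to integers, matching the ``output_shape`` computation above.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.arange(12.).reshape((4, 3))
#   >>> ndimage.zoom(a, 2).shape
#   (8, 6)
#   >>> ndimage.zoom(a, (2, 0.5)).shape   # per-axis factors
#   (8, 1)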
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = range(input.ndim)
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
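# Illustrative usage sketch (assumes the public import path ``scipy.ndimage``):
# with reshape=True (the default) the output grows so the rotated input fits
# completely; with reshape=False the original shape is kept and the corners
# are filled according to ``mode``.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.arange(12.).reshape((4, 3))
#   >>> ndimage.rotate(a, 90).shape
#   (3, 4)
#   >>> ndimage.rotate(a, 90, reshape=False).shape
#   (4, 3)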
| gpl-3.0 | 2,290,035,108,452,676,900 | 37.860395 | 79 | 0.625913 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/minigame/DistributedIceGameAI.py | 2 | 15666 | from pandac.PandaModules import Point3
from direct.distributed.ClockDelta import globalClockDelta
from direct.fsm import ClassicFSM, State
from direct.task import Task
from toontown.minigame import DistributedMinigameAI
from toontown.minigame import MinigameGlobals
from toontown.minigame import IceGameGlobals
from toontown.ai.ToonBarrier import ToonBarrier
class DistributedIceGameAI(DistributedMinigameAI.DistributedMinigameAI):
notify = directNotify.newCategory('DistributedIceGameAI')
def __init__(self, air, minigameId):
try:
self.DistributedIceGameAI_initialized
except:
self.DistributedIceGameAI_initialized = 1
DistributedMinigameAI.DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedIceGameAI', [State.State('off', self.enterOff, self.exitOff, ['waitClientsChoices']),
State.State('waitClientsChoices', self.enterWaitClientsChoices, self.exitWaitClientsChoices, ['cleanup', 'processChoices']),
State.State('processChoices', self.enterProcessChoices, self.exitProcessChoices, ['waitEndingPositions', 'cleanup']),
State.State('waitEndingPositions', self.enterWaitEndingPositions, self.exitWaitEndingPositions, ['processEndingPositions', 'cleanup']),
State.State('processEndingPositions', self.enterProcessEndingPositions, self.exitProcessEndingPositions, ['waitClientsChoices', 'scoreMatch', 'cleanup']),
State.State('scoreMatch', self.enterScoreMatch, self.exitScoreMatch, ['waitClientsChoices', 'finalResults', 'cleanup']),
State.State('finalResults', self.enterFinalResults, self.exitFinalResults, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.avatarChoices = {}
self.avatarEndingPositions = {}
self.curRound = 0
self.curMatch = 0
self.finalEndingPositions = [Point3(IceGameGlobals.StartingPositions[0]),
Point3(IceGameGlobals.StartingPositions[1]),
Point3(IceGameGlobals.StartingPositions[2]),
Point3(IceGameGlobals.StartingPositions[3])]
def generate(self):
self.notify.debug('generate')
DistributedMinigameAI.DistributedMinigameAI.generate(self)
def delete(self):
self.notify.debug('delete')
taskMgr.remove(self.taskName('wait-choices-timeout'))
taskMgr.remove(self.taskName('endingPositionsTimeout'))
del self.gameFSM
DistributedMinigameAI.DistributedMinigameAI.delete(self)
def setGameReady(self):
self.notify.debug('setGameReady')
DistributedMinigameAI.DistributedMinigameAI.setGameReady(self)
self.numTreasures = IceGameGlobals.NumTreasures[self.getSafezoneId()]
self.numTreasuresTaken = 0
self.takenTreasuresTable = [0] * self.numTreasures
self.numPenalties = IceGameGlobals.NumPenalties[self.getSafezoneId()]
self.numPenaltiesTaken = 0
self.takenPenaltiesTable = [0] * self.numPenalties
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('waitClientsChoices')
def setGameAbort(self):
self.notify.debug('setGameAbort')
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.gameOver(self)
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def enterWaitClientsChoices(self):
self.notify.debug('enterWaitClientsChoices')
self.resetChoices()
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.sendUpdate('setNewState', ['inputChoice'])
taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.waitClientsChoicesTimeout, self.taskName('wait-choices-timeout'))
self.sendUpdate('setTimerStartTime', [globalClockDelta.getFrameNetworkTime()])
def exitWaitClientsChoices(self):
self.notify.debug('exitWaitClientsChoices')
taskMgr.remove(self.taskName('wait-choices-timeout'))
def enterProcessChoices(self):
forceAndHeading = []
for avId in self.avIdList:
force = self.avatarChoices[avId][0]
heading = self.avatarChoices[avId][1]
forceAndHeading.append([force, heading])
self.notify.debug('tireInputs = %s' % forceAndHeading)
self.sendUpdate('setTireInputs', [forceAndHeading])
self.gameFSM.request('waitEndingPositions')
def exitProcessChoices(self):
pass
def enterWaitEndingPositions(self):
if self.curRound == 0:
self.takenTreasuresTable = [0] * self.numTreasures
self.takenPenaltiesTable = [0] * self.numPenalties
        taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.endingPositionsTimeout, self.taskName('endingPositionsTimeout'))
self.avatarEndingPositions = {}
def exitWaitEndingPositions(self):
taskMgr.remove(self.taskName('endingPositionsTimeout'))
def enterProcessEndingPositions(self):
averagePos = [Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0)]
divisor = 0
for avId in self.avatarEndingPositions.keys():
divisor += 1
oneClientEndingPositions = self.avatarEndingPositions[avId]
avIndex = self.avIdList.index(avId)
for index in xrange(len(oneClientEndingPositions)):
pos = oneClientEndingPositions[index]
averagePos[index] += Point3(pos[0], pos[1], pos[2])
self.notify.debug('index = %d averagePos = %s' % (index, averagePos))
sentPos = []
if divisor:
for newPos in averagePos:
newPos /= divisor
newPos.setZ(IceGameGlobals.TireRadius)
sentPos.append([newPos[0], newPos[1], newPos[2]])
else:
sentPos = self.finalEndingPositions
self.sendUpdate('setFinalPositions', [sentPos])
self.finalEndingPositions = sentPos
if self.curMatch == IceGameGlobals.NumMatches - 1 and self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
elif self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
else:
self.curRound += 1
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.gameFSM.request('waitClientsChoices')
def exitProcessEndingPositions(self):
pass
def enterScoreMatch(self):
sortedByDistance = []
for avId in self.avIdList:
index = self.avIdList.index(avId)
pos = Point3(*self.finalEndingPositions[index])
pos.setZ(0)
sortedByDistance.append((avId, pos.length()))
def compareDistance(x, y):
if x[1] - y[1] > 0:
return 1
elif x[1] - y[1] < 0:
return -1
else:
return 0
sortedByDistance.sort(cmp=compareDistance)
self.scoresAsList = []
totalPointsAdded = 0
for index in xrange(len(self.avIdList)):
pos = Point3(*self.finalEndingPositions[index])
pos.setZ(0)
length = pos.length()
points = length / IceGameGlobals.FarthestLength * (IceGameGlobals.PointsInCorner - IceGameGlobals.PointsDeadCenter[self.numPlayers])
points += IceGameGlobals.PointsDeadCenter[self.numPlayers]
            avId = self.avIdList[index]
            self.notify.debug('length = %s points=%s avId=%d' % (length, points, avId))
bonusIndex = 0
for sortIndex in xrange(len(sortedByDistance)):
if sortedByDistance[sortIndex][0] == avId:
bonusIndex = sortIndex
bonusIndex += 4 - len(self.avIdList)
pointsToAdd = int(points + 0.5) + IceGameGlobals.BonusPointsForPlace[bonusIndex]
totalPointsAdded += pointsToAdd
self.scoreDict[avId] += pointsToAdd
self.scoresAsList.append(self.scoreDict[avId])
self.curMatch += 1
self.curRound = 0
self.sendUpdate('setScores', [self.curMatch, self.curRound, self.scoresAsList])
self.sendUpdate('setNewState', ['scoring'])
def allToonsScoringMovieDone(self = self):
self.notify.debug('allToonsScoringMovieDone')
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
def handleTimeout(avIds, self = self):
self.notify.debug('handleTimeout: avatars %s did not report "done"' % avIds)
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
scoreMovieDuration = IceGameGlobals.FarthestLength * IceGameGlobals.ExpandFeetPerSec
scoreMovieDuration += totalPointsAdded * IceGameGlobals.ScoreCountUpRate
self.scoringMovieDoneBarrier = ToonBarrier('waitScoringMovieDone', self.uniqueName('waitScoringMovieDone'), self.avIdList, scoreMovieDuration + MinigameGlobals.latencyTolerance, allToonsScoringMovieDone, handleTimeout)
def exitScoreMatch(self):
self.scoringMovieDoneBarrier.cleanup()
self.scoringMovieDoneBarrier = None
return
def enterFinalResults(self):
self.checkScores()
self.sendUpdate('setNewState', ['finalResults'])
taskMgr.doMethodLater(IceGameGlobals.ShowScoresDuration, self.__doneShowingScores, self.taskName('waitShowScores'))
def exitFinalResults(self):
taskMgr.remove(self.taskName('waitShowScores'))
def __doneShowingScores(self, task):
self.notify.debug('doneShowingScores')
self.gameOver()
return Task.done
def waitClientsChoicesTimeout(self, task):
self.notify.debug('waitClientsChoicesTimeout: did not hear from all clients')
for avId in self.avatarChoices.keys():
if self.avatarChoices[avId] == (-1, 0):
self.avatarChoices[avId] = (0, 0)
self.gameFSM.request('processChoices')
return Task.done
def resetChoices(self):
for avId in self.avIdList:
self.avatarChoices[avId] = (-1, 0)
def setAvatarChoice(self, force, direction):
avatarId = self.air.getAvatarIdFromSender()
self.notify.debug('setAvatarChoice: avatar: ' + str(avatarId) + ' votes: ' + str(force) + ' direction: ' + str(direction))
self.avatarChoices[avatarId] = self.checkChoice(avatarId, force, direction)
if self.allAvatarsChosen():
self.notify.debug('setAvatarChoice: all avatars have chosen')
self.gameFSM.request('processChoices')
else:
self.notify.debug('setAvatarChoice: still waiting for more choices')
def checkChoice(self, avId, force, direction):
retForce = force
retDir = direction
if retForce < 0:
retForce = 0
if retForce > 100:
retForce = 100
return (retForce, retDir)
def allAvatarsChosen(self):
for avId in self.avatarChoices.keys():
choice = self.avatarChoices[avId]
if choice[0] == -1 and not self.stateDict[avId] == DistributedMinigameAI.EXITED:
return False
return True
def endingPositions(self, positions):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
self.notify.debug('got endingPositions from client %s' % positions)
avId = self.air.getAvatarIdFromSender()
self.avatarEndingPositions[avId] = positions
if self.allAvatarsSentEndingPositions():
self.gameFSM.request('processEndingPositions')
def allAvatarsSentEndingPositions(self):
if len(self.avatarEndingPositions) == len(self.avIdList):
return True
return False
def endingPositionsTimeout(self, task):
self.notify.debug('endingPositionsTimeout : did not hear from all clients')
self.gameFSM.request('processEndingPositions')
return Task.done
def reportScoringMovieDone(self):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'scoreMatch':
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('reportScoringMovieDone: avatar %s is done' % avId)
self.scoringMovieDoneBarrier.clear(avId)
def claimTreasure(self, treasureNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if not self.scoreDict.has_key(avId):
self.notify.warning('PROBLEM: avatar %s called claimTreasure(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
treasureNum,
self.scoreDict,
self.avIdList))
return
if treasureNum < 0 or treasureNum >= self.numTreasures:
            self.air.writeServerEvent('warning', treasureNum, 'IceGameAI.claimTreasure treasureNum out of range')
return
if self.takenTreasuresTable[treasureNum]:
return
self.takenTreasuresTable[treasureNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setTreasureGrabbed', [avId, treasureNum])
self.scoreDict[avId] += 1
self.numTreasuresTaken += 1
def claimPenalty(self, penaltyNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if not self.scoreDict.has_key(avId):
self.notify.warning('PROBLEM: avatar %s called claimPenalty(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
penaltyNum,
self.scoreDict,
self.avIdList))
return
if penaltyNum < 0 or penaltyNum >= self.numPenalties:
self.air.writeServerEvent('warning', penaltyNum, 'IceGameAI.claimPenalty penaltyNum out of range')
return
if self.takenPenaltiesTable[penaltyNum]:
return
self.takenPenaltiesTable[penaltyNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setPenaltyGrabbed', [avId, penaltyNum])
self.scoreDict[avId] -= 1
self.numPenaltiesTaken += 1
def checkScores(self):
self.scoresAsList = []
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
if self.scoreDict[avId] < 0:
self.scoreDict[avId] = 1
self.scoresAsList.append(self.scoreDict[avId])
| mit | -5,404,933,960,941,281,000 | 43.254237 | 226 | 0.654538 | false |
msbeta/apollo | modules/tools/prediction/data_pipelines/common/rotation2d.py | 3 | 1121 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from math import cos, sin
from vector2d import Vector2
def rotate(v, theta):
cos_theta = cos(theta)
sin_theta = sin(theta)
return rotate_fast(v, cos_theta, sin_theta)
def rotate_fast(v, cos_theta, sin_theta):
x = cos_theta * v.x - sin_theta * v.y
y = sin_theta * v.x + cos_theta * v.y
return Vector2(x, y)
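# Illustrative usage sketch (no dependencies assumed beyond the imports above):
# rotating the unit x-vector by pi/2 gives approximately the unit y-vector;
# rotate_fast performs the same operation when the caller has already computed
# cos(theta) and sin(theta).
#
#   >>> from math import pi
#   >>> v = rotate(Vector2(1.0, 0.0), pi / 2)
#   >>> (round(v.x, 6), round(v.y, 6))
#   (0.0, 1.0)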
| apache-2.0 | -7,731,610,705,715,456,000 | 31.028571 | 79 | 0.61463 | false |
ikrauchanka/flask-jsonrpc | examples/flask-httpauth/auth.py | 2 | 2733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Cenobit Technologies, Inc. http://cenobit.es/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Cenobit Technologies nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from flask import Flask
from flask.ext.httpauth import HTTPBasicAuth
PROJECT_DIR, PROJECT_MODULE_NAME = os.path.split(
os.path.dirname(os.path.realpath(__file__))
)
FLASK_JSONRPC_PROJECT_DIR = os.path.join(PROJECT_DIR, os.pardir)
if os.path.exists(FLASK_JSONRPC_PROJECT_DIR) \
and not FLASK_JSONRPC_PROJECT_DIR in sys.path:
sys.path.append(FLASK_JSONRPC_PROJECT_DIR)
from flask_jsonrpc import JSONRPC
app = Flask(__name__)
auth = HTTPBasicAuth()
jsonrpc = JSONRPC(app, '/api', enable_web_browsable_api=True)
users = {
'john': 'hello',
'susan': 'bye'
}
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
@auth.verify_password
def verify_pwd(username, password):
return users.get(username) == password
@jsonrpc.method('App.index')
@auth.login_required
def index():
return u'Welcome to Flask JSON-RPC'
@jsonrpc.method('App.hello')
@auth.login_required
def hello(name):
return u'Hello {0}'.format(name)
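# Illustrative request sketch (assumptions: the app is running locally on
# Flask's default port 5000, and the client speaks JSON-RPC 2.0 over HTTP
# Basic auth using one of the credentials defined in ``users`` above):
#
#   curl -u john:hello -X POST http://localhost:5000/api \
#        -H 'Content-Type: application/json' \
#        -d '{"jsonrpc": "2.0", "method": "App.hello", "params": ["Flask"], "id": 1}'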
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| bsd-3-clause | -6,505,728,027,492,959,000 | 34.038462 | 78 | 0.742774 | false |
KontorConsulting/odoo | addons/account_accountant/__init__.py | 892 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,304,520,672,803,910,000 | 44.478261 | 78 | 0.610899 | false |
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/django/conf/locale/it/formats.py | 115 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 10/2006
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit | -117,522,891,818,072,210 | 42.270833 | 77 | 0.502648 | false |
hifly/OpenUpgrade | addons/account/wizard/account_report_general_journal.py | 378 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_general_journal(osv.osv_memory):
_inherit = "account.common.journal.report"
_name = 'account.general.journal'
_description = 'Account General Journal'
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_generaljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,172,252,810,657,236,500 | 43.657895 | 153 | 0.638185 | false |
kidaa/aurora | src/test/python/apache/thermos/monitoring/test_resource.py | 8 | 3705 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
from unittest import TestCase
import mock
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.monitoring.process import ProcessSample
from apache.thermos.monitoring.resource import (
ResourceHistory,
ResourceMonitorBase,
TaskResourceMonitor
)
from gen.apache.thermos.ttypes import ProcessStatus
class TestResourceHistory(TestCase):
def setUp(self):
self.max_len = 4
self.resource_history = ResourceHistory(self.max_len)
def test_add(self):
next_resource_stamp = time() + 100
value = ResourceMonitorBase.ResourceResult(1, 1, 0)
assert (next_resource_stamp, value) not in self.resource_history._values
self.resource_history.add(next_resource_stamp, value)
assert (next_resource_stamp, value) == self.resource_history._values[1]
def test_add_prevents_old_entries(self):
with self.assertRaises(ValueError):
self.resource_history.add(-1, 10)
def test_get(self):
resource_stamp = time() + 100
value = ResourceMonitorBase.ResourceResult(1, 1, 0)
value_wrong = ResourceMonitorBase.ResourceResult(1, 1, 50)
self.resource_history.add(resource_stamp, value)
self.resource_history.add(resource_stamp + 1000, value_wrong)
self.resource_history.add(resource_stamp + 10000, value_wrong)
assert resource_stamp, value == self.resource_history.get(resource_stamp)
class TestTaskResouceMonitor(TestCase):
@mock.patch('apache.thermos.monitoring.process_collector_psutil.ProcessTreeCollector.sample',
autospec=True, spec_set=True)
@mock.patch('apache.thermos.monitoring.monitor.TaskMonitor.get_active_processes',
autospec=True, spec_set=True)
def test_sample_by_process(self, mock_get_active_processes, mock_sample):
fake_process_name = 'fake-process-name'
task_path = '.'
task_monitor = TaskMonitor(task_path, 'fake-task-id')
fake_process_status = ProcessStatus(process=fake_process_name)
mock_get_active_processes.return_value = [(fake_process_status, 1)]
fake_process_sample = ProcessSample.empty()
mock_sample.return_value = fake_process_sample
task_resource_monitor = TaskResourceMonitor('fake-task-id', task_monitor)
assert task_resource_monitor.name == 'TaskResourceMonitor[fake-task-id]'
assert fake_process_sample == task_resource_monitor.sample_by_process(fake_process_name)
assert mock_get_active_processes.mock_calls == [mock.call(task_monitor)]
assert mock_sample.mock_calls == [mock.call(
task_resource_monitor._process_collectors[fake_process_status])]
@mock.patch('apache.thermos.monitoring.monitor.TaskMonitor.get_active_processes',
autospec=True, spec_set=True)
def test_sample_by_process_no_process(self, mock_get_active_processes):
task_path = '.'
task_monitor = TaskMonitor(task_path, 'fake-task-id')
mock_get_active_processes.return_value = []
task_resource_monitor = TaskResourceMonitor('fake-task-id', task_monitor)
with self.assertRaises(ValueError):
task_resource_monitor.sample_by_process('fake-process-name')
assert mock_get_active_processes.mock_calls == [mock.call(task_monitor)]
| apache-2.0 | -6,488,680,674,287,525,000 | 38.414894 | 95 | 0.74197 | false |
sanderator/lab-o-matic | test/test_lab_o_matic/test_compiler.py | 1 | 1208 | '''
Created on May 8, 2011
@author: sander
'''
import os.path
import shutil
import sys
'''
nose.tools has to be imported into the Eclipse project, eg, from
/usr/local/lib/python2.6/dist-packages/nose-1.0.0-py2.6.egg/nose/tools.py
'''
from tools import with_setup, raises, nottest
import lab_o_matic.compiler
paths = {}
def setup_func():
'''Creates fixtures.
Note that nose doesn't work properly if this function is just called setup.
'''
paths['projects'] = os.path.join(os.path.dirname(__file__), '../data')
paths['student'] = 'stoodent_src'
paths['bytecode'] = os.path.join(paths['projects'], '%s/build/classes' % paths['student'])
def teardown_func():
'''Removes fixtures.
Note that nose doesn't work properly if this function is just called teardown.
'''
# shutil.rmtree(os.path.join(paths['projects'], paths['student'] + '/src'))
@with_setup(setup_func, teardown_func)
def test_compile():
'''
Compiles any .java files it finds.
The paths argument determines where to look for source files and
where to put generated bytecode .class files.
'''
assert lab_o_matic.compiler.compile(paths)
assert os.path.exists(paths['bytecode'])
| gpl-2.0 | 5,628,246,233,826,935,000 | 26.454545 | 94 | 0.680464 | false |
anair13/where-am-i | flaskapp/flask/bin/activate_this.py | 669 | 1129 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ['PATH']
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
| mit | 713,843,416,466,405,600 | 32.205882 | 113 | 0.689105 | false |
cs207-project/TimeSeries | tsdb/tsdb_constants.py | 1 | 1455 | ####################################################
#
# This file records all the constant variables used
# in tsdb module
#
####################################################
OPMAP = {
'<': 0,
'>': 1,
'==': 2,
'!=': 3,
'<=': 4,
'>=': 5
}
FILES_DIR = 'persistent_files'
MAX_CARD = 8
INDEXES = {
1: None, #Binary Tree
2: None #bitmask
}
TYPES = {
'float': 'd',
'bool': '?',
'int': 'i',
}
TYPE_DEFAULT = {
'float': 0.0,
'bool': False,
'int': 0
}
TS_FIELD_LENGTH = 4
BYTES_PER_NUM = 8
REFRESH_RATE = 50
TS_LENGTH = 100
NUMVPS = 5
schema_type = {
'pk': {'type': 'string', 'index': None},
'ts': {'type': None, 'index': None},
'order': {'type': 'int', 'index': 2, 'values': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]},
'blarg': {'type': 'int', 'index': 2, 'values': [1, 2]},
'mean': {'type': 'float', 'index': 1},
'std': {'type': 'float', 'index': 1},
'vp': {'type': 'bool', 'index': 2, 'values': [0,1]},
'd-vp1': {'type': 'float', 'index': 1}
}
identity = lambda x: x
schema_convert = {
'pk': {'convert': identity, 'index': None},
'ts': {'convert': identity, 'index': None},
'order': {'convert': int, 'index': 1},
'blarg': {'convert': int, 'index': 1},
'useless': {'convert': identity, 'index': None},
'mean': {'convert': float, 'index': 1},
'std': {'convert': float, 'index': 1},
'vp': {'convert': bool, 'index': 1}
}
| mit | 2,109,068,200,658,952,400 | 18.931507 | 95 | 0.442612 | false |
esthermm/odoomrp-wip | mrp_subcontracting/models/procurement_order.py | 25 | 1258 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
mrp_operation = fields.Many2one(
'mrp.production.workcenter.line', 'MRP Operation')
@api.multi
def make_po(self):
purchase_line_obj = self.env['purchase.order.line']
res = super(ProcurementOrder, self).make_po()
for procurement in self:
if res[procurement.id]:
purchase_line = purchase_line_obj.browse(res[procurement.id])
if (procurement.mrp_operation and
(not purchase_line.order_id.mrp_operation or
procurement.mrp_operation.id !=
purchase_line.order_id.mrp_operation.id)):
purchase_line.order_id.mrp_operation = (
procurement.mrp_operation.id)
procurement.mrp_operation.purchase_order = (
purchase_line.order_id.id)
return res
| agpl-3.0 | 2,030,272,376,812,338,400 | 42.37931 | 78 | 0.514308 | false |
sirinath/root | interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/test_token_kind.py | 97 | 1064 | from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')
| lgpl-2.1 | -5,445,447,430,254,963,000 | 23.744186 | 69 | 0.673872 | false |
glaubitz/fs-uae-debian | launcher/launcher/setup/setupwizarddialog.py | 2 | 1290 | import fsui
from launcher.i18n import gettext
from launcher.setup.setupwelcomepage import SetupWelcomePage
from launcher.ui.skin import LauncherTheme
from launcher.ui.widgets import PrevButton, NextButton, CloseButton
class SetupWizardDialog(fsui.Window):
@classmethod
def open(cls, parent=None):
return fsui.open_window_instance(cls, parent)
def __init__(self, parent):
super().__init__(
parent,
gettext("Setup Wizard"),
minimizable=False,
maximizable=False,
)
self.theme = LauncherTheme.get()
self.layout = fsui.VerticalLayout()
page = SetupWelcomePage(self)
self.layout.add(page, expand=True, fill=True)
button_layout = fsui.HorizontalLayout()
self.layout.add(button_layout, fill=True, margin=20)
button_layout.add_spacer(0, expand=True)
self.prev_button = PrevButton(self)
button_layout.add(self.prev_button, fill=True, margin_left=10)
self.next_button = NextButton(self)
button_layout.add(self.next_button, fill=True, margin_left=10)
if self.window.theme.has_close_buttons:
self.close_button = CloseButton(self)
button_layout.add(self.close_button, fill=True, margin_left=10)
| gpl-2.0 | 88,827,779,302,788,960 | 35.857143 | 75 | 0.662016 | false |
roderickmackenzie/gpvdm | gpvdm_gui/gui/materials_main.py | 1 | 7747 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package materials_main
# Dialog to show information about a material.
#
import os
from tab import tab_class
from icon_lib import icon_get
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QDialog
from PyQt5.QtGui import QPainter,QIcon
#python modules
import webbrowser
from help import help_window
from win_lin import desktop_open
from ref import ref_window
from bibtex import bibtex
from gpvdm_open import gpvdm_open
from QWidgetSavePos import QWidgetSavePos
from plot_widget import plot_widget
from ribbon_materials import ribbon_materials
from import_data import import_data
from equation_editor import equation_editor
articles = []
mesh_articles = []
class materials_main(QWidgetSavePos):
def changed_click(self):
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Electrical parameters"):
help_window().help_set_help(["tab.png",_("<big><b>Electrical parameters</b></big><br>Use this tab to configure the electrical parameters for the material.")])
self.ribbon.tb_save.setEnabled(False)
self.ribbon.import_data.setEnabled(False)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Luminescence"):
help_window().help_set_help(["tab.png",_("<big><b>Luminescence</b></big><br>Use this tab to edit the materials Luminescence.")])
self.ribbon.tb_save.setEnabled(False)
self.ribbon.import_data.setEnabled(False)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
b=bibtex()
if b.load(os.path.join(self.path,"mat.bib"))!=False:
text=b.get_text_of_token("alpha",html=True)
if text!=False:
help_window().help_set_help(["alpha.png",_("<big><b>Absorption</b></big><br>"+text)])
self.ribbon.tb_save.setEnabled(True)
self.ribbon.import_data.setEnabled(True)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
b=bibtex()
if b.load(os.path.join(self.path,"mat.bib"))!=False:
text=b.get_text_of_token("n",html=True)
if text!=False:
help_window().help_set_help(["n.png",_("<big><b>Refractive index</b></big><br>"+text)])
self.ribbon.tb_save.setEnabled(True)
self.ribbon.import_data.setEnabled(True)
def callback_cost(self):
desktop_open(os.path.join(self.path,"cost.xlsx"))
def callback_help(self):
webbrowser.open("https://www.gpvdm.com/man/index.html")
def __init__(self,path):
QWidgetSavePos.__init__(self,"materials_main")
self.path=path
self.setFixedSize(900, 600)
self.setWindowIcon(icon_get("organic_material"))
self.setWindowTitle(_("Material editor")+" (https://www.gpvdm.com)"+" "+os.path.basename(self.path))
self.main_vbox = QVBoxLayout()
self.ribbon=ribbon_materials()
self.ribbon.cost.triggered.connect(self.callback_cost)
self.ribbon.folder_open.triggered.connect(self.callback_dir_open)
self.ribbon.import_data.clicked.connect(self.import_data)
self.ribbon.equation.clicked.connect(self.callback_equation_editor)
self.ribbon.tb_ref.triggered.connect(self.callback_ref)
self.ribbon.help.triggered.connect(self.callback_help)
self.main_vbox.addWidget(self.ribbon)
self.notebook = QTabWidget()
self.notebook.setMovable(True)
self.main_vbox.addWidget(self.notebook)
fname=os.path.join(self.path,"alpha.gmat")
self.alpha=plot_widget(enable_toolbar=False)
self.alpha.set_labels([_("Absorption")])
self.alpha.load_data([fname])
self.alpha.do_plot()
self.notebook.addTab(self.alpha,_("Absorption"))
fname=os.path.join(self.path,"n.gmat")
self.n=plot_widget(enable_toolbar=False)
self.n.set_labels([_("Refractive index")])
self.n.load_data([fname])
self.n.do_plot()
self.notebook.addTab(self.n,_("Refractive index"))
files=["dos.inp","pl.inp","mat.inp"]
description=[_("Electrical parameters"),_("Luminescence"),_("Basic")]
for i in range(0,len(files)):
full_path=os.path.join(self.path,files[i])
if os.path.isfile(full_path)==True:
tab=tab_class(os.path.join(self.path,files[i]))
self.notebook.addTab(tab,description[i])
self.setLayout(self.main_vbox)
self.notebook.currentChanged.connect(self.changed_click)
def callback_equation_editor(self):
equation_file=None
file_name=None
data_label=""
data_units=""
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
equation_file="alpha_eq.inp"
data_label="Absorption"
data_units="m^{-1}"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
equation_file="n_eq.inp"
data_label="n"
data_units="au"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.equation_editor=equation_editor(self.path,equation_file,file_name)
self.equation_editor.data_written.connect(self.update)
self.equation_editor.data.y_label="Wavelength"
self.equation_editor.data.data_label=data_label
self.equation_editor.data.y_units="nm"
self.equation_editor.data.data_units=data_units
self.equation_editor.load()
self.equation_editor.show()
def import_data(self):
file_name=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.im=import_data(output_file,config_file)
self.im.run()
self.update()
def import_ref(self):
file_name=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.im=import_data(output_file,config_file)
self.im.run()
self.update()
def update(self):
self.n.update()
self.alpha.update()
def callback_ref(self):
token=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
token="alpha"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
token="n"
if token!=None:
self.ref_window=ref_window(os.path.join(self.path,"mat.bib"),token)
self.ref_window.show()
def callback_dir_open(self):
dialog=gpvdm_open(self.path)
dialog.show_inp_files=False
ret=dialog.exec_()
if ret==QDialog.Accepted:
desktop_open(dialog.get_filename())
| gpl-2.0 | 7,798,414,526,064,103,000 | 30.364372 | 161 | 0.718214 | false |
rickerc/cinder_audit | cinder/tests/db/test_name_id.py | 5 | 2344 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume name_id."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import test
from cinder.tests import utils as testutils
CONF = cfg.CONF
class NameIDsTestCase(test.TestCase):
"""Test cases for naming volumes with name_id."""
def setUp(self):
super(NameIDsTestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id')
def tearDown(self):
super(NameIDsTestCase, self).tearDown()
def test_name_id_same(self):
"""New volume should have same 'id' and 'name_id'."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
self.assertEqual(vol_ref['name_id'], vol_ref['id'])
expected_name = CONF.volume_name_template % vol_ref['id']
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_diff(self):
"""Change name ID to mimic volume after migration."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
vol_ref = db.volume_get(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_snapshot_volume_name(self):
"""Make sure snapshot['volume_name'] is updated."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(snap_ref['volume_name'], expected_name)
| apache-2.0 | -2,809,393,644,744,328,700 | 38.066667 | 78 | 0.655717 | false |
jpshort/odoo | marcos_addons/marcos_l10n_do/__init__.py | 3 | 1085 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,095,760,275,109,491,000 | 44.208333 | 80 | 0.62212 | false |
widelands/widelands | utils/fix_formatting.py | 1 | 4107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The code base had inconsistent usage of tabs/spaces for indenting in Lua.
files. Spaces were more prominent - and I prefer them over tabs. So I wrote
this small script to fix leading tabs in Lua files to spaces.
It also saves files in unix file endings ("\r\n") and strips empty lines at the
end of files and whitespace characters at the end of lines.
After fixing the Lua tabs, this script also executes clang-format over the src
directory and pyformat over the utils directory.
"""
import argparse
import os
import re
import sys
from subprocess import call
from file_utils import read_text_file, write_text_file, find_files
LEADING_TABS = re.compile(r'^\s*\t+\s*')
SPACES_PER_TAB = 3
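# Worked example (illustrative): for a line "\t\tfoo", LEADING_TABS matches the
# two leading tabs, and expandtabs(SPACES_PER_TAB) below turns them into six
# spaces, so the line becomes "      foo".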
def parse_args():
p = argparse.ArgumentParser(
description='Fix common whitespace errors in Lua files, run clang-format'
' over the code base and pyformat over the utils directory.'
' Recurses over all relevant files.')
p.add_argument('-c', '--c++', action='store_true',
help='Format C++ files only')
p.add_argument('-l', '--lua', action='store_true',
help='Format Lua files only')
p.add_argument('-p', '--python', action='store_true',
help='Format Python files only')
p.add_argument('-d', '--dir', action='store',
help='Format the given directory and its subdirectories only')
return vars(p.parse_args())
def main():
args = parse_args()
format_cplusplus = args['c++'] or not (args['lua'] or args['python'])
format_lua = args['lua'] or not (args['c++'] or args['python'])
format_python = args['python'] or not (args['c++'] or args['lua'])
if not os.path.isdir('src') or not os.path.isdir('utils'):
print('CWD is not the root of the repository.')
return 1
if format_cplusplus:
directory = args['dir']
if not directory:
directory = './src'
sys.stdout.write('\nFormatting C++ in directory: ' + directory + ' ')
for filename in find_files(directory, ['.cc', '.h']):
if 'third_party' in filename:
continue
sys.stdout.write('.')
sys.stdout.flush()
call(['clang-format', '-i', filename])
call(['git', 'add', '--renormalize', filename])
print(' done.')
if format_lua:
directories = set()
if args['dir']:
directories.add(args['dir'])
else:
directories = {'./data', './test'}
for directory in directories:
sys.stdout.write(
'\nFixing Lua tabs in directory: ' + directory + ' ')
for filename in find_files(directory, ['.lua']):
sys.stdout.write('.')
sys.stdout.flush()
lines = read_text_file(filename).strip().split('\n')
new_lines = []
for line in lines:
m = LEADING_TABS.match(line)
if m is not None:
line = line[m.start():m.end()].expandtabs(
SPACES_PER_TAB) + line[m.end():]
new_lines.append(line.rstrip() + '\n')
write_text_file(filename, ''.join(new_lines))
call(['git', 'add', '--renormalize', filename])
print(' done.')
if format_python:
directories = set()
if args['dir']:
directories.add(args['dir'])
else:
directories = {'./utils', './cmake/codecheck'}
for directory in directories:
sys.stdout.write(
'\nFormatting Python scripts in directory: ' + directory + ' ')
for filename in find_files(directory, ['.py']):
sys.stdout.write('.')
sys.stdout.flush()
call(['pyformat', '-i', filename])
call(['git', 'add', '--renormalize', filename])
print(' done.')
print('Formatting finished.')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 8,557,288,437,539,116,000 | 34.713043 | 81 | 0.551254 | false |
louyihua/edx-platform | lms/djangoapps/badges/migrations/0001_initial.py | 17 | 3499 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import badges.models
from django.conf import settings
import django.utils.timezone
from model_utils import fields
import xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BadgeAssertion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', jsonfield.fields.JSONField()),
('backend', models.CharField(max_length=50)),
('image_url', models.URLField()),
('assertion_url', models.URLField()),
('modified', fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('created', fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False, db_index=True)),
],
),
migrations.CreateModel(
name='BadgeClass',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(max_length=255, validators=[badges.models.validate_lowercase])),
('issuing_component', models.SlugField(default=b'', blank=True, validators=[badges.models.validate_lowercase])),
('display_name', models.CharField(max_length=255)),
('course_id', xmodule_django.models.CourseKeyField(default=None, max_length=255, blank=True)),
('description', models.TextField()),
('criteria', models.TextField()),
('mode', models.CharField(default=b'', max_length=100, blank=True)),
('image', models.ImageField(upload_to=b'badge_classes', validators=[badges.models.validate_badge_image])),
],
),
migrations.CreateModel(
name='CourseCompleteImageConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mode', models.CharField(help_text='The course mode for this badge image. For example, "verified" or "honor".', unique=True, max_length=125)),
('icon', models.ImageField(help_text='Badge images must be square PNG files. The file size should be under 250KB.', upload_to=b'course_complete_badges', validators=[badges.models.validate_badge_image])),
('default', models.BooleanField(default=False, help_text='Set this value to True if you want this image to be the default image for any course modes that do not have a specified badge image. You can have only one default image.')),
],
),
migrations.AlterUniqueTogether(
name='badgeclass',
unique_together=set([('slug', 'issuing_component', 'course_id')]),
),
migrations.AddField(
model_name='badgeassertion',
name='badge_class',
field=models.ForeignKey(to='badges.BadgeClass'),
),
migrations.AddField(
model_name='badgeassertion',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
| agpl-3.0 | 4,522,359,262,542,806,000 | 49.710145 | 247 | 0.616748 | false |
drglove/SickRage | sickbeard/clients/rtorrent.py | 2 | 5258 | # Author: jkaberg <[email protected]>, based on fuzemans work (https://github.com/RuudBurger/CouchPotatoServer/blob/develop/couchpotato/core/downloaders/rtorrent/main.py)
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64encode
import traceback
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
from lib.rtorrent import RTorrent
from lib.rtorrent.err import MethodError
class rTorrentAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(rTorrentAPI, self).__init__('rTorrent', host, username, password)
def _get_auth(self):
        if self.auth is not None:
            return self.auth
if not self.host:
return
tp_kwargs = {}
        if sickbeard.TORRENT_AUTH_TYPE != 'none':
tp_kwargs['authtype'] = sickbeard.TORRENT_AUTH_TYPE
if not sickbeard.TORRENT_VERIFY_CERT:
tp_kwargs['check_ssl_cert'] = False
if self.username and self.password:
self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)
else:
self.auth = RTorrent(self.host, None, None, True)
return self.auth
def _add_torrent_uri(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
try:
# Send magnet to rTorrent
torrent = self.auth.load_magnet(result.url, result.hash)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _add_torrent_file(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
# group_name = 'sb_test'.lower() ##### Use provider instead of _test
# if not self._set_torrent_ratio(group_name):
# return False
# Send request to rTorrent
try:
# Send torrent to rTorrent
torrent = self.auth.load_torrent(result.content)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Set Ratio Group
# torrent.set_visible(group_name)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _set_torrent_ratio(self, name):
# if not name:
# return False
#
# if not self.auth:
# return False
#
# views = self.auth.get_views()
#
# if name not in views:
# self.auth.create_group(name)
# group = self.auth.get_group(name)
# ratio = int(float(sickbeard.TORRENT_RATIO) * 100)
#
# try:
# if ratio > 0:
#
# # Explicitly set all group options to ensure it is setup correctly
# group.set_upload('1M')
# group.set_min(ratio)
# group.set_max(ratio)
# group.set_command('d.stop')
# group.enable()
# else:
# # Reset group action and disable it
# group.set_command()
# group.disable()
#
# except:
# return False
return True
def testAuthentication(self):
try:
self._get_auth()
if self.auth is not None:
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
except Exception:
return False, 'Error: Unable to connect to ' + self.name
api = rTorrentAPI()
| gpl-3.0 | -4,681,479,207,629,375,000 | 27.89011 | 174 | 0.583492 | false |
zoggn/kernel_tcl_msm8610 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 | -4,823,889,916,626,149,000 | 31.536585 | 93 | 0.669415 | false |
mavenlin/tensorflow | tensorflow/python/debug/cli/cli_shared_test.py | 45 | 15872 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the shared functions and classes for tfdbg CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class BytesToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneSizeWorks(self):
self.assertEqual(str(None), cli_shared.bytes_to_readable_str(None))
def testSizesBelowOneKiloByteWorks(self):
self.assertEqual("0", cli_shared.bytes_to_readable_str(0))
self.assertEqual("500", cli_shared.bytes_to_readable_str(500))
self.assertEqual("1023", cli_shared.bytes_to_readable_str(1023))
def testSizesBetweenOneKiloByteandOneMegaByteWorks(self):
self.assertEqual("1.00k", cli_shared.bytes_to_readable_str(1024))
self.assertEqual("2.40k", cli_shared.bytes_to_readable_str(int(1024 * 2.4)))
self.assertEqual("1023.00k", cli_shared.bytes_to_readable_str(1024 * 1023))
def testSizesBetweenOneMegaByteandOneGigaByteWorks(self):
self.assertEqual("1.00M", cli_shared.bytes_to_readable_str(1024**2))
self.assertEqual("2.40M",
cli_shared.bytes_to_readable_str(int(1024**2 * 2.4)))
self.assertEqual("1023.00M",
cli_shared.bytes_to_readable_str(1024**2 * 1023))
def testSizeAboveOneGigaByteWorks(self):
self.assertEqual("1.00G", cli_shared.bytes_to_readable_str(1024**3))
self.assertEqual("2000.00G",
cli_shared.bytes_to_readable_str(1024**3 * 2000))
def testReadableStrIncludesBAtTheEndOnRequest(self):
self.assertEqual("0B", cli_shared.bytes_to_readable_str(0, include_b=True))
self.assertEqual(
"1.00kB", cli_shared.bytes_to_readable_str(
1024, include_b=True))
self.assertEqual(
"1.00MB", cli_shared.bytes_to_readable_str(
1024**2, include_b=True))
self.assertEqual(
"1.00GB", cli_shared.bytes_to_readable_str(
1024**3, include_b=True))
class TimeToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneTimeWorks(self):
self.assertEqual("0", cli_shared.time_to_readable_str(None))
def testMicrosecondsTime(self):
self.assertEqual("40us", cli_shared.time_to_readable_str(40))
def testMillisecondTime(self):
self.assertEqual("40ms", cli_shared.time_to_readable_str(40e3))
def testSecondTime(self):
self.assertEqual("40s", cli_shared.time_to_readable_str(40e6))
def testForceTimeUnit(self):
self.assertEqual("40s",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("40000ms",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_MS))
self.assertEqual("40000000us",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_US))
self.assertEqual("4e-05s",
cli_shared.time_to_readable_str(
40, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("0",
cli_shared.time_to_readable_str(
0, force_time_unit=cli_shared.TIME_UNIT_S))
with self.assertRaisesRegexp(ValueError, r"Invalid time unit: ks"):
cli_shared.time_to_readable_str(100, force_time_unit="ks")
class GetRunStartIntroAndDescriptionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.const_a = constant_op.constant(11.0, name="a")
self.const_b = constant_op.constant(22.0, name="b")
self.const_c = constant_op.constant(33.0, name="c")
self.sparse_d = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[3, 3])
def tearDown(self):
ops.reset_default_graph()
def testSingleFetchNoFeeds(self):
run_start_intro = cli_shared.get_run_start_intro(12, self.const_a, None, {})
# Verify line about run() call number.
self.assertTrue(run_start_intro.lines[1].endswith("run() call #12:"))
# Verify line about fetch.
const_a_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
# Verify line about feeds.
feeds_line = run_start_intro.lines[7]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify lines about possible commands and their font attributes.
self.assertEqual("run:", run_start_intro.lines[11][2:])
annot = run_start_intro.font_attr_segs[11][0]
self.assertEqual(2, annot[0])
self.assertEqual(5, annot[1])
self.assertEqual("run", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
annot = run_start_intro.font_attr_segs[13][0]
self.assertEqual(2, annot[0])
self.assertEqual(8, annot[1])
self.assertEqual("run -n", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
self.assertEqual("run -t <T>:", run_start_intro.lines[15][2:])
self.assertEqual([(2, 12, "bold")], run_start_intro.font_attr_segs[15])
self.assertEqual("run -f <filter_name>:", run_start_intro.lines[17][2:])
self.assertEqual([(2, 22, "bold")], run_start_intro.font_attr_segs[17])
annot = run_start_intro.font_attr_segs[21][0]
self.assertEqual(2, annot[0])
self.assertEqual(16, annot[1])
self.assertEqual("invoke_stepper", annot[2][0].content)
# Verify short description.
description = cli_shared.get_run_short_description(12, self.const_a, None)
self.assertEqual("run #12: 1 fetch (a:0); 0 feeds", description)
# Verify the main menu associated with the run_start_intro.
self.assertIn(debugger_cli_common.MAIN_MENU_KEY,
run_start_intro.annotations)
menu = run_start_intro.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertEqual("run", menu.caption_to_item("run").content)
self.assertEqual("invoke_stepper",
menu.caption_to_item("invoke_stepper").content)
self.assertEqual("exit", menu.caption_to_item("exit").content)
def testSparseTensorAsFeedShouldHandleNoNameAttribute(self):
sparse_feed_val = ([[0, 0], [1, 1]], [10.0, 20.0])
run_start_intro = cli_shared.get_run_start_intro(
1, self.sparse_d, {self.sparse_d: sparse_feed_val}, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[7].strip())
short_description = cli_shared.get_run_short_description(
1, self.sparse_d, {self.sparse_d: sparse_feed_val})
self.assertEqual(
"run #1: 1 fetch; 1 feed (%s)" % self.sparse_d, short_description)
def testSparseTensorAsFetchShouldHandleNoNameAttribute(self):
run_start_intro = cli_shared.get_run_start_intro(1, self.sparse_d, None, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[4].strip())
def testTwoFetchesListNoFeeds(self):
fetches = [self.const_a, self.const_b]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testNestedListAsFetches(self):
fetches = [self.const_c, [self.const_a, self.const_b]]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches.
self.assertEqual(self.const_c.name, run_start_intro.lines[4].strip())
self.assertEqual(self.const_a.name, run_start_intro.lines[5].strip())
self.assertEqual(self.const_b.name, run_start_intro.lines[6].strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testNestedDictAsFetches(self):
fetches = {"c": self.const_c, "ab": {"a": self.const_a, "b": self.const_b}}
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches. The ordering of the dict keys is
# indeterminate.
fetch_names = set()
fetch_names.add(run_start_intro.lines[4].strip())
fetch_names.add(run_start_intro.lines[5].strip())
fetch_names.add(run_start_intro.lines[6].strip())
self.assertEqual({"a:0", "b:0", "c:0"}, fetch_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testTwoFetchesAsTupleNoFeeds(self):
fetches = (self.const_a, self.const_b)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testTwoFetchesAsNamedTupleNoFeeds(self):
fetches_namedtuple = namedtuple("fetches", "x y")
fetches = fetches_namedtuple(self.const_b, self.const_c)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_b_name_line = run_start_intro.lines[4]
const_c_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_b.name, const_b_name_line.strip())
self.assertEqual(self.const_c.name, const_c_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testWithFeedDict(self):
feed_dict = {
self.const_a: 10.0,
self.const_b: 20.0,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
{})
const_c_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_c.name, const_c_name_line.strip())
# Verify lines about the feed dict.
feed_a_line = run_start_intro.lines[7]
feed_b_line = run_start_intro.lines[8]
self.assertEqual(self.const_a.name, feed_a_line.strip())
self.assertEqual(self.const_b.name, feed_b_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 2 feeds", description)
def testTensorFilters(self):
feed_dict = {self.const_a: 10.0}
tensor_filters = {
"filter_a": lambda x: True,
"filter_b": lambda x: False,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
tensor_filters)
# Verify the listed names of the tensor filters.
filter_names = set()
filter_names.add(run_start_intro.lines[20].split(" ")[-1])
filter_names.add(run_start_intro.lines[21].split(" ")[-1])
self.assertEqual({"filter_a", "filter_b"}, filter_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 1 feed (a:0)", description)
# Verify the command links for the two filters.
command_set = set()
annot = run_start_intro.font_attr_segs[20][0]
command_set.add(annot[2].content)
annot = run_start_intro.font_attr_segs[21][0]
command_set.add(annot[2].content)
self.assertEqual({"run -f filter_a", "run -f filter_b"}, command_set)
def testGetRunShortDescriptionWorksForTensorFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {self.const_a: 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (a:0)", short_description)
def testGetRunShortDescriptionWorksForUnicodeFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {u"foo": 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (foo)", short_description)
class GetErrorIntroTest(test_util.TensorFlowTestCase):
def setUp(self):
self.var_a = variables.Variable(42.0, name="a")
def tearDown(self):
ops.reset_default_graph()
def testShapeError(self):
tf_error = errors.OpError(None, self.var_a.initializer, "foo description",
None)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertEqual("!!! An error occurred during the run !!!",
error_intro.lines[1])
self.assertEqual([(0, len(error_intro.lines[1]), "blink")],
error_intro.font_attr_segs[1])
self.assertEqual(2, error_intro.lines[4].index("ni -a -d -t a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[4][0][0])
self.assertEqual(22, error_intro.font_attr_segs[4][0][1])
self.assertEqual("ni -a -d -t a/Assign",
error_intro.font_attr_segs[4][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[4][0][2][1])
self.assertEqual(2, error_intro.lines[6].index("li -r a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[6][0][0])
self.assertEqual(16, error_intro.font_attr_segs[6][0][1])
self.assertEqual("li -r a/Assign",
error_intro.font_attr_segs[6][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[6][0][2][1])
self.assertEqual(2, error_intro.lines[8].index("lt"))
self.assertEqual(2, error_intro.font_attr_segs[8][0][0])
self.assertEqual(4, error_intro.font_attr_segs[8][0][1])
self.assertEqual("lt", error_intro.font_attr_segs[8][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[8][0][2][1])
self.assertStartsWith(error_intro.lines[11], "Op name:")
self.assertTrue(error_intro.lines[11].endswith("a/Assign"))
self.assertStartsWith(error_intro.lines[12], "Error type:")
self.assertTrue(error_intro.lines[12].endswith(str(type(tf_error))))
self.assertEqual("Details:", error_intro.lines[14])
self.assertStartsWith(error_intro.lines[15], "foo description")
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -5,888,650,789,050,836,000 | 41.100796 | 80 | 0.66255 | false |
aforalee/RRally | rally/plugins/openstack/scenarios/murano/utils.py | 2 | 10549 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
import zipfile
from oslo_config import cfg
import yaml
from rally.common import fileutils
from rally.common import utils as common_utils
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
MURANO_BENCHMARK_OPTS = [
cfg.IntOpt("murano_delete_environment_timeout", default=180,
deprecated_name="delete_environment_timeout",
help="A timeout in seconds for an environment delete"),
cfg.IntOpt("murano_deploy_environment_timeout", default=1200,
deprecated_name="deploy_environment_timeout",
help="A timeout in seconds for an environment deploy"),
cfg.IntOpt("murano_delete_environment_check_interval", default=2,
deprecated_name="delete_environment_check_interval",
help="Delete environment check interval in seconds"),
cfg.IntOpt("murano_deploy_environment_check_interval", default=5,
deprecated_name="deploy_environment_check_interval",
help="Deploy environment check interval in seconds"),
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(MURANO_BENCHMARK_OPTS, group=benchmark_group)
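# Example override (illustrative values, not from the original file): these
# options live in the [benchmark] section of the Rally configuration, e.g.
#   murano_deploy_environment_timeout = 2400
#   murano_deploy_environment_check_interval = 10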
class MuranoScenario(scenario.OpenStackScenario):
"""Base class for Murano scenarios with basic atomic actions."""
@atomic.action_timer("murano.list_environments")
def _list_environments(self):
"""Return environments list."""
return self.clients("murano").environments.list()
@atomic.action_timer("murano.create_environment")
def _create_environment(self, env_name=None):
"""Create environment.
:param env_name: String used to name environment
:returns: Environment instance
"""
env_name = env_name or self._generate_random_name()
return self.clients("murano").environments.create({"name": env_name})
@atomic.action_timer("murano.delete_environment")
def _delete_environment(self, environment):
"""Delete given environment.
Return when the environment is actually deleted.
:param environment: Environment instance
"""
self.clients("murano").environments.delete(environment.id)
config = CONF.benchmark
utils.wait_for_status(
environment,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=config.murano_delete_environment_timeout,
check_interval=config.murano_delete_environment_check_interval
)
@atomic.action_timer("murano.create_session")
def _create_session(self, environment_id):
"""Create session for environment with specific id
:param environment_id: Environment id
:returns: Session instance
"""
return self.clients("murano").sessions.configure(environment_id)
@atomic.optional_action_timer("murano.create_service")
def _create_service(self, environment, session, full_package_name,
image_name=None, flavor_name=None):
"""Create Murano service.
:param environment: Environment instance
:param session: Session instance
:param full_package_name: full name of the Murano package
:param image_name: Image name
:param flavor_name: Flavor name
:param atomic_action: True if this is atomic action. added and
handled by the optional_action_timer()
decorator
:returns: Service instance
"""
app_id = str(uuid.uuid4())
data = {"?": {"id": app_id,
"type": full_package_name},
"name": self._generate_random_name("rally_")}
return self.clients("murano").services.post(
environment_id=environment.id, path="/", data=data,
session_id=session.id)
@atomic.action_timer("murano.deploy_environment")
def _deploy_environment(self, environment, session):
"""Deploy environment.
:param environment: Environment instance
:param session: Session instance
"""
self.clients("murano").sessions.deploy(environment.id,
session.id)
config = CONF.benchmark
utils.wait_for(
environment, is_ready=utils.resource_is("READY"),
update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
timeout=config.murano_deploy_environment_timeout,
check_interval=config.murano_deploy_environment_check_interval
)
@atomic.action_timer("murano.list_packages")
def _list_packages(self, include_disabled=False):
"""Returns packages list.
:param include_disabled: if "True" then disabled packages will be
                                 included in the result.
Default value is False.
:returns: list of imported packages
"""
return self.clients("murano").packages.list(
include_disabled=include_disabled)
@atomic.action_timer("murano.import_package")
def _import_package(self, package):
"""Import package to the Murano.
:param package: path to zip archive with Murano application
:returns: imported package
"""
package = self.clients("murano").packages.create(
{}, {"file": open(package)}
)
return package
@atomic.action_timer("murano.delete_package")
def _delete_package(self, package):
"""Delete specified package.
:param package: package that will be deleted
"""
self.clients("murano").packages.delete(package.id)
@atomic.action_timer("murano.update_package")
def _update_package(self, package, body, operation="replace"):
"""Update specified package.
:param package: package that will be updated
:param body: dict object that defines what package property will be
updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
:param operation: string object that defines the way of how package
property will be updated, allowed operations are
"add", "replace" or "delete".
Default value is "replace".
:returns: updated package
"""
return self.clients("murano").packages.update(
package.id, body, operation)
@atomic.action_timer("murano.filter_applications")
def _filter_applications(self, filter_query):
"""Filter list of uploaded application by specified criteria.
:param filter_query: dict that contains filter criteria, it
will be passed as **kwargs to filter method
e.g. {"category": "Web"}
:returns: filtered list of packages
"""
return self.clients("murano").packages.filter(**filter_query)
def _zip_package(self, package_path):
"""Call _prepare_package method that returns path to zip archive."""
return MuranoPackageManager()._prepare_package(package_path)
class MuranoPackageManager(object):
@staticmethod
def _read_from_file(filename):
with open(filename, "r") as f:
read_data = f.read()
return yaml.safe_load(read_data)
@staticmethod
def _write_to_file(data, filename):
with open(filename, "w") as f:
yaml.safe_dump(data, f)
def _change_app_fullname(self, app_dir):
"""Change application full name.
        To avoid a name conflict error during package import (when a user
        tries to import several packages into the same tenant), the
        application name needs to be changed. To do this, replace the
        following parts in manifest.yaml
from
...
FullName: app.name
...
Classes:
app.name: app_class.yaml
to:
...
FullName: <new_name>
...
Classes:
<new_name>: app_class.yaml
:param app_dir: path to directory with Murano application context
"""
new_fullname = common_utils.generate_random_name("app.")
manifest_file = os.path.join(app_dir, "manifest.yaml")
manifest = self._read_from_file(manifest_file)
class_file_name = manifest["Classes"][manifest["FullName"]]
# update manifest.yaml file
del manifest["Classes"][manifest["FullName"]]
manifest["FullName"] = new_fullname
manifest["Classes"][new_fullname] = class_file_name
self._write_to_file(manifest, manifest_file)
def _prepare_package(self, package_path):
"""Check whether the package path is path to zip archive or not.
If package_path is not a path to zip archive but path to Murano
application folder, than method prepares zip archive with Murano
application. It copies directory with Murano app files to temporary
folder, changes manifest.yaml and class file (to avoid '409 Conflict'
errors in Murano) and prepares zip package.
:param package_path: path to zip archive or directory with package
components
:returns: path to zip archive with Murano application
"""
if not zipfile.is_zipfile(package_path):
tmp_dir = tempfile.mkdtemp()
pkg_dir = os.path.join(tmp_dir, "package/")
try:
shutil.copytree(package_path, pkg_dir)
self._change_app_fullname(pkg_dir)
package_path = fileutils.pack_dir(pkg_dir)
finally:
shutil.rmtree(tmp_dir)
return package_path
| apache-2.0 | -7,280,454,918,527,431,000 | 35.884615 | 79 | 0.625083 | false |
elelsee/pycfn-elasticsearch | pycfn_elasticsearch/vendored/docutils/utils/error_reporting.py | 104 | 7765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: error_reporting.py 7668 2013-06-04 12:46:30Z milde $
# :Copyright: © 2011 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
Error reporting should be safe from encoding/decoding errors.
However, implicit conversions of strings and exceptions like
>>> u'%s world: %s' % ('H\xe4llo', Exception(u'H\xe4llo'))
fail in some Python versions:
* In Python <= 2.6, ``unicode(<exception instance>)`` uses
  `__str__` and fails with non-ASCII chars in `unicode` arguments.
  (work around http://bugs.python.org/issue2517):
* In Python 2, unicode(<exception instance>) fails, with non-ASCII
chars in arguments. (Use case: in some locales, the errstr
argument of IOError contains non-ASCII chars.)
* In Python 2, str(<exception instance>) fails, with non-ASCII chars
in `unicode` arguments.
The `SafeString`, `ErrorString` and `ErrorOutput` classes handle
common exceptions.
"""
import sys, codecs
# Guess the locale's encoding.
# If no valid guess can be made, locale_encoding is set to `None`:
try:
import locale # module missing in Jython
except ImportError:
locale_encoding = None
else:
locale_encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1]
# locale.getpreferredencoding([do_setlocale=True|False])
# has side-effects | might return a wrong guess.
# (cf. Update 1 in http://stackoverflow.com/questions/4082645/using-python-2-xs-locale-module-to-format-numbers-and-currency)
try:
codecs.lookup(locale_encoding or '') # None -> ''
except LookupError:
locale_encoding = None
class SafeString(object):
"""
A wrapper providing robust conversion to `str` and `unicode`.
"""
def __init__(self, data, encoding=None, encoding_errors='backslashreplace',
decoding_errors='replace'):
self.data = data
self.encoding = (encoding or getattr(data, 'encoding', None) or
locale_encoding or 'ascii')
self.encoding_errors = encoding_errors
self.decoding_errors = decoding_errors
def __str__(self):
try:
return str(self.data)
except UnicodeEncodeError, err:
if isinstance(self.data, Exception):
args = [str(SafeString(arg, self.encoding,
self.encoding_errors))
for arg in self.data.args]
return ', '.join(args)
if isinstance(self.data, unicode):
if sys.version_info > (3,0):
return self.data
else:
return self.data.encode(self.encoding,
self.encoding_errors)
raise
def __unicode__(self):
"""
Return unicode representation of `self.data`.
Try ``unicode(self.data)``, catch `UnicodeError` and
* if `self.data` is an Exception instance, work around
http://bugs.python.org/issue2517 with an emulation of
Exception.__unicode__,
* else decode with `self.encoding` and `self.decoding_errors`.
"""
try:
u = unicode(self.data)
if isinstance(self.data, EnvironmentError):
u = u.replace(": u'", ": '") # normalize filename quoting
return u
except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors
if isinstance(self.data, EnvironmentError):
return u"[Errno %s] %s: '%s'" % (self.data.errno,
SafeString(self.data.strerror, self.encoding,
self.decoding_errors),
SafeString(self.data.filename, self.encoding,
self.decoding_errors))
if isinstance(self.data, Exception):
args = [unicode(SafeString(arg, self.encoding,
decoding_errors=self.decoding_errors))
for arg in self.data.args]
return u', '.join(args)
if isinstance(error, UnicodeDecodeError):
return unicode(self.data, self.encoding, self.decoding_errors)
raise
class ErrorString(SafeString):
"""
Safely report exception type and message.
"""
def __str__(self):
return '%s: %s' % (self.data.__class__.__name__,
super(ErrorString, self).__str__())
def __unicode__(self):
return u'%s: %s' % (self.data.__class__.__name__,
super(ErrorString, self).__unicode__())
class ErrorOutput(object):
"""
Wrapper class for file-like error streams with
failsave de- and encoding of `str`, `bytes`, `unicode` and
`Exception` instances.
"""
def __init__(self, stream=None, encoding=None,
encoding_errors='backslashreplace',
decoding_errors='replace'):
"""
:Parameters:
- `stream`: a file-like object,
a string (path to a file),
`None` (write to `sys.stderr`, default), or
evaluating to `False` (write() requests are ignored).
- `encoding`: `stream` text encoding. Guessed if None.
- `encoding_errors`: how to treat encoding errors.
"""
if stream is None:
stream = sys.stderr
elif not(stream):
stream = False
# if `stream` is a file name, open it
elif isinstance(stream, str):
stream = open(stream, 'w')
elif isinstance(stream, unicode):
stream = open(stream.encode(sys.getfilesystemencoding()), 'w')
self.stream = stream
"""Where warning output is sent."""
self.encoding = (encoding or getattr(stream, 'encoding', None) or
locale_encoding or 'ascii')
"""The output character encoding."""
self.encoding_errors = encoding_errors
"""Encoding error handler."""
self.decoding_errors = decoding_errors
"""Decoding error handler."""
def write(self, data):
"""
Write `data` to self.stream. Ignore, if self.stream is False.
`data` can be a `string`, `unicode`, or `Exception` instance.
"""
if self.stream is False:
return
if isinstance(data, Exception):
data = unicode(SafeString(data, self.encoding,
self.encoding_errors, self.decoding_errors))
try:
self.stream.write(data)
except UnicodeEncodeError:
self.stream.write(data.encode(self.encoding, self.encoding_errors))
except TypeError: # in Python 3, stderr expects unicode
if self.stream in (sys.stderr, sys.stdout):
self.stream.buffer.write(data) # write bytes to raw stream
else:
self.stream.write(unicode(data, self.encoding,
self.decoding_errors))
def close(self):
"""
Close the error-output stream.
Ignored if the stream is` sys.stderr` or `sys.stdout` or has no
close() method.
"""
if self.stream in (sys.stdout, sys.stderr):
return
try:
self.stream.close()
except AttributeError:
pass
| apache-2.0 | 4,528,100,260,759,167,000 | 35.791469 | 129 | 0.57452 | false |
xiaoyaozi5566/DynamicCache | src/mem/ruby/network/simple/SimpleLink.py | 18 | 1846 | # Copyright (c) 2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BasicLink import BasicIntLink, BasicExtLink
class SimpleExtLink(BasicExtLink):
type = 'SimpleExtLink'
class SimpleIntLink(BasicIntLink):
type = 'SimpleIntLink'
| bsd-3-clause | -7,001,011,454,336,312,000 | 46.333333 | 72 | 0.788732 | false |
earshel/PokeyPyManager | POGOProtos/Enums/IapItemCategory_pb2.py | 16 | 2408 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Enums/IapItemCategory.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Enums/IapItemCategory.proto',
package='POGOProtos.Enums',
syntax='proto3',
serialized_pb=_b('\n&POGOProtos/Enums/IapItemCategory.proto\x12\x10POGOProtos.Enums*\x94\x01\n\x13HoloIapItemCategory\x12\x15\n\x11IAP_CATEGORY_NONE\x10\x00\x12\x17\n\x13IAP_CATEGORY_BUNDLE\x10\x01\x12\x16\n\x12IAP_CATEGORY_ITEMS\x10\x02\x12\x19\n\x15IAP_CATEGORY_UPGRADES\x10\x03\x12\x1a\n\x16IAP_CATEGORY_POKECOINS\x10\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HOLOIAPITEMCATEGORY = _descriptor.EnumDescriptor(
name='HoloIapItemCategory',
full_name='POGOProtos.Enums.HoloIapItemCategory',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_BUNDLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_ITEMS', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_UPGRADES', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_POKECOINS', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=61,
serialized_end=209,
)
_sym_db.RegisterEnumDescriptor(_HOLOIAPITEMCATEGORY)
HoloIapItemCategory = enum_type_wrapper.EnumTypeWrapper(_HOLOIAPITEMCATEGORY)
IAP_CATEGORY_NONE = 0
IAP_CATEGORY_BUNDLE = 1
IAP_CATEGORY_ITEMS = 2
IAP_CATEGORY_UPGRADES = 3
IAP_CATEGORY_POKECOINS = 4
DESCRIPTOR.enum_types_by_name['HoloIapItemCategory'] = _HOLOIAPITEMCATEGORY
# @@protoc_insertion_point(module_scope)
| mit | 2,174,317,528,529,845,200 | 32.444444 | 342 | 0.749169 | false |
irwinlove/django | tests/template_tests/filter_tests/test_linebreaks.py | 310 | 1920 | from django.template.defaultfilters import linebreaks_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinebreaksTests(SimpleTestCase):
"""
The contents in "linebreaks" are escaped according to the current
autoescape setting.
"""
@setup({'linebreaks01': '{{ a|linebreaks }} {{ b|linebreaks }}'})
def test_linebreaks01(self):
output = self.engine.render_to_string('linebreaks01', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "<p>x&<br />y</p> <p>x&<br />y</p>")
@setup({'linebreaks02':
'{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}'})
def test_linebreaks02(self):
output = self.engine.render_to_string('linebreaks02', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "<p>x&<br />y</p> <p>x&<br />y</p>")
class FunctionTests(SimpleTestCase):
def test_line(self):
self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')
def test_newline(self):
self.assertEqual(linebreaks_filter('line 1\nline 2'), '<p>line 1<br />line 2</p>')
def test_carriage(self):
self.assertEqual(linebreaks_filter('line 1\rline 2'), '<p>line 1<br />line 2</p>')
def test_carriage_newline(self):
self.assertEqual(linebreaks_filter('line 1\r\nline 2'), '<p>line 1<br />line 2</p>')
def test_non_string_input(self):
self.assertEqual(linebreaks_filter(123), '<p>123</p>')
def test_autoescape(self):
self.assertEqual(
linebreaks_filter('foo\n<a>bar</a>\nbuz'),
'<p>foo<br /><a>bar</a><br />buz</p>',
)
def test_autoescape_off(self):
self.assertEqual(
linebreaks_filter('foo\n<a>bar</a>\nbuz', autoescape=False),
'<p>foo<br /><a>bar</a><br />buz</p>',
)
| bsd-3-clause | -3,341,272,972,340,112,400 | 35.226415 | 102 | 0.605729 | false |
kevthehermit/SpearPhisher | testdata.py | 2 | 3851 | # This file will generate random test data and write it to a database.
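# Usage sketch (assumption: the spearphisher Django project and its 'panel'
# app are installed at the paths configured below): run "python testdata.py"
# after adjusting company_domain and company_size in the block marked
# "Edit these entries for each Run".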
import os
import sys
import string
import django
import random
# Fake Factory
from faker import Faker
fake = Faker()
# Connect to the Django Database
sys.path.insert(1,'/home/spearphisher/spearphisher')
script_path = os.path.dirname(__file__)
sys.path.insert(1,'/home/spearphisher/spearphisher')
os.environ['DJANGO_SETTINGS_MODULE']='spearphisher.settings'
django.setup()
# Load the Database Classes
from panel.models import Campaign, Recipient, Template, Configuration, Logging
# Edit these entries for each Run
company_domain = 'companyname.com' # This forms the email address
company_size = 452
os_list = ['Android', 'BlackBerry OS', 'IOS', 'Windows XP', 'Windows 7', 'Windows 8.1']
browser_list = ['Chrome 27.0.1453', 'Chrome 43.0.2357', 'Chrome 32.0.1489', 'IE 6.0.0', 'IE 7.0.0', 'IE 8.0.0', 'IE 9.0.0', 'IE 10.0.0', 'IE 11.0.0']
reader_list = ['null', '', '10.1.13', '11.0.8', '9.3.0', '9.2.3', '11.0.4', '10.1.2', '9.0.0']
flash_list = ['null', '', '18.0.0.161', '13.0.0.292', '15.0.0.189', '16.0.0.257']
java_list = ['null', '', '1.7.0.60', '1.7.0.13', '1.6.0.22', '1.6.0.37', '1.7.0.15', '1.8.0.22']
silver_list = ['null', '', '', '3.0', '3.5', '4.5.1', '', '']
shock_list = ['null', '', '', '12.0.0.112', '12.0.6.147', '8.0.205', '9.0.432', '10.1.1.016']
doc_list = ['Office 2003', 'Office 2007', 'Office 2010', 'Office 2013']
email_list = ['Outlook 2003', 'Outlook 2007', 'Outlook 2010', 'Outlook 2013']
# Create a Campaign
campaign = Campaign()
campaign.name = 'This is another test campaign'
campaign.description = 'This is another test description'
campaign.template_id = '2'
campaign.created = fake.date_time_between(start_date="-3d", end_date="-2d")
campaign.start_data = fake.date_time_between(start_date="-3d", end_date="-2d")
campaign.save()
# Create Recipients
for i in range(company_size):
full_name = fake.name()
email_add = '{0}@{1}'.format(full_name.replace(' ', '.'), company_domain)
uid = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(8)])
# Create a recipient
recipient = Recipient()
recipient.campaign = campaign
recipient.real_name = full_name
recipient.email_address = email_add
recipient.uid = uid
# Choose to enter details or not
portal_choice = random.choice([True, False])
document_choice = random.choice([True, False])
webbug_choice = random.choice([True, False])
# Fill as per choices
if portal_choice:
recipient.portal_open = fake.date_time_between(start_date="-2d", end_date="now")
recipient.os_system = random.choice(os_list)
if recipient.os_system == 'Android':
recipient.web_client = 'Chrome Mobile'
elif recipient.os_system == 'BlackBerry OS':
recipient.web_client = 'BlackBerry WebKit'
else:
recipient.web_client = random.choice(browser_list)
recipient.reader_version = random.choice(reader_list)
recipient.flash_version = random.choice(flash_list)
recipient.java_version = random.choice(java_list)
recipient.silverlight_version = random.choice(silver_list)
recipient.shockwave_version = random.choice(shock_list)
recipient.ip_address = '144.76.87.236'
if document_choice:
recipient.document_open = fake.date_time_between(start_date="-2d", end_date="now")
recipient.document_client = random.choice(doc_list)
recipient.ip_address = '144.76.87.236'
if webbug_choice:
recipient.email_open = fake.date_time_between(start_date="-2d", end_date="now")
recipient.email_client = random.choice(email_list)
recipient.ip_address = '144.76.87.236'
# Save the Recipient
recipient.save()
| gpl-3.0 | -5,972,295,223,162,244,000 | 38.306122 | 149 | 0.64295 | false |
yanheven/cinder | cinder/volume/drivers/block_device.py | 3 | 8381 | # Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder.db.sqlalchemy import api
from cinder import exception
from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.ListOpt('available_devices',
default=[],
help='List of all available devices'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
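# Illustrative configuration sketch (the section name and device paths below are
# assumptions, not taken from this module): a backend section in cinder.conf
# might enable this driver and list its raw devices like
#
#   [blockdevice]
#   volume_driver = cinder.volume.drivers.block_device.BlockDeviceDriver
#   available_devices = /dev/sdb,/dev/sdc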
class BlockDeviceDriver(driver.VolumeDriver):
VERSION = '2.0.0'
def __init__(self, *args, **kwargs):
super(BlockDeviceDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.backend_name = \
self.configuration.safe_get('volume_backend_name') or "BlockDev"
target_driver =\
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume['size'])
        LOG.info(_LI("Create %s on %s"), volume['name'], device)
return {
'provider_location': device,
}
def delete_volume(self, volume):
"""Deletes a logical volume."""
dev_path = self.local_path(volume)
if not dev_path or dev_path not in \
self.configuration.available_devices:
return
if os.path.exists(dev_path) and \
self.configuration.volume_clear != 'none':
volutils.clear_volume(
self._get_device_size(dev_path), dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def local_path(self, volume):
if volume['provider_location']:
path = volume['provider_location'].rsplit(" ", 1)
return path[-1]
else:
return None
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,
self._get_device_size(device) * 2048,
self.configuration.volume_dd_blocksize,
execute=self._execute)
return {
'provider_location': device,
}
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
dict_of_devices_sizes = self._devices_sizes()
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
        for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size
LOG.debug("Updating volume stats")
backend_name = self.configuration.safe_get('volume_backend_name')
data = {'total_capacity_gb': total_size / 1024,
'free_capacity_gb': free_size / 1024,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'volume_backend_name': backend_name or self.__class__.__name__,
'vendor_name': "Open Source",
'driver_version': self.VERSION,
'storage_protocol': 'unknown'}
self._stats = data
def _get_used_devices(self):
lst = api.volume_get_all_by_host(context.get_admin_context(),
self.host)
used_devices = set()
for volume in lst:
local_path = self.local_path(volume)
if local_path:
used_devices.add(local_path)
return used_devices
def _get_device_size(self, dev_path):
out, _err = self._execute('blockdev', '--getsz', dev_path,
run_as_root=True)
size_in_m = int(out)
return size_in_m / 2048
def _devices_sizes(self):
available_devices = self.configuration.available_devices
dict_of_devices_sizes = {}
for device in available_devices:
dict_of_devices_sizes[device] = self._get_device_size(device)
return dict_of_devices_sizes
def find_appropriate_size_device(self, size):
dict_of_devices_sizes = self._devices_sizes()
free_devices = (set(self.configuration.available_devices) -
self._get_used_devices())
if not free_devices:
raise exception.CinderException(_("No free disk"))
possible_device = None
possible_device_size = None
for device in free_devices:
dev_size = dict_of_devices_sizes[device]
if size * 1024 <= dev_size and (possible_device is None or
dev_size < possible_device_size):
possible_device = device
possible_device_size = dev_size
if possible_device:
return possible_device
else:
raise exception.CinderException(_("No big enough free disk"))
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(
context,
volume,
volume_path)
return model_update
def create_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
export_info = self.target_driver.create_export(context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
pass
| apache-2.0 | 2,925,538,829,464,596,500 | 37.095455 | 79 | 0.575826 | false |
YosaiProject/yosai_account_alchemy | yosai_alchemystore/accountstore/accountstore.py | 2 | 10153 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import functools
from sqlalchemy import case, cast, func, Text
from sqlalchemy.sql import Alias, ColumnElement
from sqlalchemy.ext.compiler import compiles
from yosai_alchemystore import (
init_session
)
from yosai_alchemystore.models.models import (
Credential,
CredentialType,
User,
Domain,
Action,
Resource,
Permission,
Role,
role_membership as role_membership_table,
role_permission as role_permission_table,
)
from yosai.core import (
account_abcs,
)
# -------------------------------------------------------
# Following is a recipe used to address postgres-json related shortcomings
# in sqlalchemy v1.1.4. This recipe will eventually be deprecated
# ----------------------------------------------------------
class as_row(ColumnElement):
def __init__(self, expr):
assert isinstance(expr, Alias)
self.expr = expr
@compiles(as_row)
def _gen_as_row(element, compiler, **kw):
return compiler.visit_alias(element.expr, ashint=True, **kw)
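# A minimal usage sketch of the recipe (illustrative, not part of the original
# code): wrapping a subquery alias in as_row() lets it be rendered as a row
# source, e.g. func.row_to_json(as_row(subquery_alias)), which is how it is
# applied in _get_permissions_query() below.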
# -------------------------------------------------------
# -------------------------------------------------------
def session_context(fn):
"""
Handles session setup and teardown
"""
@functools.wraps(fn)
def wrap(*args, **kwargs):
session = args[0].Session() # obtain from self
result = fn(*args, session=session, **kwargs)
session.close()
return result
return wrap
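# Hedged illustration of how the decorator is meant to be applied (it mirrors
# the store methods defined below and adds no new functionality):
#
#   @session_context
#   def get_authz_roles(self, identifier, session=None):
#       ...  # `session` is injected by the decorator and closed afterwards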
class AlchemyAccountStore(account_abcs.CredentialsAccountStore,
account_abcs.AuthorizationAccountStore,
account_abcs.LockingAccountStore):
"""
AccountStore provides the realm-facing API to the relational database
that is managed through the SQLAlchemy ORM.
step 1: generate an orm query
step 2: execute the query
step 3: return results
"""
def __init__(self, db_url=None, session=None, settings=None):
"""
:param db_url: engine configuration that is in the
'Database URL' format as supported by SQLAlchemy:
http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
:type db_url: string
"""
if session is None:
self.Session = init_session(db_url=db_url, settings=settings)
else:
self.Session = session
def _get_user_query(self, session, identifier):
return session.query(User).filter(User.identifier == identifier)
def _get_permissions_query(self, session, identifier):
"""
        select domain, json_agg(parts) as permissions from
            (select domain, row_to_json(r) as parts from
                (select domain, action, array_agg(distinct target) as target from
                    (select (case when domain is null then '*' else domain end) as domain,
                            (case when target is null then '*' else target end) as target,
                            array_agg(distinct (case when action is null then '*' else action end)) as action
                     from permission
                     group by domain, target) x
                 group by domain, action) r) parts
        group by domain;
"""
thedomain = case([(Domain.name == None, '*')], else_=Domain.name)
theaction = case([(Action.name == None, '*')], else_=Action.name)
theresource = case([(Resource.name == None, '*')], else_=Resource.name)
action_agg = func.array_agg(theaction.distinct())
stmt1 = (
session.query(Permission.domain_id,
thedomain.label('domain'),
Permission.resource_id,
theresource.label('resource'),
action_agg.label('action')).
select_from(User).
join(role_membership_table, User.pk_id == role_membership_table.c.user_id).
join(role_permission_table, role_membership_table.c.role_id == role_permission_table.c.role_id).
join(Permission, role_permission_table.c.permission_id == Permission.pk_id).
outerjoin(Domain, Permission.domain_id == Domain.pk_id).
outerjoin(Action, Permission.action_id == Action.pk_id).
outerjoin(Resource, Permission.resource_id == Resource.pk_id).
filter(User.identifier == identifier).
group_by(Permission.domain_id, Domain.name, Permission.resource_id, Resource.name)).subquery()
stmt2 = (session.query(stmt1.c.domain,
stmt1.c.action,
func.array_agg(stmt1.c.resource.distinct()).label('resource')).
select_from(stmt1).
group_by(stmt1.c.domain, stmt1.c.action)).subquery()
stmt3 = (session.query(stmt2.c.domain,
func.row_to_json(as_row(stmt2)).label('parts')).
select_from(stmt2)).subquery()
final = (session.query(stmt3.c.domain, cast(func.json_agg(stmt3.c.parts), Text)).
select_from(stmt3).
group_by(stmt3.c.domain))
return final
def _get_roles_query(self, session, identifier):
"""
:type identifier: string
"""
return (session.query(Role).
join(role_membership_table, Role.pk_id == role_membership_table.c.role_id).
join(User, role_membership_table.c.user_id == User.pk_id).
filter(User.identifier == identifier))
def _get_credential_query(self, session, identifier):
return (session.query(CredentialType.title, Credential.credential).
join(Credential, CredentialType.pk_id == Credential.credential_type_id).
join(User, Credential.user_id == User.pk_id).
filter(User.identifier == identifier))
@session_context
def get_authc_info(self, identifier, session=None):
"""
If an Account requires credentials from multiple data stores, this
AccountStore is responsible for aggregating them (composite) and returning
the results in a single account object.
:returns: a dict of account attributes
"""
user = self._get_user_query(session, identifier).first()
creds = self._get_credential_query(session, identifier).all()
if not creds:
return None
authc_info = {cred_type: {'credential': cred_value, 'failed_attempts': []}
for cred_type, cred_value in creds}
if 'totp_key' in authc_info:
authc_info['totp_key']['2fa_info'] = {'phone_number': user.phone_number}
return dict(account_locked=user.account_lock_millis, authc_info=authc_info)
@session_context
def get_authz_permissions(self, identifier, session=None):
try:
return dict(self._get_permissions_query(session, identifier).all())
except (AttributeError, TypeError):
return None
@session_context
def get_authz_roles(self, identifier, session=None):
try:
return [r.title for r in self._get_roles_query(session, identifier).all()]
except (AttributeError, TypeError):
return None
@session_context
def lock_account(self, identifier, locked_time, session=None):
session.query(User).\
filter(User.identifier == identifier).\
update({User.account_lock_millis: locked_time})
session.commit()
@session_context
def unlock_account(self, identifier, session=None):
session.query(User).\
filter(User.identifier == identifier).\
update({User.account_lock_millis: None})
session.commit()
# @session_context
# def get_account(self, identifier, session=None):
# """
# get_account performs the most comprehensive collection of information
# from the database, including credentials AND authorization information
#
# :param identifier: the request object's identifier
# :returns: dict
#
# CAUTION
# --------
# This method was initially created as part of shiro porting but is
# not intended for v0.1.0 use due to lack of support for get_or_create_multi
# dogpile locking. If you would like to use get_account, you *should*
# implement an appropriate get_or_create_multi caching process (and submit
# the changes as pull requests to yosai!). Without dogpile protection,
# you run the risk of concurrently calling the most expensive creational
# process
#
# """
# cred = self.get_credential_query(session, identifier).scalar()
# credential = self.credential(cred)
#
# roles = {self.role(r.title)
# for r in self.get_roles_query(session, identifier).all()}
#
# perms = self.get_permissions_query(session, identifier).all()
# permissions = {self.permission(permission=p.perm)
# for p in perms}
#
# authz_info = self.authz_info(roles=roles,
# permissions=permissions)
#
# account = dict(account_id=identifier,
# credentials=credential,
# authz_info=authz_info)
#
# return account
| apache-2.0 | 5,269,965,566,354,407,000 | 37.604563 | 113 | 0.600118 | false |
matthewoliver/swift | test/unit/common/test_utils.py | 2 | 275026 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree, debug_logger, make_timestamp_iter, with_tempdir
import ctypes
import contextlib
import errno
import eventlet
import eventlet.debug
import eventlet.event
import eventlet.patcher
import functools
import grp
import logging
import platform
import os
import mock
import pwd
import random
import re
import socket
import string
import sys
import json
import math
import inspect
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import http_client
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \
set_swift_dir
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import POLICIES, reload_storage_policies
from swift.common.swob import Request, Response
from test.unit import FakeLogger, requires_o_tmpfile_support, \
requires_o_tmpfile_support_in_tmp, quiet_eventlet_exceptions
threading = eventlet.patcher.original('threading')
class MockOs(object):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
def test_invalid_string_conversion(self):
t = utils.Timestamp.now()
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
# can't have a offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
u'1402436408.91203_000000f0',
b'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def _test_greater_with_offset(self, now, test_values):
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
        # Part 1: use the natural current time of the Python process. This is
        # unpredictable from run to run, but completely legitimate and
        # realistic, and it finds bugs.
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 2: Same as above, but with fixed time values that reproduce
# specific corner cases.
now = 1519830570.6949348
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, now,
)
self._test_greater_with_offset(now, test_values)
# Part 3: The '%f' problem. Timestamps cannot be converted to %f
# strings, then back to timestamps, then compared with originals.
# You can only "import" a floating point representation once.
now = 1519830570.6949348
now = float('%f' % now)
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%s_00000000' % now,
)
self._test_greater_with_offset(now, test_values)
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
def test_get_zero_indexed_base_string(self):
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', None),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', 1),
'something-1')
self.assertRaises(ValueError, utils.get_zero_indexed_base_string,
'something', 'not_integer')
@with_tempdir
def test_lock_path(self, tmpdir):
# 2 locks with limit=1 must fail
success = False
with utils.lock_path(tmpdir, 0.1):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
# 2 locks with limit=2 must succeed
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
try:
with utils.lock_path(tmpdir, 0.1, limit=2):
success = True
except LockTimeout as exc:
self.fail('Unexpected exception %s' % exc)
self.assertTrue(success)
# 3 locks with limit=2 must fail
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
with utils.lock_path(tmpdir, 0.1, limit=2):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_invalid_limit(self, tmpdir):
success = False
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=0):
success = True
self.assertFalse(success)
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=-1):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit='1'):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit=1.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_num_sleeps(self, tmpdir):
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
@with_tempdir
def test_lock_path_class(self, tmpdir):
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
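        # Nothing reaches the logger until stdout/stderr are swapped for the
        # LoggerFileObjects below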
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
            self.assertEqual(crashy_calls[0], 1)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertNotIn('once', options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key0': 99,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# cached entries are sticky
submit_dict = {}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dicts can be erased...
submit_dict = {'key1': {'key2': {}}}
expect_dict = {'key0': 101,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# top level dicts can be erased...
submit_dict = {'key1': {}}
expect_dict = {'key0': 101}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_set_owner(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
_ret = lambda: None
_ret.pw_uid = 100
_mock_getpwnam = MagicMock(return_value=_ret)
_mock_chown = mock.Mock()
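            # Stub out pwd.getpwnam and os.chown so no real ownership change
            # happens; we only verify the calls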
with patch('os.chown', _mock_chown), \
patch('pwd.getpwnam', _mock_getpwnam):
utils.dump_recon_cache(submit_dict, testcache_file,
logger, set_owner="swift")
_mock_getpwnam.assert_called_once_with("swift")
self.assertEqual(_mock_chown.call_args[0][1], 100)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_load_recon_cache(self):
stub_data = {'test': 'foo'}
with NamedTemporaryFile() as f:
f.write(json.dumps(stub_data).encode("utf-8"))
f.flush()
self.assertEqual(stub_data, utils.load_recon_cache(f.name))
# missing files are treated as empty
self.assertFalse(os.path.exists(f.name)) # sanity
self.assertEqual({}, utils.load_recon_cache(f.name))
# Corrupt files are treated as empty. We could crash and make an
# operator fix the corrupt file, but they'll "fix" it with "rm -f
# /var/cache/swift/*.recon", so let's just do it for them.
with NamedTemporaryFile() as f:
f.write(b"{not [valid (json")
f.flush()
self.assertEqual({}, utils.load_recon_cache(f.name))
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all the
        # way to syslog, but it exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.ThreadSafeSysLogHandler
syslog_handler_args = []
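        # Wrap ThreadSafeSysLogHandler so we can capture the arguments it is
        # constructed with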
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
syslog_handler_catcher):
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
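        # Raise the given exception and log it through logger.exception()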
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('errno.ECONNREFUSED message test', log_msg)
self.assertIn('Connection refused', log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Host unreachable', log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Connection timeout', log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertIn('my error message', log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertNotIn('my error message', log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test BadStatusLine
log_exception(http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('BadStatusLine', log_msg)
self.assertIn("''", log_msg)
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('txn', log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertIn('my#012error#012message', log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('client_ip', log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('client_ip', log_msg)
self.assertIn('1.2.3.4', log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertNotIn('client_ip', log_msg)
self.assertNotIn('1.2.3.4', log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
b'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = b'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path, encoding=None):
return [conf_path]
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
        conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaisesRegexp(
ValueError, 'Unable to find section3 config section in.*',
utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_raw(self):
        conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def _check_drop_privileges(self, mock_os, required_func_calls,
call_setsid=True):
user = getuser()
user_data = pwd.getpwnam(user)
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user, call_setsid=call_setsid)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem}
self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0]))
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
self.assertEqual('/', mock_os.called_funcs['chdir'][0])
self.assertEqual(0o22, mock_os.called_funcs['umask'][0])
def test_drop_privileges(self):
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
mock_os = MockOs(called_funcs=required_func_calls)
self._check_drop_privileges(mock_os, required_func_calls)
def test_drop_privileges_setsid_error(self):
# OSError trying to get session leader
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
self._check_drop_privileges(mock_os, required_func_calls)
def test_drop_privileges_no_call_setsid(self):
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
# OSError if trying to get session leader, but it shouldn't be called
bad_func_calls = ('setsid',)
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
self._check_drop_privileges(mock_os, required_func_calls,
call_setsid=False)
for func in bad_func_calls:
self.assertNotIn(func, mock_os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
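        # Fake time() and sleep() advance a pseudo clock so the test runs
        # instantly while still measuring "elapsed" time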
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure it's accurate to a tenth of a second; converting the
            # time difference to milliseconds, 100 milliseconds is 1/10 second
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
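        # A max_rate <= 0 disables rate limiting, so these loops should not
        # sleep at all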
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
            'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertIsNone(utils.remove_file(file_name))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertIsNone(utils.remove_file(file_name))
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
        fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_positive_int_value(self):
expectations = {
# value : expected,
u'1': 1,
b'1': 1,
1: 1,
u'2': 2,
b'2': 2,
u'1024': 1024,
b'1024': 1024,
u'0': ValueError,
b'0': ValueError,
u'-1': ValueError,
b'-1': ValueError,
u'0x01': ValueError,
b'0x01': ValueError,
u'asdf': ValueError,
b'asdf': ValueError,
None: ValueError,
0: ValueError,
-1: ValueError,
            u'1.2': ValueError,  # string expressing a float should be a ValueError
            b'1.2': ValueError,  # string expressing a float should be a ValueError
}
for value, expected in expectations.items():
try:
rv = utils.config_positive_int_value(value)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(
'Config option must be an positive int number, '
'not "%s".' % value, e.args[0])
else:
self.assertEqual(expected, rv)
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
f_blocks = 100
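        # Fake fstatvfs that reports whatever is currently set on StatVFS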
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Make sure setting noop, which disables fallocate, also stops the
# fallocate_reserve check.
# Set the fallocate_reserve to 99% and request an object that is
# about 50% the size. With fallocate_reserve off this will succeed.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('99%')
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0)
# Setting noop to False after the constructor allows us to use
# a noop fallocate syscall and still test fallocate_reserve.
fallocate.noop = False
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1023')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1022')
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1% reserved, have 100 bytes * 2/100 free, and file size is
# 99, so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0)
# Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49,
# so succeeds
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 50
StatVFS.f_bavail = 2
StatVFS.f_blocks = 50
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0)
# Want 100% reserved, have 100 * 100/100 free, and file size is 0,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 100
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(0))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 1% reserved, have 100 * 2/100 free, and file size is 101,
# so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 2
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(101))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 98% reserved, have 100 bytes * 99/100 free, and file size
            # is 100, so fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('98%')
StatVFS.f_frsize = 100
StatVFS.f_bavail = 99
StatVFS.f_blocks = 100
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(100))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 98 <= 98'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
# Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 999, so succeeds.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0)
            # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
# is 1000, so fails.
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2%')
StatVFS.f_frsize = 1000
StatVFS.f_bavail = 21
StatVFS.f_blocks = 1000
with self.assertRaises(OSError) as catcher:
fallocate(0, 1, 0, ctypes.c_uint64(1000))
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 2 <= 2'
% errno.ENOSPC)
self.assertEqual(catcher.exception.errno, errno.ENOSPC)
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
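        # Record calls to _sys_fallocate instead of invoking the real syscall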
with patch.object(utils, '_sys_fallocate', FallocateWrapper()):
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('garbage')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
        # invalid values should raise ValueError with a helpful message
        with self.assertRaises(ValueError) as catcher:
            utils.config_fallocate_value('ab%')
        self.assertEqual(str(catcher.exception),
                         'Error: ab% is an invalid value for '
                         'fallocate_reserve.')
        with self.assertRaises(ValueError) as catcher:
            utils.config_fallocate_value('ab')
        self.assertEqual(str(catcher.exception),
                         'Error: ab is an invalid value for '
                         'fallocate_reserve.')
        with self.assertRaises(ValueError) as catcher:
            utils.config_fallocate_value('1%%')
        self.assertEqual(str(catcher.exception),
                         'Error: 1%% is an invalid value for '
                         'fallocate_reserve.')
        with self.assertRaises(ValueError) as catcher:
            utils.config_fallocate_value('10.0')
        self.assertEqual(str(catcher.exception),
                         'Error: 10.0 is an invalid value for '
                         'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write(b"test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), b"test string")
                # we have a lock, now let's try to get another one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), b"test string")
f.seek(0)
f.write(b"\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), b"test string\nanother string")
                # we have a lock, now let's try to get another one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), b"test string\nanother string")
                # we have a lock, now let's try to get another one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
            # unlink the file after it's opened, but only the first time
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink(tempfile.gettempdir(), link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_stubfile(self):
tmpdir = mkdtemp()
fname = os.path.join(tmpdir, ".ismount")
try:
with open(fname, "w") as stubfile:
stubfile.write("")
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_get_valid_utf8_str(self):
def do_test(input_value, expected):
actual = utils.get_valid_utf8_str(input_value)
self.assertEqual(expected, actual)
self.assertIsInstance(actual, six.binary_type)
actual.decode('utf-8')
do_test(b'abc', b'abc')
do_test(u'abc', b'abc')
do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81')
do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81')
# test some invalid UTF-8
do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd')
# check surrogate pairs, too
        do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1')
        do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1')
def test_quote_bytes(self):
self.assertEqual(b'/v1/a/c3/subdirx/',
utils.quote(b'/v1/a/c3/subdirx/'))
self.assertEqual(b'/v1/a%26b/c3/subdirx/',
utils.quote(b'/v1/a&b/c3/subdirx/'))
self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(b'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(b'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'.encode('utf8')))
        # Invalid utf8 has its undecodable bytes replaced with U+FFFD before
        # being re-encoded and quoted
self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1]))
def test_quote_unicode(self):
self.assertEqual(u'/v1/a/c3/subdirx/',
utils.quote(u'/v1/a/c3/subdirx/'))
self.assertEqual(u'/v1/a%26b/c3/subdirx/',
utils.quote(u'/v1/a&b/c3/subdirx/'))
self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(u'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(u'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_parse_override_options(self):
# When override_<thing> is passed in, it takes precedence.
opts = utils.parse_override_options(
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# When override_<thing> is passed in, it applies even in run-once
# mode.
opts = utils.parse_override_options(
once=True,
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# In run-once mode, we honor the passed-in overrides.
opts = utils.parse_override_options(
once=True,
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1, 2, 3])
self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd'])
self.assertEqual(opts.partitions, [100, 200, 300, 400])
# In run-forever mode, we ignore the passed-in overrides.
opts = utils.parse_override_options(
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [])
self.assertEqual(opts.devices, [])
self.assertEqual(opts.partitions, [])
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp()
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on the parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_modify_priority(self):
pid = os.getpid()
logger = debug_logger()
called = {}
def _fake_setpriority(*args):
called['setpriority'] = args
def _fake_syscall(*args):
called['syscall'] = args
# Test if current architecture supports changing of priority
try:
utils.NR_ioprio_set()
except OSError as e:
raise unittest.SkipTest(e)
with patch('swift.common.utils._libc_setpriority',
_fake_setpriority), \
patch('swift.common.utils._posix_syscall', _fake_syscall):
called = {}
# not set / default
utils.modify_priority({}, logger)
self.assertEqual(called, {})
called = {}
# just nice
utils.modify_priority({'nice_priority': '1'}, logger)
self.assertEqual(called, {'setpriority': (0, pid, 1)})
called = {}
# just ionice class uses default priority 0
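            # (the fourth syscall argument packs the ioprio class into the
            # high bits -- class << 13 -- with the priority in the low bits,
            # so IOPRIO_CLASS_RT with the default priority becomes 1 << 13)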
utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
else:
self.fail("Unexpected call: %r" % called)
called = {}
            # ionice priority alone (without a class) is ignored
utils.modify_priority({'ionice_priority': '4'}, logger)
self.assertEqual(called, {})
called = {}
# bad ionice class
utils.modify_priority({'ionice_class': 'class_foo'}, logger)
self.assertEqual(called, {})
called = {}
# ionice class & priority
utils.modify_priority({
'ionice_class': 'IOPRIO_CLASS_BE',
'ionice_priority': '4',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (251, 1, pid, 2 << 13 | 4)
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'syscall': (30, 1, pid, 2 << 13 | 4)
})
else:
self.fail("Unexpected call: %r" % called)
called = {}
# all
utils.modify_priority({
'nice_priority': '-15',
'ionice_class': 'IOPRIO_CLASS_IDLE',
'ionice_priority': '6',
}, logger)
if architecture == 'x86_64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (251, 1, pid, 3 << 13 | 6),
})
elif architecture == 'aarch64' and arch_bits == '64bit':
self.assertEqual(called, {
'setpriority': (0, pid, -15),
'syscall': (30, 1, pid, 3 << 13 | 6),
})
else:
self.fail("Unexpected call: %r" % called)
def test__NR_ioprio_set(self):
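        # 251 and 30 are the ioprio_set syscall numbers on x86_64 and aarch64
        # respectively; any other architecture is expected to raise OSError.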
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(251, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertEqual(30, utils.NR_ioprio_set())
with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
patch('platform.architecture', return_value=('32bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
patch('platform.architecture', return_value=('64bit', '')):
self.assertRaises(OSError, utils.NR_ioprio_set)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
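        # An O_TMPFILE fd has no name until it is linked into the
        # filesystem; link_fd_to_path() is expected to give it one (it is
        # built on linkat, which the error test below mocks out) and to
        # fsync the directory.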
tempdir = mkdtemp()
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
data = b"I'm whatever Gotham needs me to be"
_m_fsync_dir = mock.Mock()
try:
os.write(fd, data)
# fd is O_WRONLY
self.assertRaises(OSError, os.read, fd, 1)
file_path = os.path.join(tempdir, uuid4().hex)
with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.link_fd_to_path(fd, file_path, 1)
with open(file_path, 'rb') as f:
self.assertEqual(f.read(), data)
self.assertEqual(_m_fsync_dir.call_count, 2)
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp()
# Create and write to a file
fd, path = tempfile.mkstemp(dir=tempdir)
os.write(fd, b"hello world")
os.fsync(fd)
os.close(fd)
self.assertTrue(os.path.exists(path))
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
try:
os.write(fd, b"bye world")
os.fsync(fd)
utils.link_fd_to_path(fd, path, 0, fsync=False)
            # The original file should now have been overwritten
with open(path, 'rb') as f:
self.assertEqual(f.read(), b"bye world")
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support_in_tmp
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp()
target_dir = os.path.join(tempdir, uuid4().hex)
target_path = os.path.join(target_dir, uuid4().hex)
os.mkdir(target_dir)
fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
        # Simulate the directory being deleted by another backend process
os.rmdir(target_dir)
self.assertFalse(os.path.exists(target_dir))
try:
utils.link_fd_to_path(fd, target_path, 1)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_path))
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_safe_json_loads(self):
expectations = {
None: None,
'': None,
0: None,
1: None,
'"asdf"': 'asdf',
'[]': [],
'{}': {},
"{'foo': 'bar'}": None,
'{"foo": "bar"}': {'foo': 'bar'},
}
failures = []
for value, expected in expectations.items():
try:
result = utils.safe_json_loads(value)
except Exception as e:
                # it's called safe; if it blows up, the test blows up
self.fail('%r caused safe method to throw %r!' % (
value, e))
try:
self.assertEqual(expected, result)
except AssertionError:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_strict_b64decode(self):
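        # strict_b64decode should reject anything that is not clean base64
        # (stray whitespace, bad characters, truncated groups) while still
        # tolerating redundant trailing '=' padding -- see the table below.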
expectations = {
None: ValueError,
0: ValueError,
b'': b'',
u'': b'',
b'A': ValueError,
b'AA': ValueError,
b'AAA': ValueError,
b'AAAA': b'\x00\x00\x00',
u'AAAA': b'\x00\x00\x00',
b'////': b'\xff\xff\xff',
u'////': b'\xff\xff\xff',
b'A===': ValueError,
b'AA==': b'\x00',
b'AAA=': b'\x00\x00',
b' AAAA': ValueError,
b'AAAA ': ValueError,
b'AAAA============': b'\x00\x00\x00',
b'AA&AA==': ValueError,
b'====': b'',
}
failures = []
for value, expected in expectations.items():
try:
result = utils.strict_b64decode(value)
except Exception as e:
if inspect.isclass(expected) and issubclass(
expected, Exception):
if not isinstance(e, expected):
failures.append('%r raised %r (expected to raise %r)' %
(value, e, expected))
else:
failures.append('%r raised %r (expected to return %r)' %
(value, e, expected))
else:
if inspect.isclass(expected) and issubclass(
expected, Exception):
failures.append('%r => %r (expected to raise %r)' %
(value, result, expected))
elif result != expected:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_replace_partition_in_path(self):
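        # (Raising the part power from 10 to 11 should double the partition;
        # whether the new value is 2*old or 2*old + 1 presumably depends on
        # the next bit of the object's hash.)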
# Check for new part = part * 2
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path(old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path(old, 10), old)
self.assertEqual(utils.replace_partition_in_path(new, 11), new)
# Check for new part = part * 2 + 1
old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path(old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path(old, 10), old)
self.assertEqual(utils.replace_partition_in_path(new, 11), new)
def test_round_robin_iter(self):
it1 = iter([1, 2, 3])
it2 = iter([4, 5])
it3 = iter([6, 7, 8, 9])
it4 = iter([])
rr_its = utils.round_robin_iter([it1, it2, it3, it4])
got = list(rr_its)
# Expect that items get fetched in a round-robin fashion from the
# iterators
self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got)
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or next(self.ts)
        open(fpath, 'w').close()
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, next(self.ts))
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, next(self.ts))
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, next(self.ts))
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = next(self.ts)
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = next(self.ts)
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
        # don't unlink everything
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], next(self.ts))
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], next(self.ts))
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], next(self.ts))
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertIn('swift', utils._swift_admin_info)
self.assertIn('admin_foo', utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertIn('admin_lorem', utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertIn('cap1', utils._swift_admin_info)
self.assertIn('ac1_foo', utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('ac1_lorem', utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertNotIn('swift', utils._swift_info)
self.assertNotIn('cap1', utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('disallowed_sections', info['admin'])
self.assertIn('cap1', info['admin']['disallowed_sections'])
self.assertNotIn('cap2', info['admin']['disallowed_sections'])
self.assertIn('cap3', info['admin']['disallowed_sections'])
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertNotIn('cap3', info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertNotIn('cap1_foo', info['cap1'])
self.assertNotIn('c', info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
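        # A metric is only emitted when the client's random() value falls
        # below the sample rate, and the rate is appended to the payload as
        # '|@<rate>' (presumably so the statsd server can scale counts back
        # up).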
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
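        # timing_stats() should report the 2xx/4xx responses exercised below
        # under '<func>.timing' and the 5xx responses under
        # '<func>.errors.timing'.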
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(400)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(500)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(507)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertIsNone(next(pile))
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
            # repeat to verify that pending goes back up again after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestSpliterator(unittest.TestCase):
def test_string(self):
input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
"nit", "y-fungusy", "-nummular"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(8)), "counter-")
self.assertEqual(''.join(si.take(7)), "branch-")
self.assertEqual(''.join(si.take(10)), "maternity-")
self.assertEqual(''.join(si.take(8)), "fungusy-")
self.assertEqual(''.join(si.take(8)), "nummular")
def test_big_input_string(self):
input_chunks = ["iridium"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(2)), "ir")
self.assertEqual(''.join(si.take(1)), "i")
self.assertEqual(''.join(si.take(2)), "di")
self.assertEqual(''.join(si.take(1)), "u")
self.assertEqual(''.join(si.take(1)), "m")
def test_chunk_boundaries(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(7)), "soylent")
self.assertEqual(''.join(si.take(5)), "green")
self.assertEqual(''.join(si.take(2)), "is")
self.assertEqual(''.join(si.take(6)), "people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'')
self.assertRaises(StopIteration, next, it)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
self.assertRaises(StopIteration, next, it)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(2), b'ab')
self.assertEqual(fp.read(2), b'cd')
self.assertEqual(fp.read(2), b'ef')
self.assertEqual(fp.read(2), b'g')
self.assertEqual(fp.read(2), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
b'--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabc'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abc')
self.assertRaises(StopIteration, next, it)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
b'jkl\r\n\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
b'\r\njkl\r\n\r\n--unique--'),
b'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
def test_closed_part_iterator(self):
print('test')
useful_iter_mock = mock.MagicMock()
useful_iter_mock.__iter__.return_value = ['']
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
self.assertEqual(body, '')
useful_iter_mock.close.assert_called_once_with()
# Calling "close" on the mock will now raise an AttributeError
del useful_iter_mock.close
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=FakeLogger())
body = ''
for s in body_iter:
body += s
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
class TestHashForFileFunction(unittest.TestCase):
def setUp(self):
self.tempfilename = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tempfilename)
except OSError:
pass
def test_hash_for_file_smallish(self):
stub_data = b'some data'
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([mock.call(stub_data)],
mock_hasher.update.call_args_list)
def test_hash_for_file_big(self):
num_blocks = 10
block_size = utils.MD5_BLOCK_READ_BYTES
truncate = 523
start_char = ord('a')
expected_blocks = [chr(i).encode('utf8') * block_size
for i in range(start_char, start_char + num_blocks)]
full_data = b''.join(expected_blocks)
trimmed_data = full_data[:-truncate]
# sanity
self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
with open(self.tempfilename, 'wb') as fd:
fd.write(trimmed_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
found_blocks = []
for i, (expected_block, call) in enumerate(zip(
expected_blocks, mock_hasher.update.call_args_list)):
args, kwargs = call
self.assertEqual(kwargs, {})
self.assertEqual(1, len(args))
block = args[0]
if i < num_blocks - 1:
self.assertEqual(block, expected_block)
else:
self.assertEqual(block, expected_block[:-truncate])
found_blocks.append(block)
self.assertEqual(b''.join(found_blocks), trimmed_data)
def test_hash_for_file_empty(self):
with open(self.tempfilename, 'wb'):
pass
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertIs(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([], mock_hasher.update.call_args_list)
def test_hash_for_file_brittle(self):
data_to_expected_hash = {
b'': 'd41d8cd98f00b204e9800998ecf8427e',
b'some data': '1e50210a0202497fb79bc38b6ade6c34',
(b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
}
        # Unlike some other places, where the concrete implementation really
        # matters for backwards compatibility, these brittle tests are
        # probably not needed or justified; if a future maintainer rips them
        # out later, they're probably doing the right thing.
failures = []
for stub_data, expected_hash in data_to_expected_hash.items():
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
rv = utils.md5_hash_for_file(self.tempfilename)
try:
self.assertEqual(expected_hash, rv)
except AssertionError:
trim_cap = 80
if len(stub_data) > trim_cap:
stub_data = '%s...<truncated>' % stub_data[:trim_cap]
failures.append('hash for %r was %s instead of expected %s' % (
stub_data, rv, expected_hash))
if failures:
self.fail('Some data did not compute expected hash:\n' +
'\n'.join(failures))
class TestSetSwiftDir(unittest.TestCase):
def setUp(self):
self.swift_dir = tempfile.mkdtemp()
self.swift_conf = os.path.join(self.swift_dir, 'swift.conf')
self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
with open(self.swift_conf, "wt") as sc:
sc.write('''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = %s
''' % self.policy_name)
def tearDown(self):
shutil.rmtree(self.swift_dir, ignore_errors=True)
def test_set_swift_dir(self):
set_swift_dir(None)
reload_storage_policies()
self.assertIsNone(POLICIES.get_by_name(self.policy_name))
set_swift_dir(self.swift_dir)
reload_storage_policies()
self.assertIsNotNone(POLICIES.get_by_name(self.policy_name))
class TestPipeMutex(unittest.TestCase):
def setUp(self):
self.mutex = utils.PipeMutex()
def tearDown(self):
self.mutex.close()
def test_nonblocking(self):
evt_lock1 = eventlet.event.Event()
evt_lock2 = eventlet.event.Event()
evt_unlock = eventlet.event.Event()
def get_the_lock():
self.mutex.acquire()
evt_lock1.send('got the lock')
evt_lock2.wait()
self.mutex.release()
evt_unlock.send('released the lock')
eventlet.spawn(get_the_lock)
evt_lock1.wait() # Now, the other greenthread has the lock.
self.assertFalse(self.mutex.acquire(blocking=False))
evt_lock2.send('please release the lock')
evt_unlock.wait() # The other greenthread has released the lock.
self.assertTrue(self.mutex.acquire(blocking=False))
def test_recursive(self):
self.assertTrue(self.mutex.acquire(blocking=False))
self.assertTrue(self.mutex.acquire(blocking=False))
def try_acquire_lock():
return self.mutex.acquire(blocking=False)
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertTrue(eventlet.spawn(try_acquire_lock).wait())
def test_release_without_acquire(self):
self.assertRaises(RuntimeError, self.mutex.release)
def test_too_many_releases(self):
self.mutex.acquire()
self.mutex.release()
self.assertRaises(RuntimeError, self.mutex.release)
def test_wrong_releaser(self):
self.mutex.acquire()
with quiet_eventlet_exceptions():
self.assertRaises(RuntimeError,
eventlet.spawn(self.mutex.release).wait)
def test_blocking(self):
evt = eventlet.event.Event()
sequence = []
def coro1():
eventlet.sleep(0) # let coro2 go
self.mutex.acquire()
sequence.append('coro1 acquire')
evt.send('go')
self.mutex.release()
sequence.append('coro1 release')
def coro2():
evt.wait() # wait for coro1 to start us
self.mutex.acquire()
sequence.append('coro2 acquire')
self.mutex.release()
sequence.append('coro2 release')
c1 = eventlet.spawn(coro1)
c2 = eventlet.spawn(coro2)
c1.wait()
c2.wait()
self.assertEqual(sequence, [
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release'])
def test_blocking_tpool(self):
# Note: this test's success isn't a guarantee that the mutex is
# working. However, this test's failure means that the mutex is
# definitely broken.
sequence = []
def do_stuff():
n = 10
while n > 0:
self.mutex.acquire()
sequence.append("<")
eventlet.sleep(0.0001)
sequence.append(">")
self.mutex.release()
n -= 1
greenthread1 = eventlet.spawn(do_stuff)
greenthread2 = eventlet.spawn(do_stuff)
real_thread1 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread2.start()
greenthread1.wait()
greenthread2.wait()
real_thread1.join()
real_thread2.join()
self.assertEqual(''.join(sequence), "<>" * 40)
def test_blocking_preserves_ownership(self):
pthread1_event = eventlet.patcher.original('threading').Event()
pthread2_event1 = eventlet.patcher.original('threading').Event()
pthread2_event2 = eventlet.patcher.original('threading').Event()
thread_id = []
owner = []
def pthread1():
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
owner.append(self.mutex.owner)
pthread2_event1.set()
orig_os_write = utils.os.write
def patched_os_write(*a, **kw):
try:
return orig_os_write(*a, **kw)
finally:
pthread1_event.wait()
with mock.patch.object(utils.os, 'write', patched_os_write):
self.mutex.release()
pthread2_event2.set()
def pthread2():
pthread2_event1.wait() # ensure pthread1 acquires lock first
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
pthread1_event.set()
pthread2_event2.wait()
owner.append(self.mutex.owner)
self.mutex.release()
real_thread1 = eventlet.patcher.original('threading').Thread(
target=pthread1)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=pthread2)
real_thread2.start()
real_thread1.join()
real_thread2.join()
self.assertEqual(thread_id, owner)
self.assertIsNone(self.mutex.owner)
@classmethod
def tearDownClass(cls):
# PipeMutex turns this off when you instantiate one
eventlet.debug.hub_prevent_multiple_readers(True)
class TestDistributeEvenly(unittest.TestCase):
def test_evenly_divided(self):
out = utils.distribute_evenly(range(12), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8, 11],
])
out = utils.distribute_evenly(range(12), 4)
self.assertEqual(out, [
[0, 4, 8],
[1, 5, 9],
[2, 6, 10],
[3, 7, 11],
])
def test_uneven(self):
out = utils.distribute_evenly(range(11), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8],
])
def test_just_one(self):
out = utils.distribute_evenly(range(5), 1)
self.assertEqual(out, [[0, 1, 2, 3, 4]])
def test_more_buckets_than_items(self):
out = utils.distribute_evenly(range(5), 7)
self.assertEqual(out, [[0], [1], [2], [3], [4], [], []])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,391,450,644,393,529,000 | 40.295195 | 79 | 0.548559 | false |
varunarya10/python-ironicclient | ironicclient/client.py | 3 | 4613 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as ksclient
from ironicclient.common import utils
from ironicclient import exc
from ironicclient.openstack.common import gettextutils
gettextutils.install('ironicclient')
def _get_ksclient(**kwargs):
"""Get an endpoint and auth token from Keystone.
:param kwargs: keyword args containing credentials:
* username: name of user
* password: user's password
* auth_url: endpoint to authenticate against
* insecure: allow insecure SSL (no cert verification)
* tenant_{name|id}: name or ID of tenant
"""
return ksclient.Client(username=kwargs.get('username'),
password=kwargs.get('password'),
tenant_id=kwargs.get('tenant_id'),
tenant_name=kwargs.get('tenant_name'),
auth_url=kwargs.get('auth_url'),
insecure=kwargs.get('insecure'))
def _get_endpoint(client, **kwargs):
"""Get an endpoint using the provided keystone client."""
attr = None
filter_value = None
if kwargs.get('region_name'):
attr = 'region'
filter_value = kwargs.get('region_name')
return client.service_catalog.url_for(
service_type=kwargs.get('service_type') or 'baremetal',
attr=attr,
filter_value=filter_value,
endpoint_type=kwargs.get('endpoint_type') or 'publicURL')
def get_client(api_version, **kwargs):
"""Get an authenticated client, based on the credentials in args.
:param api_version: the API version to use. Valid value: '1'.
:param kwargs: keyword args containing credentials, either:
* os_auth_token: pre-existing token to re-use
* ironic_url: ironic API endpoint
or:
* os_username: name of user
* os_password: user's password
* os_auth_url: endpoint to authenticate against
* insecure: allow insecure SSL (no cert verification)
* os_tenant_{name|id}: name or ID of tenant
"""
if kwargs.get('os_auth_token') and kwargs.get('ironic_url'):
token = kwargs.get('os_auth_token')
endpoint = kwargs.get('ironic_url')
auth_ref = None
elif (kwargs.get('os_username') and
kwargs.get('os_password') and
kwargs.get('os_auth_url') and
(kwargs.get('os_tenant_id') or kwargs.get('os_tenant_name'))):
ks_kwargs = {
'username': kwargs.get('os_username'),
'password': kwargs.get('os_password'),
'tenant_id': kwargs.get('os_tenant_id'),
'tenant_name': kwargs.get('os_tenant_name'),
'auth_url': kwargs.get('os_auth_url'),
'service_type': kwargs.get('os_service_type'),
'endpoint_type': kwargs.get('os_endpoint_type'),
'insecure': kwargs.get('insecure'),
}
_ksclient = _get_ksclient(**ks_kwargs)
token = (kwargs.get('os_auth_token')
if kwargs.get('os_auth_token')
else _ksclient.auth_token)
ks_kwargs['region_name'] = kwargs.get('os_region_name')
endpoint = (kwargs.get('ironic_url') or
_get_endpoint(_ksclient, **ks_kwargs))
auth_ref = _ksclient.auth_ref
else:
e = (_('Must provide Keystone credentials or user-defined endpoint '
'and token'))
raise exc.AmbiguousAuthSystem(e)
cli_kwargs = {
'token': token,
'insecure': kwargs.get('insecure'),
'timeout': kwargs.get('timeout'),
'ca_file': kwargs.get('ca_file'),
'cert_file': kwargs.get('cert_file'),
'key_file': kwargs.get('key_file'),
'auth_ref': auth_ref,
}
return Client(api_version, endpoint, **cli_kwargs)
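# Added illustrative note (not part of the original module): get_client() can
# be driven either with a pre-existing token plus the Ironic endpoint, or with
# full Keystone credentials; the URLs and credential values below are
# placeholders, not real deployments.
#
#   ironic = get_client('1', os_auth_token='<token>',
#                       ironic_url='http://ironic.example.com:6385/')
#
#   ironic = get_client('1', os_username='admin', os_password='secret',
#                       os_tenant_name='admin',
#                       os_auth_url='http://keystone.example.com:5000/v2.0')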
def Client(version, *args, **kwargs):
module = utils.import_versioned_module(version, 'client')
client_class = getattr(module, 'Client')
return client_class(*args, **kwargs)
| apache-2.0 | 4,589,553,237,861,276,700 | 36.811475 | 78 | 0.603945 | false |
amohanta/miasm | miasm2/expression/expression_helper.py | 5 | 18131 | #
# Copyright (C) 2011 EADS France, Fabrice Desclaux <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Expression manipulation functions
import itertools
import collections
import random
import string
import miasm2.expression.expression as m2_expr
def parity(a):
tmp = (a) & 0xFFL
cpt = 1
while tmp != 0:
cpt ^= tmp & 1
tmp >>= 1
return cpt
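# Added note: parity() follows the even-parity convention used for the x86
# parity flag -- it returns 1 when the low byte of `a` contains an even number
# of set bits and 0 otherwise, e.g. parity(0x3) == 1 and parity(0x1) == 0.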
def merge_sliceto_slice(args):
sources = {}
non_slice = {}
sources_int = {}
for a in args:
if isinstance(a[0], m2_expr.ExprInt):
# sources_int[a.start] = a
            # copy the ExprInt because we modify arg in place just below
            # /!\ TODO XXX never ever modify args in place...
sources_int[a[1]] = (m2_expr.ExprInt_fromsize(a[2] - a[1],
a[0].arg.__class__(
a[0].arg)),
a[1],
a[2])
elif isinstance(a[0], m2_expr.ExprSlice):
if not a[0].arg in sources:
sources[a[0].arg] = []
sources[a[0].arg].append(a)
else:
non_slice[a[1]] = a
# find max stop to determine size
max_size = None
for a in args:
if max_size is None or max_size < a[2]:
max_size = a[2]
# first simplify all num slices
final_sources = []
sorted_s = []
for x in sources_int.values():
x = list(x)
# mask int
v = x[0].arg & ((1 << (x[2] - x[1])) - 1)
x[0] = m2_expr.ExprInt_from(x[0], v)
x = tuple(x)
sorted_s.append((x[1], x))
sorted_s.sort()
while sorted_s:
start, v = sorted_s.pop()
out = [m2_expr.ExprInt(v[0].arg), v[1], v[2]]
size = v[2] - v[1]
while sorted_s:
if sorted_s[-1][1][2] != start:
break
s_start, s_stop = sorted_s[-1][1][1], sorted_s[-1][1][2]
size += s_stop - s_start
a = m2_expr.mod_size2uint[size](
(int(out[0].arg) << (out[1] - s_start)) +
int(sorted_s[-1][1][0].arg))
out[0] = m2_expr.ExprInt(a)
sorted_s.pop()
out[1] = s_start
out[0] = m2_expr.ExprInt_fromsize(size, out[0].arg)
final_sources.append((start, out))
final_sources_int = final_sources
# check if same sources have corresponding start/stop
# is slice AND is sliceto
simp_sources = []
for args in sources.values():
final_sources = []
sorted_s = []
for x in args:
sorted_s.append((x[1], x))
sorted_s.sort()
while sorted_s:
start, v = sorted_s.pop()
ee = v[0].arg[v[0].start:v[0].stop]
out = ee, v[1], v[2]
while sorted_s:
if sorted_s[-1][1][2] != start:
break
if sorted_s[-1][1][0].stop != out[0].start:
break
start = sorted_s[-1][1][1]
# out[0].start = sorted_s[-1][1][0].start
o_e, _, o_stop = out
o1, o2 = sorted_s[-1][1][0].start, o_e.stop
o_e = o_e.arg[o1:o2]
out = o_e, start, o_stop
# update _size
# out[0]._size = out[0].stop-out[0].start
sorted_s.pop()
out = out[0], start, out[2]
final_sources.append((start, out))
simp_sources += final_sources
simp_sources += final_sources_int
for i, v in non_slice.items():
simp_sources.append((i, v))
simp_sources.sort()
simp_sources = [x[1] for x in simp_sources]
return simp_sources
op_propag_cst = ['+', '*', '^', '&', '|', '>>',
'<<', "a>>", ">>>", "<<<",
"/", "%", 'idiv', 'imod', 'umod', 'udiv']
def is_pure_int(e):
"""
    return True if expr is only composed of integers
    /!\ ExprCond returns True if src1 and src2 are integers
"""
def modify_cond(e):
if isinstance(e, m2_expr.ExprCond):
return e.src1 | e.src2
return e
def find_int(e, s):
if isinstance(e, m2_expr.ExprId) or isinstance(e, m2_expr.ExprMem):
s.add(e)
return e
s = set()
new_e = e.visit(modify_cond)
new_e.visit(lambda x: find_int(x, s))
if s:
return False
return True
def is_int_or_cond_src_int(e):
if isinstance(e, m2_expr.ExprInt):
return True
if isinstance(e, m2_expr.ExprCond):
return (isinstance(e.src1, m2_expr.ExprInt) and
isinstance(e.src2, m2_expr.ExprInt))
return False
def fast_unify(seq, idfun=None):
# order preserving unifying list function
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
def get_missing_interval(all_intervals, i_min=0, i_max=32):
"""Return a list of missing interval in all_interval
@all_interval: list of (int, int)
@i_min: int, minimal missing interval bound
@i_max: int, maximal missing interval bound"""
my_intervals = all_intervals[:]
my_intervals.sort()
my_intervals.append((i_max, i_max))
missing_i = []
last_pos = i_min
for start, stop in my_intervals:
if last_pos != start:
missing_i.append((last_pos, start))
last_pos = stop
return missing_i
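# Illustrative sketch (added example, not part of the original module): given
# the covered ranges [0, 4) and [8, 16) inside [0, 32), the remaining holes
# are [4, 8) and [16, 32).
def _example_get_missing_interval():
    holes = get_missing_interval([(0, 4), (8, 16)], i_min=0, i_max=32)
    assert holes == [(4, 8), (16, 32)]
    return holes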
class Variables_Identifier(object):
"""Identify variables in an expression.
Returns:
- variables with their corresponding values
- original expression with variables translated
"""
# Attribute used to distinguish created variables from original ones
is_var_ident = "is_var_ident"
def __init__(self, expr, var_prefix="v"):
"""Set the expression @expr to handle and launch variable identification
process
@expr: Expr instance
@var_prefix: (optional) prefix of the variable name, default is 'v'"""
# Init
self.var_indice = itertools.count()
self.var_asked = set()
self._vars = {} # VarID -> Expr
self.var_prefix = var_prefix
# Launch recurrence
self.find_variables_rec(expr)
# Compute inter-variable dependencies
has_change = True
while has_change:
has_change = False
for var_id, var_value in self._vars.iteritems():
cur = var_value
# Do not replace with itself
to_replace = {v_val:v_id
for v_id, v_val in self._vars.iteritems()
if v_id != var_id}
var_value = var_value.replace_expr(to_replace)
if cur != var_value:
# Force @self._vars update
has_change = True
self._vars[var_id] = var_value
break
# Replace in the original equation
self._equation = expr.replace_expr({v_val: v_id for v_id, v_val
in self._vars.iteritems()})
# Compute variables dependencies
self._vars_ordered = collections.OrderedDict()
todo = set(self._vars.iterkeys())
needs = {}
## Build initial needs
for var_id, var_expr in self._vars.iteritems():
### Handle corner cases while using Variable Identifier on an
### already computed equation
needs[var_id] = [var_name
for var_name in var_expr.get_r(mem_read=True)
if self.is_var_identifier(var_name) and \
var_name in todo and \
var_name != var_id]
## Build order list
while todo:
done = set()
for var_id in todo:
all_met = True
for need in needs[var_id]:
if need not in self._vars_ordered:
# A dependency is not met
all_met = False
break
if not all_met:
continue
# All dependencies are already met, add current
self._vars_ordered[var_id] = self._vars[var_id]
done.add(var_id)
# Update the todo list
for element_done in done:
todo.remove(element_done)
@classmethod
def is_var_identifier(cls, expr):
"Return True iff @expr is a variable identifier"
if not isinstance(expr, m2_expr.ExprId):
return False
return hasattr(expr, cls.is_var_ident) and \
getattr(expr, cls.is_var_ident) == True
def find_variables_rec(self, expr):
"""Recursive method called by find_variable to expand @expr.
Set @var_names and @var_values.
This implementation is faster than an expression visitor because
we do not rebuild each expression.
"""
if (expr in self.var_asked):
# Expr has already been asked
if (expr not in self._vars.values()):
# Create var
identifier = m2_expr.ExprId("%s%s" % (self.var_prefix,
self.var_indice.next()),
size = expr.size)
setattr(identifier, self.__class__.is_var_ident, True)
self._vars[identifier] = expr
# Recursion stop case
return
else:
# First time for @expr
self.var_asked.add(expr)
if isinstance(expr, m2_expr.ExprOp):
for a in expr.args:
self.find_variables_rec(a)
elif isinstance(expr, m2_expr.ExprInt):
pass
elif isinstance(expr, m2_expr.ExprId):
pass
elif isinstance(expr, m2_expr.ExprMem):
self.find_variables_rec(expr.arg)
elif isinstance(expr, m2_expr.ExprCompose):
for a in expr.args:
self.find_variables_rec(list(a)[0])
elif isinstance(expr, m2_expr.ExprSlice):
self.find_variables_rec(expr.arg)
elif isinstance(expr, m2_expr.ExprCond):
self.find_variables_rec(expr.cond)
self.find_variables_rec(expr.src1)
self.find_variables_rec(expr.src2)
else:
raise NotImplementedError("Type not handled: %s" % expr)
@property
def vars(self):
return self._vars_ordered
@property
def equation(self):
return self._equation
def __str__(self):
"Display variables and final equation"
out = ""
for var_id, var_expr in self.vars.iteritems():
out += "%s = %s\n" % (var_id, var_expr)
out += "Final: %s" % self.equation
return out
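# Illustrative sketch (added example, not part of the original module): factor
# a repeated sub-expression out of a larger one. This assumes miasm2's
# operator overloading on expressions (`+`, `*` building ExprOp nodes), as
# used elsewhere in this code base.
def _example_variables_identifier():
    a = m2_expr.ExprId('a', 32)
    b = m2_expr.ExprId('b', 32)
    expr = (a + b) * (a + b)
    vi = Variables_Identifier(expr)
    # vi.vars maps generated identifiers (v0, ...) to the factored values and
    # vi.equation is the original expression rewritten over those identifiers.
    return vi.vars, vi.equation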
class ExprRandom(object):
"""Return an expression randomly generated"""
# Identifiers length
identifier_len = 5
# Identifiers' name charset
identifier_charset = string.letters
# Number max value
number_max = 0xFFFFFFFF
# Available operations
operations_by_args_number = {1: ["-"],
2: ["<<", "<<<", ">>", ">>>"],
"2+": ["+", "*", "&", "|", "^"],
}
# Maximum number of argument for operations
operations_max_args_number = 5
# If set, output expression is a perfect tree
perfect_tree = True
# Max argument size in slice, relative to slice size
slice_add_size = 10
# Maximum number of layer in compose
compose_max_layer = 5
# Maximum size of memory address in bits
memory_max_address_size = 32
# Re-use already generated elements to mimic a more realistic behavior
reuse_element = True
generated_elements = {} # (depth, size) -> [Expr]
@classmethod
def identifier(cls, size=32):
"""Return a random identifier
@size: (optional) identifier size
"""
return m2_expr.ExprId("".join([random.choice(cls.identifier_charset)
for _ in xrange(cls.identifier_len)]),
size=size)
@classmethod
def number(cls, size=32):
"""Return a random number
@size: (optional) number max bits
"""
num = random.randint(0, cls.number_max % (2**size))
return m2_expr.ExprInt_fromsize(size, num)
@classmethod
def atomic(cls, size=32):
"""Return an atomic Expression
@size: (optional) Expr size
"""
available_funcs = [cls.identifier, cls.number]
return random.choice(available_funcs)(size=size)
@classmethod
def operation(cls, size=32, depth=1):
"""Return an ExprOp
@size: (optional) Operation size
@depth: (optional) Expression depth
"""
operand_type = random.choice(cls.operations_by_args_number.keys())
if isinstance(operand_type, str) and "+" in operand_type:
number_args = random.randint(int(operand_type[:-1]),
cls.operations_max_args_number)
else:
number_args = operand_type
args = [cls._gen(size=size, depth=depth - 1)
for _ in xrange(number_args)]
operand = random.choice(cls.operations_by_args_number[operand_type])
return m2_expr.ExprOp(operand,
*args)
@classmethod
def slice(cls, size=32, depth=1):
"""Return an ExprSlice
@size: (optional) Operation size
@depth: (optional) Expression depth
"""
start = random.randint(0, size)
stop = start + size
return cls._gen(size=random.randint(stop, stop + cls.slice_add_size),
depth=depth - 1)[start:stop]
@classmethod
def compose(cls, size=32, depth=1):
"""Return an ExprCompose
@size: (optional) Operation size
@depth: (optional) Expression depth
"""
# First layer
upper_bound = random.randint(1, size)
args = [(cls._gen(size=upper_bound, depth=depth - 1), 0, upper_bound)]
# Next layers
while (upper_bound < size):
if len(args) == (cls.compose_max_layer - 1):
# We reach the maximum size
upper_bound = size
else:
upper_bound = random.randint(args[-1][-1] + 1, size)
args.append((cls._gen(size=upper_bound - args[-1][-1]),
args[-1][-1],
upper_bound))
return m2_expr.ExprCompose(args)
@classmethod
def memory(cls, size=32, depth=1):
"""Return an ExprMem
@size: (optional) Operation size
@depth: (optional) Expression depth
"""
address_size = random.randint(1, cls.memory_max_address_size)
return m2_expr.ExprMem(cls._gen(size=address_size,
depth=depth - 1),
size=size)
@classmethod
def _gen(cls, size=32, depth=1):
"""Internal function for generating sub-expression according to options
@size: (optional) Operation size
@depth: (optional) Expression depth
/!\ @generated_elements is left modified
"""
# Perfect tree handling
if not cls.perfect_tree:
depth = random.randint(max(0, depth - 2), depth)
# Element re-use
if cls.reuse_element and random.choice([True, False]) and \
(depth, size) in cls.generated_elements:
return random.choice(cls.generated_elements[(depth, size)])
# Recursion stop
if depth == 0:
return cls.atomic(size=size)
# Build a more complex expression
available_funcs = [cls.operation, cls.slice, cls.compose, cls.memory]
gen = random.choice(available_funcs)(size=size, depth=depth)
# Save it
new_value = cls.generated_elements.get((depth, size), []) + [gen]
cls.generated_elements[(depth, size)] = new_value
return gen
@classmethod
def get(cls, size=32, depth=1, clean=True):
"""Return a randomly generated expression
@size: (optional) Operation size
@depth: (optional) Expression depth
@clean: (optional) Clean expression cache between two calls
"""
# Init state
if clean:
cls.generated_elements = {}
# Get an element
got = cls._gen(size=size, depth=depth)
# Clear state
if clean:
cls.generated_elements = {}
return got
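# Illustrative sketch (added example, not part of the original module): draw a
# random 32-bit expression of depth 3. The exact shape differs on every call;
# only the requested size is stable.
def _example_expr_random():
    expr = ExprRandom.get(size=32, depth=3)
    assert expr.size == 32
    return expr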
def _expr_cmp_gen(arg1, arg2):
return (arg2 - arg1) ^ ((arg2 ^ arg1) & ((arg2 - arg1) ^ arg2))
def expr_cmpu(arg1, arg2):
"""
Returns a one bit long Expression:
* 1 if @arg1 is strictly greater than @arg2 (unsigned)
* 0 otherwise.
"""
return (_expr_cmp_gen(arg1, arg2) ^ arg2 ^ arg1).msb()
def expr_cmps(arg1, arg2):
"""
Returns a one bit long Expression:
* 1 if @arg1 is strictly greater than @arg2 (signed)
* 0 otherwise.
"""
return _expr_cmp_gen(arg1, arg2).msb()
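if __name__ == '__main__':
    # Illustrative sketch (added example, not part of the original module):
    # build symbolic "greater than" predicates between two 32-bit registers.
    # Both helpers return 1-bit expressions suitable for use in an ExprCond.
    x = m2_expr.ExprId('x', 32)
    y = m2_expr.ExprId('y', 32)
    print(expr_cmpu(x, y))  # unsigned x > y, as a 1-bit expression
    print(expr_cmps(x, y))  # signed x > y, as a 1-bit expression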
| gpl-2.0 | 4,514,579,523,865,483,300 | 31.727437 | 80 | 0.534995 | false |
owaiskhan/Retransmission-Combining | gnuradio-core/src/python/gnuradio/gr/qa_nlog10.py | 11 | 1510 | #!/usr/bin/env python
#
# Copyright 2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_nlog10(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
src_data = (-10, 0, 10, 100, 1000, 10000, 100000)
expected_result = (-180, -180, 10, 20, 30, 40, 50)
src = gr.vector_source_f(src_data)
op = gr.nlog10_ff(10)
dst = gr.vector_sink_f()
self.tb.connect (src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual (expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_nlog10, "test_nlog10.xml")
| gpl-3.0 | 2,707,806,410,430,273,500 | 31.12766 | 72 | 0.66755 | false |
Hoekz/hackness-monster | venv/lib/python2.7/site-packages/jinja2/parser.py | 336 | 35442 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
        tuples, this function can parse all allowed assignment targets. By
        default assignments to tuples are parsed; that can be disabled
        by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif (self.stream.current.test('name:not') and
self.stream.look().test('name:in')):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular (non-tuple) expression if no
        commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
        only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif (self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not
self.stream.current.test_any('name:else', 'name:or',
'name:and')):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
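if __name__ == '__main__':
    # Illustrative sketch (added example, not part of the original module):
    # parse a template source into its abstract syntax tree using the Parser
    # defined above. The import is done lazily here to avoid a circular
    # import at module load time.
    from jinja2 import Environment
    env = Environment()
    tree = Parser(env, u'{% for item in seq %}{{ item }}{% endfor %}').parse()
    print(tree)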
| mit | -3,097,034,657,908,402,000 | 38.423804 | 82 | 0.545398 | false |
patrioticcow/MessagesForSkype | packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/idlelib/PyParse.py | 185 | 19510 | import re
import sys
# Reason last stmt is continued (or C_NONE if it's not).
(C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE,
C_STRING_NEXT_LINES, C_BRACKET) = range(5)
if 0: # for throwaway debugging output
def dump(*stuff):
sys.__stdout__.write(" ".join(map(str, stuff)) + "\n")
# Find what looks like the start of a popular stmt.
_synchre = re.compile(r"""
^
[ \t]*
(?: while
| else
| def
| return
| assert
| break
| class
| continue
| elif
| try
| except
| raise
| import
| yield
)
\b
""", re.VERBOSE | re.MULTILINE).search
# Match blank line or non-indenting comment line.
_junkre = re.compile(r"""
[ \t]*
(?: \# \S .* )?
\n
""", re.VERBOSE).match
# Match any flavor of string; the terminating quote is optional
# so that we're robust in the face of incomplete program text.
_match_stringre = re.compile(r"""
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
(?: \""" )?
| " [^"\\\n]* (?: \\. [^"\\\n]* )* "?
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
(?: ''' )?
| ' [^'\\\n]* (?: \\. [^'\\\n]* )* '?
""", re.VERBOSE | re.DOTALL).match
# Match a line that starts with something interesting;
# used to find the first item of a bracket structure.
_itemre = re.compile(r"""
[ \t]*
[^\s#\\] # if we match, m.end()-1 is the interesting char
""", re.VERBOSE).match
# Match start of stmts that should be followed by a dedent.
_closere = re.compile(r"""
\s*
(?: return
| break
| continue
| raise
| pass
)
\b
""", re.VERBOSE).match
# Chew up non-special chars as quickly as possible. If match is
# successful, m.end() less 1 is the index of the last boring char
# matched. If match is unsuccessful, the string starts with an
# interesting char.
_chew_ordinaryre = re.compile(r"""
[^[\](){}#'"\\]+
""", re.VERBOSE).match
# Build translation table to map uninteresting chars to "x", open
# brackets to "(", and close brackets to ")".
_tran = ['x'] * 256
for ch in "({[":
_tran[ord(ch)] = '('
for ch in ")}]":
_tran[ord(ch)] = ')'
for ch in "\"'\\\n#":
_tran[ord(ch)] = ch
_tran = ''.join(_tran)
del ch
try:
UnicodeType = type(unicode(""))
except NameError:
UnicodeType = None
class Parser:
def __init__(self, indentwidth, tabwidth):
self.indentwidth = indentwidth
self.tabwidth = tabwidth
def set_str(self, str):
assert len(str) == 0 or str[-1] == '\n'
if type(str) is UnicodeType:
# The parse functions have no idea what to do with Unicode, so
# replace all Unicode characters with "x". This is "safe"
# so long as the only characters germane to parsing the structure
# of Python are 7-bit ASCII. It's *necessary* because Unicode
# strings don't have a .translate() method that supports
# deletechars.
uniphooey = str
str = []
push = str.append
for raw in map(ord, uniphooey):
push(raw < 127 and chr(raw) or "x")
str = "".join(str)
self.str = str
self.study_level = 0
# Return index of a good place to begin parsing, as close to the
# end of the string as possible. This will be the start of some
# popular stmt like "if" or "def". Return None if none found:
# the caller should pass more prior context then, if possible, or
# if not (the entire program text up until the point of interest
# has already been tried) pass 0 to set_lo.
#
# This will be reliable iff given a reliable is_char_in_string
# function, meaning that when it says "no", it's absolutely
# guaranteed that the char is not in a string.
def find_good_parse_start(self, is_char_in_string=None,
_synchre=_synchre):
str, pos = self.str, None
if not is_char_in_string:
# no clue -- make the caller pass everything
return None
# Peek back from the end for a good place to start,
# but don't try too often; pos will be left None, or
# bumped to a legitimate synch point.
limit = len(str)
for tries in range(5):
i = str.rfind(":\n", 0, limit)
if i < 0:
break
i = str.rfind('\n', 0, i) + 1 # start of colon line
m = _synchre(str, i, limit)
if m and not is_char_in_string(m.start()):
pos = m.start()
break
limit = i
if pos is None:
# Nothing looks like a block-opener, or stuff does
# but is_char_in_string keeps returning true; most likely
# we're in or near a giant string, the colorizer hasn't
# caught up enough to be helpful, or there simply *aren't*
# any interesting stmts. In any of these cases we're
# going to have to parse the whole thing to be sure, so
# give it one last try from the start, but stop wasting
# time here regardless of the outcome.
m = _synchre(str)
if m and not is_char_in_string(m.start()):
pos = m.start()
return pos
# Peeking back worked; look forward until _synchre no longer
# matches.
i = pos + 1
while 1:
m = _synchre(str, i)
if m:
s, i = m.span()
if not is_char_in_string(s):
pos = s
else:
break
return pos
# Throw away the start of the string. Intended to be called with
# find_good_parse_start's result.
def set_lo(self, lo):
assert lo == 0 or self.str[lo-1] == '\n'
if lo > 0:
self.str = self.str[lo:]
# As quickly as humanly possible <wink>, find the line numbers (0-
# based) of the non-continuation lines.
# Creates self.{goodlines, continuation}.
def _study1(self):
if self.study_level >= 1:
return
self.study_level = 1
# Map all uninteresting characters to "x", all open brackets
# to "(", all close brackets to ")", then collapse runs of
# uninteresting characters. This can cut the number of chars
# by a factor of 10-40, and so greatly speed the following loop.
str = self.str
str = str.translate(_tran)
str = str.replace('xxxxxxxx', 'x')
str = str.replace('xxxx', 'x')
str = str.replace('xx', 'x')
str = str.replace('xx', 'x')
str = str.replace('\nx', '\n')
# note that replacing x\n with \n would be incorrect, because
# x may be preceded by a backslash
# March over the squashed version of the program, accumulating
# the line numbers of non-continued stmts, and determining
# whether & why the last stmt is a continuation.
continuation = C_NONE
level = lno = 0 # level is nesting level; lno is line number
self.goodlines = goodlines = [0]
push_good = goodlines.append
i, n = 0, len(str)
while i < n:
ch = str[i]
i = i+1
# cases are checked in decreasing order of frequency
if ch == 'x':
continue
if ch == '\n':
lno = lno + 1
if level == 0:
push_good(lno)
# else we're in an unclosed bracket structure
continue
if ch == '(':
level = level + 1
continue
if ch == ')':
if level:
level = level - 1
# else the program is invalid, but we can't complain
continue
if ch == '"' or ch == "'":
# consume the string
quote = ch
if str[i-1:i+2] == quote * 3:
quote = quote * 3
firstlno = lno
w = len(quote) - 1
i = i+w
while i < n:
ch = str[i]
i = i+1
if ch == 'x':
continue
if str[i-1:i+w] == quote:
i = i+w
break
if ch == '\n':
lno = lno + 1
if w == 0:
# unterminated single-quoted string
if level == 0:
push_good(lno)
break
continue
if ch == '\\':
assert i < n
if str[i] == '\n':
lno = lno + 1
i = i+1
continue
# else comment char or paren inside string
else:
# didn't break out of the loop, so we're still
# inside a string
if (lno - 1) == firstlno:
# before the previous \n in str, we were in the first
# line of the string
continuation = C_STRING_FIRST_LINE
else:
continuation = C_STRING_NEXT_LINES
continue # with outer loop
if ch == '#':
# consume the comment
i = str.find('\n', i)
assert i >= 0
continue
assert ch == '\\'
assert i < n
if str[i] == '\n':
lno = lno + 1
if i+1 == n:
continuation = C_BACKSLASH
i = i+1
# The last stmt may be continued for all 3 reasons.
# String continuation takes precedence over bracket
# continuation, which beats backslash continuation.
if (continuation != C_STRING_FIRST_LINE
and continuation != C_STRING_NEXT_LINES and level > 0):
continuation = C_BRACKET
self.continuation = continuation
# Push the final line number as a sentinel value, regardless of
# whether it's continued.
assert (continuation == C_NONE) == (goodlines[-1] == lno)
if goodlines[-1] != lno:
push_good(lno)
def get_continuation_type(self):
self._study1()
return self.continuation
# study1 was sufficient to determine the continuation status,
# but doing more requires looking at every character. study2
# does this for the last interesting statement in the block.
# Creates:
# self.stmt_start, stmt_end
# slice indices of last interesting stmt
# self.stmt_bracketing
# the bracketing structure of the last interesting stmt;
# for example, for the statement "say(boo) or die", stmt_bracketing
# will be [(0, 0), (3, 1), (8, 0)]. Strings and comments are
    #      treated as brackets, for that matter.
# self.lastch
# last non-whitespace character before optional trailing
# comment
# self.lastopenbracketpos
# if continuation is C_BRACKET, index of last open bracket
def _study2(self):
if self.study_level >= 2:
return
self._study1()
self.study_level = 2
# Set p and q to slice indices of last interesting stmt.
str, goodlines = self.str, self.goodlines
i = len(goodlines) - 1
p = len(str) # index of newest line
while i:
assert p
# p is the index of the stmt at line number goodlines[i].
# Move p back to the stmt at line number goodlines[i-1].
q = p
for nothing in range(goodlines[i-1], goodlines[i]):
# tricky: sets p to 0 if no preceding newline
p = str.rfind('\n', 0, p-1) + 1
# The stmt str[p:q] isn't a continuation, but may be blank
# or a non-indenting comment line.
if _junkre(str, p):
i = i-1
else:
break
if i == 0:
# nothing but junk!
assert p == 0
q = p
self.stmt_start, self.stmt_end = p, q
# Analyze this stmt, to find the last open bracket (if any)
# and last interesting character (if any).
lastch = ""
stack = [] # stack of open bracket indices
push_stack = stack.append
bracketing = [(p, 0)]
while p < q:
# suck up all except ()[]{}'"#\\
m = _chew_ordinaryre(str, p, q)
if m:
# we skipped at least one boring char
newp = m.end()
# back up over totally boring whitespace
i = newp - 1 # index of last boring char
while i >= p and str[i] in " \t\n":
i = i-1
if i >= p:
lastch = str[i]
p = newp
if p >= q:
break
ch = str[p]
if ch in "([{":
push_stack(p)
bracketing.append((p, len(stack)))
lastch = ch
p = p+1
continue
if ch in ")]}":
if stack:
del stack[-1]
lastch = ch
p = p+1
bracketing.append((p, len(stack)))
continue
if ch == '"' or ch == "'":
# consume string
# Note that study1 did this with a Python loop, but
# we use a regexp here; the reason is speed in both
# cases; the string may be huge, but study1 pre-squashed
# strings to a couple of characters per line. study1
# also needed to keep track of newlines, and we don't
# have to.
bracketing.append((p, len(stack)+1))
lastch = ch
p = _match_stringre(str, p, q).end()
bracketing.append((p, len(stack)))
continue
if ch == '#':
# consume comment and trailing newline
bracketing.append((p, len(stack)+1))
p = str.find('\n', p, q) + 1
assert p > 0
bracketing.append((p, len(stack)))
continue
assert ch == '\\'
p = p+1 # beyond backslash
assert p < q
if str[p] != '\n':
# the program is invalid, but can't complain
lastch = ch + str[p]
p = p+1 # beyond escaped char
# end while p < q:
self.lastch = lastch
if stack:
self.lastopenbracketpos = stack[-1]
self.stmt_bracketing = tuple(bracketing)
# Assuming continuation is C_BRACKET, return the number
# of spaces the next line should be indented.
def compute_bracket_indent(self):
self._study2()
assert self.continuation == C_BRACKET
j = self.lastopenbracketpos
str = self.str
n = len(str)
origi = i = str.rfind('\n', 0, j) + 1
j = j+1 # one beyond open bracket
# find first list item; set i to start of its line
while j < n:
m = _itemre(str, j)
if m:
j = m.end() - 1 # index of first interesting char
extra = 0
break
else:
# this line is junk; advance to next line
i = j = str.find('\n', j) + 1
else:
# nothing interesting follows the bracket;
# reproduce the bracket line's indentation + a level
j = i = origi
while str[j] in " \t":
j = j+1
extra = self.indentwidth
return len(str[i:j].expandtabs(self.tabwidth)) + extra
# Return number of physical lines in last stmt (whether or not
# it's an interesting stmt! this is intended to be called when
# continuation is C_BACKSLASH).
def get_num_lines_in_stmt(self):
self._study1()
goodlines = self.goodlines
return goodlines[-1] - goodlines[-2]
# Assuming continuation is C_BACKSLASH, return the number of spaces
# the next line should be indented. Also assuming the new line is
# the first one following the initial line of the stmt.
def compute_backslash_indent(self):
self._study2()
assert self.continuation == C_BACKSLASH
str = self.str
i = self.stmt_start
while str[i] in " \t":
i = i+1
startpos = i
# See whether the initial line starts an assignment stmt; i.e.,
# look for an = operator
endpos = str.find('\n', startpos) + 1
found = level = 0
while i < endpos:
ch = str[i]
if ch in "([{":
level = level + 1
i = i+1
elif ch in ")]}":
if level:
level = level - 1
i = i+1
elif ch == '"' or ch == "'":
i = _match_stringre(str, i, endpos).end()
elif ch == '#':
break
elif level == 0 and ch == '=' and \
(i == 0 or str[i-1] not in "=<>!") and \
str[i+1] != '=':
found = 1
break
else:
i = i+1
if found:
# found a legit =, but it may be the last interesting
# thing on the line
i = i+1 # move beyond the =
found = re.match(r"\s*\\", str[i:endpos]) is None
if not found:
# oh well ... settle for moving beyond the first chunk
# of non-whitespace chars
i = startpos
while str[i] not in " \t\n":
i = i+1
return len(str[self.stmt_start:i].expandtabs(\
self.tabwidth)) + 1
# Return the leading whitespace on the initial line of the last
# interesting stmt.
def get_base_indent_string(self):
self._study2()
i, n = self.stmt_start, self.stmt_end
j = i
str = self.str
while j < n and str[j] in " \t":
j = j + 1
return str[i:j]
# Did the last interesting stmt open a block?
def is_block_opener(self):
self._study2()
return self.lastch == ':'
# Did the last interesting stmt close a block?
def is_block_closer(self):
self._study2()
return _closere(self.str, self.stmt_start) is not None
# index of last open bracket ({[, or None if none
lastopenbracketpos = None
def get_last_open_bracket_pos(self):
self._study2()
return self.lastopenbracketpos
# the structure of the bracketing of the last interesting statement,
# in the format defined in _study2, or None if the text didn't contain
# anything
stmt_bracketing = None
def get_last_stmt_bracketing(self):
self._study2()
return self.stmt_bracketing
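if __name__ == '__main__':
    # Illustrative sketch (added example, not part of the original module):
    # study a small code fragment the way IDLE's auto-indenter does.
    p = Parser(indentwidth=4, tabwidth=8)
    p.set_str("if x:\n")
    print(p.is_block_opener())                     # True: "if x:" ends in ':'
    print(p.get_continuation_type() == C_NONE)     # True: nothing is continued
    p.set_str("x = (1 +\n")
    print(p.get_continuation_type() == C_BRACKET)  # True: open '(' continues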
| mit | -4,247,327,307,918,890,500 | 31.845118 | 79 | 0.489288 | false |
davidvon/pipa-pay-server | site-packages/sqlalchemy/orm/scoping.py | 18 | 6109 | # orm/scoping.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session
__all__ = ['scoped_session']
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
using the session factory if not present.
:param \**kw: Keyword arguments will be passed to the
session factory callable, if an existing :class:`.Session`
is not present. If the :class:`.Session` is present and
keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
scope = kw.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kw)
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`.Query` object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush'):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(scoped_session, prop, clslevel(prop))
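# Illustrative usage sketch (added for clarity, not part of SQLAlchemy): shows
# how the scoped_session defined above is typically wired to an engine.  The
# in-memory SQLite URL and the variable names are assumptions for the example.
if __name__ == "__main__":  # pragma: no cover - example only
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite://")               # assumed example engine
    ExampleSession = scoped_session(sessionmaker(bind=engine))
    sess = ExampleSession()             # Session created lazily on first call
    assert ExampleSession() is sess     # same scope -> same Session instance
    ExampleSession.remove()             # close and discard the current Session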
| apache-2.0 | 2,887,174,544,559,926,300 | 33.710227 | 84 | 0.594696 | false |
zjuchenyuan/BioWeb | Lib/Bio/Index.py | 1 | 4980 | # Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Index.py
This module provides a way to create indexes to text files.
Classes:
Index Dictionary-like class used to store index information.
_ShelveIndex An Index class based on the shelve module.
_InMemoryIndex An in-memory Index class.
"""
import os
import array
import shelve
try:
import cPickle as pickle # Only available under Python 2
except ImportError:
import pickle # Python 3
class _ShelveIndex(dict):
"""An index file wrapped around shelve.
"""
# Without a good dbm module installed, this is pretty slow and
# generates large files. When generating an index on a FASTA-
# formatted file with 82000 sequences (37Mb), the
# index 'dat' file is 42Mb and 'dir' file is 8Mb.
__version = 2
__version_key = '__version'
def __init__(self, indexname, truncate=None):
dict.__init__(self)
try:
if truncate:
# In python 1.52 and before, dumbdbm (under shelve)
# doesn't clear the old database.
files = [indexname + '.dir',
indexname + '.dat',
indexname + '.bak'
]
for file in files:
if os.path.exists(file):
os.unlink(file)
raise Exception("open a new shelf")
self.data = shelve.open(indexname, flag='r')
except Exception: # TODO: Which exception?
# No database exists.
self.data = shelve.open(indexname, flag='n')
self.data[self.__version_key] = self.__version
else:
# Check to make sure the database is the correct version.
version = self.data.get(self.__version_key)
if version is None:
raise IOError("Unrecognized index format")
elif version != self.__version:
raise IOError("Version %s doesn't match my version %s"
% (version, self.__version))
def __del__(self):
if 'data' in self.__dict__:
self.data.close()
class _InMemoryIndex(dict):
"""This creates an in-memory index file.
"""
# File Format:
# version
# key value
# [...]
__version = 3
__version_key = '__version'
def __init__(self, indexname, truncate=None):
self._indexname = indexname
dict.__init__(self)
self.__changed = 0 # the index hasn't changed
# Remove the database if truncate is true.
if truncate and os.path.exists(indexname):
os.unlink(indexname)
self.__changed = 1
# Load the database if it exists
if os.path.exists(indexname):
with open(indexname) as handle:
version = self._toobj(handle.readline().rstrip())
if version != self.__version:
raise IOError("Version %s doesn't match my version %s"
% (version, self.__version))
for line in handle:
key, value = line.split()
key, value = self._toobj(key), self._toobj(value)
self[key] = value
self.__changed = 0
    def update(self, other):
        self.__changed = 1
        dict.update(self, other)
def __setitem__(self, key, value):
self.__changed = 1
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self.__changed = 1
dict.__delitem__(self, key)
def clear(self):
self.__changed = 1
dict.clear(self)
def __del__(self):
if self.__changed:
with open(self._indexname, 'w') as handle:
handle.write("%s\n" % self._tostr(self.__version))
for key, value in self.items():
handle.write("%s %s\n" %
(self._tostr(key), self._tostr(value)))
def _tostr(self, obj):
# I need a representation of the object that's saveable to
# a file that uses whitespace as delimiters. Thus, I'm
# going to pickle the object, and then convert each character of
# the string to its ASCII integer value. Then, I'm going to convert
# the integers into strings and join them together with commas.
# It's not the most efficient way of storing things, but it's
# relatively fast.
s = pickle.dumps(obj)
intlist = array.array('b', s)
return ','.join(str(i) for i in intlist)
def _toobj(self, str):
intlist = [int(i) for i in str.split(',')]
intlist = array.array('b', intlist)
return pickle.loads(''.join(chr(i) for i in intlist))
Index = _InMemoryIndex
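# Illustrative usage sketch (added for clarity, not part of Biopython): the
# index file name and keys are hypothetical, and the pickled round trip below
# reflects the Python 2 behaviour this module was written for.
#
#     index = Index("example.idx", truncate=1)   # start a fresh index file
#     index["record1"] = 0                       # e.g. byte offset of a record
#     del index                                  # __del__ flushes to disk
#     reloaded = Index("example.idx")
#     assert reloaded["record1"] == 0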
| mit | 5,823,405,821,340,564,000 | 32.2 | 76 | 0.554418 | false |
wyc/django | django/contrib/syndication/views.py | 192 | 8680 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404, HttpResponse
from django.template import TemplateDoesNotExist, loader
from django.utils import feedgenerator, six
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not url.startswith(('http://', 'https://', 'mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
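# For example (hypothetical values), add_domain("example.com", "/rss/", secure=True)
# returns "https://example.com/rss/", while a network-path reference such as
# "//cdn.example.com/feed" only gains the protocol: "https://cdn.example.com/feed".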
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.content_type)
if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
# if item_pubdate or item_updateddate is defined for the feed, set
# header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
'Give your %s class a get_absolute_url() method, or define an '
'item_link() method in your Feed class.' % item.__class__.__name__
)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.item_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title=self.__get_dynamic_attr('title', obj),
subtitle=self.__get_dynamic_attr('subtitle', obj),
link=link,
description=self.__get_dynamic_attr('description', obj),
language=settings.LANGUAGE_CODE,
feed_url=add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name=self.__get_dynamic_attr('author_name', obj),
author_link=self.__get_dynamic_attr('author_link', obj),
author_email=self.__get_dynamic_attr('author_email', obj),
categories=self.__get_dynamic_attr('categories', obj),
feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
feed_guid=self.__get_dynamic_attr('feed_guid', obj),
ttl=self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(context, request)
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(context, request)
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url=smart_text(enc_url),
length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
tz = get_default_timezone()
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
pubdate = make_aware(pubdate, tz)
updateddate = self.__get_dynamic_attr('item_updateddate', item)
if updateddate and is_naive(updateddate):
updateddate = make_aware(updateddate, tz)
feed.add_item(
title=title,
link=link,
description=description,
unique_id=self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink=self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure=enc,
pubdate=pubdate,
updateddate=updateddate,
author_name=author_name,
author_email=author_email,
author_link=author_link,
categories=self.__get_dynamic_attr('item_categories', item),
item_copyright=self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
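# Illustrative usage sketch (added for clarity): a minimal Feed subclass as it
# would appear in a project; the NewsItem model and URL pattern are
# hypothetical placeholders, not part of this module.
#
#     class LatestEntriesFeed(Feed):
#         title = "Example site news"
#         link = "/news/"
#         description = "Latest news items."
#         def items(self):
#             return NewsItem.objects.order_by("-pub_date")[:5]
#         def item_title(self, item):
#             return item.title
#         def item_description(self, item):
#             return item.summary
#
#     # urls.py: url(r'^news/rss/$', LatestEntriesFeed()),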
| bsd-3-clause | 5,143,231,024,307,370,000 | 39.185185 | 99 | 0.579954 | false |
royredman4/Rubik_Cube | Rubik_Info.py | 1 | 7722 | try:
# for Python 2
from Tkinter import *
except ImportError:
# for Python 3
from tkinter import *
import time
import Solving_algorithm
# Converts a set of coordinates into indexes in the cube
# returns square index horizontally and vertically (x,y)
def CoordinatesToIndex(coordinates):
    t = (coordinates[0] - 98) // 60
    i = (coordinates[1] - 90) // 60
    return [t, i]
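# Worked example (assuming the 60-pixel grid that starts at x=98, y=90): a
# click at pixel (158, 150) lands in the second column and second row, so
# CoordinatesToIndex([158, 150]) returns [1, 1].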
# Creates the cubes defaults at startup
def CreateCube():
'''
side1 = ["dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray"]
side2 = ["dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray"]
side3 = ["dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray"]
side4 = ["dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray"]
side5 = ["dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray",
"dark gray", "dark gray", "dark gray"]
frontside = ["dark gray", "dark gray", "dark gray",
"dark gray", "White", "dark gray",
"dark gray", "dark gray", "dark gray"]
'''
# If you want the pieces filled in (for testing purposes),
# Uncomment this section and comment out the previous section
'''
side1 = ["Yellow", "Yellow", "Yellow",
"Yellow", "Yellow", "Yellow",
"Yellow", "Yellow", "Yellow"]
side2 = ["Blue", "Blue", "Blue",
"Blue", "Blue", "Blue",
"Blue", "Blue", "Blue"]
side3 = ["Green", "Green", "Green",
"Green", "Green", "Green",
"Green", "Green", "Green"]
side4 = ["Red", "Red", "Red",
"Red", "Red", "Red",
"Red", "Red", "Red"]
side5 = ["Orange", "Orange", "Orange",
"Orange", "Orange", "Orange",
"Orange", "Orange", "Orange"]
frontside = ["White", "White", "White",
"White", "White", "White",
"White", "White", "White"]
'''
# For debugging turning up/down
'''
side1 = ["Green", "Green", "Green",
"Yellow", "Yellow", "Yellow",
"Blue", "Blue", "Blue"]
side2 = ["White", "White", "White",
"Blue", "Blue", "Blue",
"Yellow", "Yellow", "Yellow"]
side3 = ["Yellow", "Yellow", "Yellow",
"Green", "Green", "Green",
"White", "White", "White"]
side4 = ["Red", "Red", "Red",
"Red", "Red", "Red",
"Red", "Red", "Red"]
side5 = ["Orange", "Orange", "Orange",
"Orange", "Orange", "Orange",
"Orange", "Orange", "Orange"]
frontside = ["Green", "Green", "Green",
"White", "White", "White",
"Blue", "Blue", "Blue"]
'''
# For debugging turning left/right
'''
side1 = ["Blue", "Yellow", "White",
"Blue", "Yellow", "White",
"Blue", "Yellow", "White"]
side2 = ["Orange", "Orange", "Orange",
"Orange", "Orange", "Orange",
"Orange", "Orange", "Orange"]
side3 = ["Red", "Red", "Red",
"Red", "Red", "Red",
"Red", "Red", "Red"]
side4 = ["Green", "Blue", "White",
"Green", "Blue", "White",
"Green", "Blue", "White"]
side5 = ["Blue", "Green", "Yellow",
"Blue", "Green", "Yellow",
"Blue", "Green", "Yellow"]
frontside = ["Yellow", "White", "Green",
"Yellow", "White", "Green",
"Yellow", "White", "Green"]
'''
# For testing the yellow cross
side1 = ["White", "Orange", "Yellow",
"Green", "Orange", "Orange",
"White", "Blue", "Red"]
side2 = ["Green", "Green", "Orange",
"Yellow", "Blue", "Yellow",
"Blue", "White", "White"]
side3 = ["Red", "Yellow", "Green",
"Red", "Green", "Orange",
"Red", "Green", "Blue"]
side4 = ["Orange", "White", "Orange",
"White", "Yellow", "Green",
"Yellow", "Blue", "Green"]
side5 = ["Red", "Red", "Green",
"Red", "White", "Red",
"Yellow", "White", "Orange"]
frontside = ["Blue", "Orange", "Yellow",
"Blue", "Red", "Blue",
"Blue", "Yellow", "White"]
master = ["Front",frontside, "Back",side1, "Left",side2, "Right",side3, "Top",side4, "Bottom",side5]
return master
# Creates the GUI portion of the cube
# (creates all the colors on the screen
# for a cube's side).
def RubikSetup(canvas, colors):
i = 90
counter = 0
print(colors)
# time.sleep(10)
for z in range(0, 3):
t = 98
for q in range(0, 3):
canvas.create_rectangle(t, i, t+60, i+60, fill=colors[counter])
t += 60
counter += 1
i += 60
# Changes a single cube's color to the user's requested color on the screen
def ChangeColor(canvas, color, index):
    multiple = (index[0] - 98) // 60
    t = 98 + (60 * multiple)
    multiple = (index[1] - 90) // 60
    i = 90 + (60 * multiple)
canvas.create_rectangle(t, i, t+60, i+60, fill=color)
# Changes the color of an array from its original color to its new color
def Update_Array(Master, side, color, coordinates):
index = CoordinatesToIndex(coordinates)
# print(str(Master.index(side)+1))
print(str(index))
# time.sleep(10)
Master[index[0] + (index[1] * 3)] = color
def Before_After(canvas, all_sides, colors, temp):
canvas.delete("line")
if (temp[1] == "Down"):
x1 = 65
y1 = 80
x2 = 65
y2 = 40
if temp[2] == 1:
x1 += 50
x2 = x1
elif temp[2] == 2:
x1 += 100
x2 = x1
elif (temp[1] == "Up"):
x1 = 65
y1 = 260
x2 = 65
y2 = 290
if (temp[2] == 1):
x1 += 50
x2 = x1
elif (temp[2] == 2):
x1 += 100
x2 = x1
elif (temp[1] == "Left"):
x1 = 200
y1 = 115
x2 = 230
y2 = 115
if (temp[2] == 1):
y1 += 50
y2 = y1
elif (temp[2] == 2):
y1 += 100
y2 = y1
elif (temp[1] == "Right"):
x1 = 35
y1 = 115
x2 = 5
y2 = 115
if (temp[2] == 1):
y1 += 50
y2 = y1
elif (temp[2] == 2):
y1 += 100
y2 = y1
# Where you start the end of x, where you start the end of y (arrow spot)
# The lines x axis at the end, the y axis at the end (begin line spot)
w = canvas.create_line(x1, y1, x2, y2, arrow=FIRST, tag = "line")
print(colors)
# time.sleep(10)
for r in range(0, 2):
i = 90
counter = 0
if r ==1:
print("This spot needs to change the rubiks cube to the \"after\" section")
Solving_algorithm.Rotate_Cube(all_sides, temp[0], all_sides[all_sides.index("Front")+1], temp[2], temp[1], temp[3])
for z in range(0, 3):
if r == 1:
t = 260 #98
else:
t = 40
for q in range(0, 3):
canvas.create_rectangle(t, i, t+50, i+50, fill=colors[counter])
t += 50
counter += 1
#i += 60
i += 50
| gpl-3.0 | -2,129,485,634,200,387,600 | 27.6 | 127 | 0.462574 | false |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Uber/Estimates/GetPriceEstimates.py | 5 | 4198 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetPriceEstimates
# Returns an estimated price range for each product offered at a given location.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetPriceEstimates(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetPriceEstimates Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetPriceEstimates, self).__init__(temboo_session, '/Library/Uber/Estimates/GetPriceEstimates')
def new_input_set(self):
return GetPriceEstimatesInputSet()
def _make_result_set(self, result, path):
return GetPriceEstimatesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetPriceEstimatesChoreographyExecution(session, exec_id, path)
class GetPriceEstimatesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetPriceEstimates
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_EndLatitude(self, value):
"""
Set the value of the EndLatitude input for this Choreo. ((required, decimal) The latitude coordinate for the destination e.g., 40.650729.)
"""
super(GetPriceEstimatesInputSet, self)._set_input('EndLatitude', value)
def set_EndLongitude(self, value):
"""
Set the value of the EndLongitude input for this Choreo. ((required, decimal) The longitude coordinate for the destination e.g., -74.009536.)
"""
super(GetPriceEstimatesInputSet, self)._set_input('EndLongitude', value)
def set_ServerToken(self, value):
"""
Set the value of the ServerToken input for this Choreo. ((required, string) The Sever Token provided by Uber.)
"""
super(GetPriceEstimatesInputSet, self)._set_input('ServerToken', value)
def set_StartLatitude(self, value):
"""
Set the value of the StartLatitude input for this Choreo. ((required, decimal) The latitude coordinate for the starting location e.g., 40.71863.)
"""
super(GetPriceEstimatesInputSet, self)._set_input('StartLatitude', value)
def set_StartLongitude(self, value):
"""
Set the value of the StartLongitude input for this Choreo. ((required, decimal) The longitude coordinate for the starting location e.g., -74.005584.)
"""
super(GetPriceEstimatesInputSet, self)._set_input('StartLongitude', value)
class GetPriceEstimatesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetPriceEstimates Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Uber.)
"""
return self._output.get('Response', None)
class GetPriceEstimatesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetPriceEstimatesResultSet(response, path)
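# Illustrative usage sketch (added for clarity, not part of the generated
# module).  The TembooSession constructor and execute_with_results() call
# follow the usual Temboo SDK pattern assumed here; the credentials, server
# token and coordinates are placeholders.
#
#     from temboo.core.session import TembooSession
#     session = TembooSession("ACCOUNT", "APP_KEY_NAME", "APP_KEY_VALUE")
#     choreo = GetPriceEstimates(session)
#     inputs = choreo.new_input_set()
#     inputs.set_ServerToken("UBER_SERVER_TOKEN")
#     inputs.set_StartLatitude(40.71863)
#     inputs.set_StartLongitude(-74.005584)
#     inputs.set_EndLatitude(40.650729)
#     inputs.set_EndLongitude(-74.009536)
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())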
| gpl-3.0 | -8,830,977,029,070,973,000 | 40.156863 | 157 | 0.68223 | false |
lbeltrame/bcbio-nextgen | bcbio/structural/gatkcnv.py | 2 | 17264 | """Support for Copy Number Variations (CNVs) with GATK4
https://software.broadinstitute.org/gatk/documentation/article?id=11682
https://gatkforums.broadinstitute.org/dsde/discussion/11683/
"""
import glob
import os
import shutil
import numpy as np
import toolz as tz
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.variation import bedutils, vcfutils
def run(items, background=None):
"""Detect copy number variations from batched set of samples using GATK4 CNV calling.
TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller
"""
if not background: background = []
paired = vcfutils.get_paired(items + background)
if paired:
out = _run_paired(paired)
else:
out = items
logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" %
", ".join([dd.get_sample_name(d) for d in items + background]))
return out
def _run_paired(paired):
"""Run somatic variant calling pipeline.
"""
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out
def call_copy_numbers(seg_file, work_dir, data):
"""Call copy numbers from a normalized and segmented input file.
"""
out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CallCopyRatioSegments",
"-I", seg_file, "-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def plot_model_segments(seg_files, work_dir, data):
"""Diagnostic plots of segmentation and inputs.
"""
from bcbio.heterogeneity import chromhacks
out_file = os.path.join(work_dir, "%s.modeled.png" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
dict_file = utils.splitext_plus(dd.get_ref_file(data))[0] + ".dict"
plot_dict = os.path.join(os.path.dirname(tx_out_file), os.path.basename(dict_file))
with open(dict_file) as in_handle:
with open(plot_dict, "w") as out_handle:
for line in in_handle:
if line.startswith("@SQ"):
cur_chrom = [x.split(":", 1)[1].strip()
for x in line.split("\t") if x.startswith("SN:")][0]
if chromhacks.is_autosomal_or_sex(cur_chrom):
out_handle.write(line)
else:
out_handle.write(line)
params = ["-T", "PlotModeledSegments",
"--denoised-copy-ratios", tz.get_in(["depth", "bins", "normalized"], data),
"--segments", seg_files["final_seg"],
"--allelic-counts", seg_files["tumor_hets"],
"--sequence-dictionary", plot_dict,
"--minimum-contig-length", "10",
"--output-prefix", dd.get_sample_name(data),
"-O", os.path.dirname(tx_out_file)]
_run_with_memory_scaling(params, tx_out_file, data)
return {"seg": out_file}
def model_segments(copy_file, work_dir, paired):
"""Perform segmentation on input copy number log2 ratio file.
"""
out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data))
tumor_counts, normal_counts = heterogzygote_counts(paired)
if not utils.file_exists(out_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
params = ["-T", "ModelSegments",
"--denoised-copy-ratios", copy_file,
"--allelic-counts", tumor_counts,
"--output-prefix", dd.get_sample_name(paired.tumor_data),
"-O", os.path.dirname(tx_out_file)]
if normal_counts:
params += ["--normal-allelic-counts", normal_counts]
_run_with_memory_scaling(params, tx_out_file, paired.tumor_data)
for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),
"%s*" % dd.get_sample_name(paired.tumor_data))):
shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))
return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"),
"final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")}
def denoise(data, pon, work_dir):
"""Normalize read counts using panel of normal background or GC/mappability
"""
std_file = os.path.join(work_dir, "%s-crstandardized.tsv" % dd.get_sample_name(data))
denoise_file = os.path.join(work_dir, "%s-crdenoised.tsv" % dd.get_sample_name(data))
if not utils.file_exists(std_file):
with file_transaction(data, std_file, denoise_file) as (tx_std_file, tx_denoise_file):
params = ["-T", "DenoiseReadCounts",
"-I", tz.get_in(["depth", "bins", "target"], data),
"--standardized-copy-ratios", tx_std_file,
"--denoised-copy-ratios", tx_denoise_file]
if pon:
params += ["--count-panel-of-normals", pon]
else:
params += ["--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], data)]
_run_with_memory_scaling(params, tx_std_file, data)
return denoise_file if pon else std_file
def create_panel_of_normals(items, group_id, work_dir):
"""Create a panel of normals from one or more background read counts.
"""
out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
params = ["-T", "CreateReadCountPanelOfNormals",
"-O", tx_out_file,
"--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
for data in items:
params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
_run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
return out_file
def pon_to_bed(pon_file, out_dir, data):
"""Extract BED intervals from a GATK4 hdf5 panel of normal file.
"""
out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0]))
if not utils.file_uptodate(out_file, pon_file):
import h5py
with file_transaction(data, out_file) as tx_out_file:
with h5py.File(pon_file, "r") as f:
with open(tx_out_file, "w") as out_handle:
intervals = f["original_data"]["intervals"]
for i in range(len(intervals["transposed_index_start_end"][0])):
chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]]
if isinstance(chrom, bytes):
chrom = chrom.decode("utf-8")
start = int(intervals["transposed_index_start_end"][1][i]) - 1
end = int(intervals["transposed_index_start_end"][2][i])
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
return out_file
def prepare_intervals(data, region_file, work_dir):
"""Prepare interval regions for targeted and gene based regions.
"""
target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data))
if not utils.file_uptodate(target_file, region_file):
with file_transaction(data, target_file) as tx_out_file:
params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data),
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file]
if dd.get_coverage_interval(data) == "genome":
params += ["--bin-length", "1000", "--padding", "0"]
else:
params += ["-L", region_file, "--bin-length", "0", "--padding", "250"]
_run_with_memory_scaling(params, tx_out_file, data)
return target_file
def annotate_intervals(target_file, data):
"""Provide GC annotated intervals for error correction during panels and denoising.
TODO: include mappability and segmentation duplication inputs
"""
out_file = "%s-gcannotated.tsv" % utils.splitext_plus(target_file)[0]
if not utils.file_uptodate(out_file, target_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "AnnotateIntervals", "-R", dd.get_ref_file(data),
"-L", target_file,
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def collect_read_counts(data, work_dir):
"""Count reads in defined bins using CollectReadCounts.
"""
out_file = os.path.join(work_dir, "%s-target-coverage.hdf5" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CollectReadCounts", "-I", dd.get_align_bam(data),
"-L", tz.get_in(["regions", "bins", "target"], data),
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file, "--format", "HDF5"]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def heterogzygote_counts(paired):
"""Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts.
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), "structural", "counts"))
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x])
cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data)
tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data)
normal_counts = (_run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data)
if paired.normal_data else None)
if normal_counts:
tumor_counts, normal_counts = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data)
return tumor_counts, normal_counts
def _filter_by_normal(tumor_counts, normal_counts, data):
"""Filter count files based on normal frequency and median depth, avoiding high depth regions.
For frequency, restricts normal positions to those between 0.4 and 0.65
For depth, matches approach used in AMBER to try and avoid problematic genomic regions
with high count in the normal:
https://github.com/hartwigmedical/hmftools/tree/master/amber#usage
"""
from bcbio.heterogeneity import bubbletree
fparams = bubbletree.NORMAL_FILTER_PARAMS
tumor_out = "%s-normfilter%s" % utils.splitext_plus(tumor_counts)
normal_out = "%s-normfilter%s" % utils.splitext_plus(normal_counts)
if not utils.file_uptodate(tumor_out, tumor_counts):
with file_transaction(data, tumor_out, normal_out) as (tx_tumor_out, tx_normal_out):
median_depth = _get_normal_median_depth(normal_counts)
min_normal_depth = median_depth * fparams["min_depth_percent"]
max_normal_depth = median_depth * fparams["max_depth_percent"]
with open(tumor_counts) as tumor_handle:
with open(normal_counts) as normal_handle:
with open(tx_tumor_out, "w") as tumor_out_handle:
with open(tx_normal_out, "w") as normal_out_handle:
header = None
for t, n in zip(tumor_handle, normal_handle):
if header is None:
if not n.startswith("@"):
header = n.strip().split()
tumor_out_handle.write(t)
normal_out_handle.write(n)
elif (_normal_passes_depth(header, n, min_normal_depth, max_normal_depth) and
_normal_passes_freq(header, n, fparams)):
tumor_out_handle.write(t)
normal_out_handle.write(n)
return tumor_out, normal_out
def _normal_passes_freq(header, line, fparams):
vals = dict(zip(header, line.strip().split()))
cur_depth = float(vals["REF_COUNT"]) + int(vals["ALT_COUNT"])
if cur_depth > 0:
cur_freq = float(vals["ALT_COUNT"]) / cur_depth
else:
cur_freq = 0.0
return cur_freq >= fparams["min_freq_narrow"] and cur_freq <= fparams["max_freq_narrow"]
def _normal_passes_depth(header, line, min_normal_depth, max_normal_depth):
vals = dict(zip(header, line.strip().split()))
cur_depth = int(vals["REF_COUNT"]) + int(vals["ALT_COUNT"])
return cur_depth >= min_normal_depth and cur_depth <= max_normal_depth
def _get_normal_median_depth(normal_counts):
depths = []
with open(normal_counts) as in_handle:
header = None
for line in in_handle:
if header is None and not line.startswith("@"):
header = line.strip().split()
elif header:
n_vals = dict(zip(header, line.strip().split()))
depths.append(int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"]))
return np.median(depths)
def _run_collect_allelic_counts(pos_file, pos_name, work_dir, data):
"""Counts by alleles for a specific sample and set of positions.
"""
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "counts"))
out_file = os.path.join(out_dir, "%s-%s-counts.tsv" % (dd.get_sample_name(data), pos_name))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CollectAllelicCounts", "-L", pos_file, "-I", dd.get_align_bam(data),
"-R", dd.get_ref_file(data), "-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def _run_with_memory_scaling(params, tx_out_file, data, ld_preload=False):
num_cores = dd.get_num_cores(data)
memscale = {"magnitude": 0.9 * num_cores, "direction": "increase"} if num_cores > 1 else None
# Ignore tools_off: [gatk4], since it doesn't apply to GATK CNV calling
config = utils.deepish_copy(data["config"])
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
broad_runner = broad.runner_from_config(config)
broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, ld_preload=ld_preload)
# ## VCF output
def _get_seg_header(in_handle):
for line in in_handle:
if not line.startswith("@"):
break
return line.strip().split("\t"), in_handle
def _seg_to_vcf(vals):
"""Convert GATK CNV calls seg output to a VCF line.
"""
call_to_cn = {"+": 3, "-": 1}
call_to_type = {"+": "DUP", "-": "DEL"}
if vals["CALL"] not in ["0"]:
info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"],
"PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"],
"SVTYPE=%s" % call_to_type[vals["CALL"]],
"SVLEN=%s" % (int(vals["END"]) - int(vals["START"])),
"END=%s" % vals["END"],
"CN=%s" % call_to_cn[vals["CALL"]]]
return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".",
".", ";".join(info), "GT", "0/1"]
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "gatk-cnv"))
| mit | -3,875,645,723,407,961,600 | 50.380952 | 112 | 0.579414 | false |
bemcdonnell/SWMMOutputAPI | swmmoutputapi/swmmbinreader.py | 1 | 24788 | """
SWMM Output File Wrapper for the New OutputAPI.
Author: Bryant E. McDonnell
Date: 1/10/2016
"""
from ctypes import *
from _toolkitpyswmm import *
from datetime import datetime, timedelta
import os
__author__ = 'Bryant E. McDonnell ([email protected])'
__copyright__ = 'Copyright (c) 2016 Bryant E. McDonnell'
__license__ = 'BSD2'
__version__ = '0.2.1'
class _Opaque(Structure):
"""
    Used solely for passing the pointer to the smoapi struct to the API
"""
pass
class SWMMBinReader:
def __init__(self):
"""
Instantiate python Wrapper Object and build Wrapper functions.
"""
def get_pkgpath():
import _toolkitpyswmm as tkp
return os.path.dirname(tkp.__file__.replace('\\','/'))
try:
#Later Check for OS Type
dllname = 'outputAPI_winx86.dll'
#when platform detection is enabled, dllname can be changed
dllLoc = get_pkgpath() + '/data/'+ dllname
self.swmmdll = CDLL(dllLoc)
except:
raise Exception('Failed to Open Linked Library')
#### Initializing DLL Function List
#Initialize Pointer to smoapi
self._initsmoapi = self.swmmdll.SMO_init
self._initsmoapi.restype = POINTER(_Opaque)
#Open File Function Handle
self._openBinFile = self.swmmdll.SMO_open
self._free = self.swmmdll.SMO_free
self._close = self.swmmdll.SMO_close
#Project Data
self._getProjectSize = self.swmmdll.SMO_getProjectSize
self._getTimes = self.swmmdll.SMO_getTimes
self._getStartTime = self.swmmdll.SMO_getStartTime
self._getUnits = self.swmmdll.SMO_getUnits
#Object ID Function Handles
self._getIDs = self.swmmdll.SMO_getElementName
#Object Series Function Handles
self._getSubcatchSeries = self.swmmdll.SMO_getSubcatchSeries
self._getNodeSeries = self.swmmdll.SMO_getNodeSeries
self._getLinkSeries = self.swmmdll.SMO_getLinkSeries
self._getSystemSeries = self.swmmdll.SMO_getSystemSeries
        #Object Attribute Function Handles
self._getSubcatchAttribute = self.swmmdll.SMO_getSubcatchAttribute
self._getNodeAttribute = self.swmmdll.SMO_getNodeAttribute
self._getLinkAttribute = self.swmmdll.SMO_getLinkAttribute
self._getSystemAttribute = self.swmmdll.SMO_getSystemAttribute
#Object Result Function Handles
self._getSubcatchResult = self.swmmdll.SMO_getSubcatchResult
self._getNodeResult = self.swmmdll.SMO_getNodeResult
self._getLinkResult = self.swmmdll.SMO_getLinkResult
self._getSystemResult = self.swmmdll.SMO_getSystemResult
#Array Builder
self._newOutValueArray = self.swmmdll.SMO_newOutValueArray
self._newOutValueArray.argtypes = [POINTER(_Opaque), c_int, c_int, POINTER(c_int), POINTER(c_int)]
self._newOutValueArray.restype = POINTER(c_float)
#Series Builder
self._newOutValueSeries = self.swmmdll.SMO_newOutValueSeries
self._newOutValueSeries.argtypes = [POINTER(_Opaque), c_int, c_int, POINTER(c_int), POINTER(c_int)]
self._newOutValueSeries.restype = POINTER(c_float)
#SWMM Date num 2 String
self.SWMMdateToStr = self.swmmdll.datetime_dateToStr
#SWMM Time num 2 String
self.SWMMtimeToStr = self.swmmdll.datetime_timeToStr
def OpenBinFile(self, OutLoc):
"""Opens SWMM5 binary output file.
:param str OutLoc: Path to Binary output file
:return: None
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
"""
self.smoapi = self._initsmoapi()
ErrNo = self._openBinFile(self.smoapi,OutLoc)
if ErrNo != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo, DLLErrorKeys[ErrNo]))
def CloseBinFile(self):
"""Closes binary output file and cleans up member variables.
:returns: None
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.CloseBinFile()
"""
ErrNo = self._close(self.smoapi)
if hasattr(self, 'SubcatchmentIDs'): delattr(self,'SubcatchmentIDs')
if hasattr(self, 'NodeIDs'): delattr(self,'NodeIDs')
if hasattr(self, 'LinkIDs'): delattr(self,'LinkIDs')
if hasattr(self, 'PollutantIDs'): delattr(self,'PollutantIDs')
if ErrNo != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value, DLLErrorKeys[ErrNo.value]) )
def _get_SubcatchIDs(self):
"""
Purpose: Generates member Element IDs dictionary for Subcatchments
"""
self.SubcatchmentIDs = {}
for i in range(self.get_ProjectSize(subcatchCount)):
NAME = create_string_buffer(46)
LEN = c_int(46)
ErrNo1 = self._getIDs(self.smoapi, SM_subcatch, i, byref(NAME), byref(LEN))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value, DLLErrorKeys[ErrNo1.value]) )
self.SubcatchmentIDs[str(NAME.value)] = i
def _get_NodeIDs(self):
"""
Internal
Purpose: Generates member Element IDs dictionary for Nodes
"""
self.NodeIDs = {}
for i in range(self.get_ProjectSize(nodeCount)):
NAME = create_string_buffer(46)
LEN = c_int(46)
ErrNo1 = self._getIDs(self.smoapi, SM_node, i, byref(NAME), byref(LEN))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value, DLLErrorKeys[ErrNo1.value]) )
self.NodeIDs[str(NAME.value)] = i
def _get_LinkIDs(self):
"""
Internal
Purpose: Generates member Element IDs dictionary for Links
"""
self.LinkIDs = {}
for i in range(self.get_ProjectSize(linkCount)):
NAME = create_string_buffer(46)
LEN = c_int(46)
ErrNo1 = self._getIDs(self.smoapi, SM_link, i, byref(NAME), byref(LEN))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value, DLLErrorKeys[ErrNo1.value]) )
self.LinkIDs[str(NAME.value)] = i
def _get_PollutantIDs(self):
"""
Internal
Purpose: Generates member Element IDs dictionary for Pollutants
"""
self.PollutantIDs = {}
for i in range(self.get_ProjectSize(pollutantCount)):
NAME = create_string_buffer(46)
LEN = c_int(46)
ErrNo1 = self._getIDs(self.smoapi, SM_sys, i, byref(NAME), byref(LEN))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value, DLLErrorKeys[ErrNo1.value]) )
self.PollutantIDs[str(NAME.value)] = i
def get_IDs(self, SMO_elementIDType):
"""Returns List Type of Element IDs
        :param int SMO_elementIDType: element ID type :doc:`/keyrefs`
        :return: ordered list of element IDs
:rtype: list
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> Test.get_IDs(SM_subcatch)
>>> ['S3', 'S2', 'S1']
>>> Test.get_IDs(SM_node)
>>> ['J4', 'J1', 'J2', 'J3']
>>> Test.get_IDs(SM_link)
>>> ['C3', 'C2', 'C1']
"""
        if SMO_elementIDType == SM_subcatch:
if not hasattr(self, 'SubcatchmentIDs'):
self._get_SubcatchIDs()
IDlist = self.SubcatchmentIDs.keys()
elif SMO_elementIDType == SM_node:
if not hasattr(self, 'NodeIDs'):
self._get_NodeIDs()
IDlist = self.NodeIDs.keys()
elif SMO_elementIDType == SM_link:
if not hasattr(self, 'LinkIDs'):
self._get_LinkIDs()
IDlist = self.LinkIDs.keys()
elif SMO_elementIDType == SM_sys:
if not hasattr(self, 'PollutantIDs'):
self._get_PollutantIDs()
IDlist = self.PollutantIDs.keys()
else:
raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType))
return 0
# Do not sort lists
return IDlist
def get_Units(self, SMO_unit):
"""Returns flow units and Concentration
        :param int SMO_unit: unit type :doc:`/keyrefs`
:return: Unit Type
:rtype: str
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_Units(flow_rate)
>>> 'CFS'
"""
FlowUnitsType = ['CFS','GPM', 'MGD','CMS', 'LPS', 'MLD']
# cubic feet per second
# gallons per minute
# million gallons per day
# cubic meters per second
# liters per second
# million liters per day
ConcUnitsType = ['mg','ug','COUNT']
# Milligrams / L
# Micrograms / L
# Counts / L
x = c_int()
ErrNo1 = self._getUnits(self.smoapi, SMO_unit, byref(x))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) )
if SMO_unit == flow_rate:
return FlowUnitsType[x.value]
elif SMO_unit == concentration:
return ConcUnitsType[x.value]
else:
raise Exception("SMO_unit: {} Outside Valid Types".format(SMO_unit))
def get_Times(self, SMO_timeElementType):
"""Returns report and simulation time related parameters.
        :param int SMO_timeElementType: time element type :doc:`/keyrefs`
:return: Report Step (seconds) or Number of Time Steps
:rtype: int
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_Times(reportStep)
>>> 300
"""
timeElement = c_int()
ErrNo1 = self._getTimes(self.smoapi, SMO_timeElementType, byref(timeElement))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) )
return timeElement.value
def _get_StartTimeSWMM(self):
"""
Internal
Purpose: Returns the simulation start datetime as double.
"""
StartTime = c_double()
ErrNo1 = self._getStartTime(self.smoapi, byref(StartTime))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) )
return StartTime.value
def get_StartTime(self):
"""Uses SWMM5 Conversion Functions to Pull DateTime String and converts to Python datetime format
:return: Simulation start time.
:rtype: datetime
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_StartTime()
>>> datetime.datetime(2016,10,4,12,4,0)
"""
_StartTime = self._get_StartTimeSWMM()
_date = int(_StartTime)
_time = _StartTime - _date
#Pull Date String
DateStr = create_string_buffer(50)
self.SWMMdateToStr(c_double(_date), byref(DateStr))
DATE = DateStr.value
#Pull Time String
TimeStr = create_string_buffer(50)
self.SWMMtimeToStr(c_double(_time), byref(TimeStr))
TIME = TimeStr.value
DTime = datetime.strptime(DATE+' '+TIME,'%Y-%b-%d %H:%M:%S')
return DTime
def get_TimeSeries(self):
""" Gets simulation start time and builds timeseries array based on the reportStep
:return: Simulation time series.
:rtype: list of datetime
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_TimeSeries()
>>> [datetime.datetime(2015, 11, 29, 14, 0), datetime.datetime(2015, 11, 29, 14, 1), ..., datetime.datetime(2015, 11, 29, 14, 9)]
"""
return [self.get_StartTime() + timedelta(seconds = ind*self.get_Times(reportStep))\
for ind in range(self.get_Times(numPeriods))]
def get_ProjectSize(self, SMO_elementCount):
"""Returns number of elements of a specific element type.
        :param int SMO_elementCount: element count type :doc:`/keyrefs`
:return: Number of Objects
:rtype: int
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_ProjectSize(nodeCount)
>>> 10
"""
numel = c_int()
ErrNo1 = self._getProjectSize(self.smoapi, SMO_elementCount, byref(numel))
if ErrNo1 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1,DLLErrorKeys[ErrNo1]) )
return numel.value
def get_Series(self, SMO_elementType, SMO_Attribute, IDName = None, TimeStartInd = 0, TimeEndInd = -1):
"""Get time series results for particular attribute for an object. Specify series start and length using TimeStartInd and TimeEndInd respectively.
:param int SMO_elementType: Element type :doc:`/keyrefs`.
:param int SMO_Attribute: Attribute Type :doc:`/keyrefs`.
:param str IDName: Element ID name (Default is None for to reach sys variables) (ID Names are case sensitive).
:param int TimeStartInd: Starting index for the time series data period (default is 0).
        :param int TimeEndInd: Array index for the time series data period (default is -1 for end).
:return: data series
:rtype: list
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_Series(SM_subcatch, runoff_rate, 'S3', 0, 50)
>>> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, .... 0.0]
>>> OutputFile.get_Series(SM_node, invert_depth, 'J1', 0, 50)
>>> [3.908519983291626, 4.6215434074401855, 4.594745635986328, 4.595311641693115, ..., 4.595311641693115]
>>> OutputFile.get_Series(SM_link, rainfall_subcatch, 'C2', 0, 50)
>>> [10.2869873046875, 10.04793643951416, 9.997148513793945, 10.000744819641113, ..., 10.011372566223145]
>>> OutputFile.get_Series(SM_sys, rainfall_system, TimeStartInd = 0, TimeEndInd = 50)
>>> [0.017500000074505806, 0.017500000074505806, 0.017500000074505806, 0.017500000074505806, ..., 0.017500000074505806]
"""
if TimeEndInd > self.get_Times(numPeriods):
raise Exception("Outside Number of TimeSteps")
elif TimeEndInd == -1:
TimeEndInd = self.get_Times(numPeriods) + 1 - TimeEndInd
sLength = c_int()
ErrNo1 = c_int()
SeriesPtr = self._newOutValueSeries(self.smoapi, TimeStartInd,\
TimeEndInd, byref(sLength), byref(ErrNo1))
if ErrNo1.value != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value,DLLErrorKeys[ErrNo1.value]) )
if SMO_elementType == SM_subcatch:
if not hasattr(self, 'SubcatchmentIDs'):
self._get_SubcatchIDs()
ErrNo2 = self._getSubcatchSeries(self.smoapi, self.SubcatchmentIDs[IDName], SMO_Attribute, \
TimeStartInd, sLength.value, SeriesPtr)
elif SMO_elementType == SM_node:
if not hasattr(self, 'NodeIDs'):
self._get_NodeIDs()
ErrNo2 = self._getNodeSeries(self.smoapi, self.NodeIDs[IDName], SMO_Attribute, \
TimeStartInd, sLength.value, SeriesPtr)
elif SMO_elementType == SM_link:
if not hasattr(self, 'LinkIDs'):
self._get_LinkIDs()
ErrNo2 = self._getLinkSeries(self.smoapi, self.LinkIDs[IDName], SMO_Attribute, \
TimeStartInd, sLength.value, SeriesPtr)
## Add Pollutants Later
elif SMO_elementType == SM_sys:
ErrNo2 = self._getSystemSeries(self.smoapi, SMO_Attribute, \
TimeStartInd, sLength.value, SeriesPtr)
else:
raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType))
if ErrNo2 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo2,DLLErrorKeys[ErrNo2]) )
BldArray = [SeriesPtr[i] for i in range(sLength.value)]
self._free(SeriesPtr)
return BldArray
def get_Attribute(self, SMO_elementType, SMO_Attribute, TimeInd):
"""Get results for particular attribute for all elements at a specific time index.
:param int SMO_elementType: Element type :doc:`/keyrefs`.
:param int SMO_Attribute: Attribute Type :doc:`/keyrefs`.
        :param int TimeInd: Time index
:return: data list in order of the IDs of the SMO_elementType
:rtype: list
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_Attribute(SM_subcatch, rainfall_subcatch, 0)
>>> [0.017500000074505806, 0.017500000074505806, 0.017500000074505806]
>>> OutputFile.get_Attribute(SM_node, invert_depth, 10)
>>> [4.596884250640869, 0.720202624797821, 0.6315776705741882, 0.6312257051467896]
>>> OutputFile.get_Attribute(SM_link, flow_rate_link, 50)
>>> [9.00419807434082, 10.011459350585938, 11.020767211914062]
"""
if TimeInd > self.get_Times(numPeriods)-1:
raise Exception("Outside Number of TimeSteps")
aLength = c_int()
ErrNo1 = c_int()
ValArrayPtr = self._newOutValueArray(self.smoapi, getAttribute,\
SMO_elementType, byref(aLength), byref(ErrNo1))
if ErrNo1.value != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value,DLLErrorKeys[ErrNo1.value]) )
if SMO_elementType == SM_subcatch:
ErrNo2 = self._getSubcatchAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr)
elif SMO_elementType == SM_link:
ErrNo2 = self._getLinkAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr)
elif SMO_elementType == SM_node:
ErrNo2 = self._getNodeAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr)
## Add Pollutants Later
else:
raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType))
if ErrNo2 != 0:
raise Exception("API ErrNo {0}:{1}".format(ErrNo2,DLLErrorKeys[ErrNo2]) )
BldArray = [ValArrayPtr[i] for i in range(aLength.value)]
self._free(ValArrayPtr)
return BldArray
def get_Result(self, SMO_elementType, TimeInd, IDName = None):
"""For a element ID at given time, get all attributes
:param int SMO_elementType: Element type :doc:`/keyrefs`.
:param int TimeInd: Time Index
:param int IDName: IDName (default None for System Variables)
Examples:
>>> OutputFile = SWMMBinReader()
>>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
>>> OutputFile.get_Result(SM_subcatch,3000,'S3')
>>> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> OutputFile.get_Result(SM_node,3000,'J1')
>>> [4.594789505004883, 25.322790145874023, 0.0, 9.000000953674316, 9.000000953674316, 0.0]
>>> OutputFile.get_Result(SM_link,9000,'C3')
>>> [11.0, 0.6312892436981201, 12.93112564086914, 185.72474670410156, 0.270773708820343]
>>> OutputFile.get_Result(SM_sys,3000,'S3')
>>> [70.0, 0.0, 0.0, 0.0, 0.0, 8.0, 0.0, 0.0, 3.0, 11.0, 0.0, 11.000021934509277, 532.2583618164062, 0.0, 0.0]
"""
if TimeInd > self.get_Times(numPeriods)-1:
raise Exception("Outside Number of TimeSteps")
alength = c_int()
ErrNo1 = c_int()
ValArrayPtr = self._newOutValueArray(self.smoapi, getResult,\
SMO_elementType, byref(alength), byref(ErrNo1))
if SMO_elementType == SM_subcatch:
if not hasattr(self, 'SubcatchmentIDs'):
self._get_SubcatchIDs()
ErrNo2 = self._getSubcatchResult(self.smoapi, TimeInd, self.SubcatchmentIDs[IDName], ValArrayPtr)
elif SMO_elementType == SM_node:
if not hasattr(self, 'NodeIDs'):
self._get_NodeIDs()
ErrNo2 = self._getNodeResult(self.smoapi, TimeInd, self.NodeIDs[IDName], ValArrayPtr)
elif SMO_elementType == SM_link:
if not hasattr(self, 'LinkIDs'):
self._get_LinkIDs()
ErrNo2 = self._getLinkResult(self.smoapi, TimeInd, self.LinkIDs[IDName], ValArrayPtr)
## Add Pollutants Later
elif SMO_elementType == SM_sys:
ErrNo2 = self._getSystemResult(self.smoapi, TimeInd, ValArrayPtr)
else:
raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType))
BldArray = [ValArrayPtr[i] for i in range(alength.value)]
self._free(ValArrayPtr)
return BldArray
if __name__ in "__main__":
## Run Tests
## Open
Test = SWMMBinReader()
Test.OpenBinFile(r"C:\PROJECTCODE\SWMMOutputAPI\testing\OutputTestModel522_SHORT.out")
## Get IDs
print("\nProject Element ID Info")
print(Test.get_IDs(SM_subcatch))
print(Test.get_IDs(SM_node))
print(Test.get_IDs(SM_link))
print("\nGet Units")
print('flow_rate: {}'.format(Test.get_Units(flow_rate)))
print('concentration: {}'.format(Test.get_Units(concentration)))
## Get Project Size
print("\nProject Size Info")
print("Subcatchments: {}".format(Test.get_ProjectSize(subcatchCount)))
print("Nodes: {}".format(Test.get_ProjectSize(nodeCount)))
print("Links: {}".format(Test.get_ProjectSize(linkCount)))
print("Pollutants: {}".format(Test.get_ProjectSize(pollutantCount)))
## Project Time Steps
print("\nProject Time Info")
print("Report Step: {}".format(Test.get_Times(reportStep)))
print("Periods: {}".format(Test.get_Times(numPeriods)))
## Get Time Series
print("\nGet Time Series")
TimeSeries = Test.get_TimeSeries()
print(TimeSeries[:10])
## Get Series
print("\nSeries Tests")
SubcSeries = Test.get_Series(SM_subcatch, runoff_rate, 'S3', 0, 50)
print(SubcSeries)
NodeSeries = Test.get_Series(SM_node, invert_depth, 'J1', 0, 50)
print(NodeSeries)
LinkSeries = Test.get_Series(SM_link, rainfall_subcatch, 'C2', 0, 50)
print(LinkSeries)
SystSeries = Test.get_Series(SM_sys, rainfall_system, TimeStartInd = 0, TimeEndInd = 50)
print(SystSeries)
## Get Attributes
print("\nAttributes Tests")
SubcAttributes = Test.get_Attribute(SM_subcatch, rainfall_subcatch, 0) #<- Check Values.. Might be issue here
print(SubcAttributes)
NodeAttributes = Test.get_Attribute(SM_node, invert_depth, 10)
print(NodeAttributes)
LinkAttributes = Test.get_Attribute(SM_link, flow_rate_link, 50)
print(LinkAttributes)
## Get Results
print("\nResult Tests")
SubcResults = Test.get_Result(SM_subcatch,3000,'S3')
print(SubcResults)
NodeResults = Test.get_Result(SM_node,3000,'J1')
print(NodeResults)
LinkResults = Test.get_Result(SM_link,9000,'C3')
print(LinkResults)
SystResults = Test.get_Result(SM_sys,3000,'S3')
print(SystResults)
## Close Output File
Test.CloseBinFile()
help(SWMMBinReader)
| bsd-2-clause | 2,478,650,889,802,605,600 | 38.408585 | 154 | 0.592746 | false |
geokala/cloudify-plugins-common | cloudify/workflows/tasks.py | 2 | 24871 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import sys
import time
import uuid
import Queue
from cloudify import exceptions
from cloudify.workflows import api
INFINITE_TOTAL_RETRIES = -1
DEFAULT_TOTAL_RETRIES = INFINITE_TOTAL_RETRIES
DEFAULT_RETRY_INTERVAL = 30
DEFAULT_SEND_TASK_EVENTS = True
TASK_PENDING = 'pending'
TASK_SENDING = 'sending'
TASK_SENT = 'sent'
TASK_STARTED = 'started'
TASK_RESCHEDULED = 'rescheduled'
TASK_SUCCEEDED = 'succeeded'
TASK_FAILED = 'failed'
TERMINATED_STATES = [TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED]
def retry_failure_handler(task):
"""Basic on_success/on_failure handler that always returns retry"""
return HandlerResult.retry()
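# Illustrative sketch, not part of the original module: a custom on_failure
# handler written against the same contract as retry_failure_handler above.
# The retry limit and delay are hypothetical values chosen for the example.
def example_fail_after_three_retries(task):
    """Retry a failed task up to three times, then fail the workflow."""
    if task.current_retries >= 3:
        return HandlerResult.fail()
    return HandlerResult.retry(retry_after=5)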
class WorkflowTask(object):
"""A base class for workflow tasks"""
def __init__(self,
workflow_context,
task_id=None,
info=None,
on_success=None,
on_failure=None,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS):
"""
:param task_id: The id of this task (generated if none is provided)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
:param total_retries: Maximum retry attempt for this task, in case
the handlers return a retry attempt.
:param retry_interval: Number of seconds to wait between retries
:param workflow_context: the CloudifyWorkflowContext instance
"""
self.id = task_id or str(uuid.uuid4())
self._state = TASK_PENDING
self.async_result = None
self.on_success = on_success
self.on_failure = on_failure
self.info = info
self.error = None
self.total_retries = total_retries
self.retry_interval = retry_interval
self.terminated = Queue.Queue(maxsize=1)
self.is_terminated = False
self.workflow_context = workflow_context
self.send_task_events = send_task_events
self.current_retries = 0
# timestamp for which the task should not be executed
# by the task graph before reached, overridden by the task
# graph during retries
self.execute_after = time.time()
def dump(self):
return {
'id': self.id,
'state': self.get_state(),
'info': self.info,
'error': self.error,
'current_retries': self.current_retries,
'cloudify_context': self.cloudify_context
}
def is_remote(self):
"""
:return: Is this a remote task
"""
return not self.is_local()
def is_local(self):
"""
:return: Is this a local task
"""
raise NotImplementedError('Implemented by subclasses')
def is_nop(self):
"""
:return: Is this a NOP task
"""
return False
def get_state(self):
"""
Get the task state
:return: The task state [pending, sending, sent, started,
rescheduled, succeeded, failed]
"""
return self._state
def set_state(self, state):
"""
Set the task state
:param state: The state to set [pending, sending, sent, started,
rescheduled, succeeded, failed]
"""
if state not in [TASK_PENDING, TASK_SENDING, TASK_SENT, TASK_STARTED,
TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED]:
raise RuntimeError('Illegal state set on task: {0} '
'[task={1}]'.format(state, str(self)))
self._state = state
if state in TERMINATED_STATES:
self.is_terminated = True
self.terminated.put_nowait(True)
def wait_for_terminated(self, timeout=None):
if self.is_terminated:
return
self.terminated.get(timeout=timeout)
def handle_task_terminated(self):
if self.get_state() in (TASK_FAILED, TASK_RESCHEDULED):
handler_result = self._handle_task_not_succeeded()
else:
handler_result = self._handle_task_succeeded()
if handler_result.action == HandlerResult.HANDLER_RETRY:
if any([self.total_retries == INFINITE_TOTAL_RETRIES,
self.current_retries < self.total_retries,
handler_result.ignore_total_retries]):
if handler_result.retry_after is None:
handler_result.retry_after = self.retry_interval
new_task = self.duplicate_for_retry(
time.time() + handler_result.retry_after)
handler_result.retried_task = new_task
else:
handler_result.action = HandlerResult.HANDLER_FAIL
return handler_result
def _handle_task_succeeded(self):
"""Call handler for task success"""
if self.on_success:
return self.on_success(self)
else:
return HandlerResult.cont()
def _handle_task_not_succeeded(self):
"""
Call handler for task which hasn't ended in 'succeeded' state
(i.e. has either failed or been rescheduled)
"""
try:
exception = self.async_result.result
except Exception as e:
exception = exceptions.NonRecoverableError(
'Could not de-serialize '
'exception of task {0} --> {1}: {2}'
.format(self.name,
type(e).__name__,
str(e)))
if isinstance(exception, exceptions.OperationRetry):
# operation explicitly requested a retry, so we ignore
# the handler set on the task.
handler_result = HandlerResult.retry()
elif self.on_failure:
handler_result = self.on_failure(self)
else:
handler_result = HandlerResult.retry()
if handler_result.action == HandlerResult.HANDLER_RETRY:
if isinstance(exception, exceptions.NonRecoverableError):
handler_result = HandlerResult.fail()
elif isinstance(exception, exceptions.RecoverableError):
handler_result.retry_after = exception.retry_after
return handler_result
def __str__(self):
suffix = self.info if self.info is not None else ''
return '{0}({1})'.format(self.name, suffix)
def duplicate_for_retry(self, execute_after):
"""
:return: A new instance of this task with a new task id
"""
dup = self._duplicate()
dup.execute_after = execute_after
dup.current_retries = self.current_retries + 1
if dup.cloudify_context and 'operation' in dup.cloudify_context:
op_ctx = dup.cloudify_context['operation']
op_ctx['retry_number'] = dup.current_retries
return dup
def _duplicate(self):
raise NotImplementedError('Implemented by subclasses')
@property
def cloudify_context(self):
raise NotImplementedError('Implemented by subclasses')
@property
def name(self):
"""
:return: The task name
"""
raise NotImplementedError('Implemented by subclasses')
class RemoteWorkflowTask(WorkflowTask):
"""A WorkflowTask wrapping a celery based task"""
# cache for registered tasks queries to celery workers
cache = {}
def __init__(self,
task,
cloudify_context,
workflow_context,
task_id=None,
info=None,
on_success=None,
on_failure=retry_failure_handler,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS):
"""
:param task: The celery task
:param cloudify_context: the cloudify context dict
:param task_id: The id of this task (generated if none is provided)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
:param total_retries: Maximum retry attempt for this task, in case
the handlers return a retry attempt.
:param retry_interval: Number of seconds to wait between retries
:param workflow_context: the CloudifyWorkflowContext instance
"""
super(RemoteWorkflowTask, self).__init__(
workflow_context,
task_id,
info=info,
on_success=on_success,
on_failure=on_failure,
total_retries=total_retries,
retry_interval=retry_interval,
send_task_events=send_task_events)
self.task = task
self._cloudify_context = cloudify_context
def apply_async(self):
"""
Call the underlying celery tasks apply_async. Verify the task
is registered and send an event before doing so.
:return: a RemoteWorkflowTaskResult instance wrapping the
celery async result
"""
try:
self._verify_task_registered()
self.workflow_context.internal.send_task_event(TASK_SENDING, self)
self.set_state(TASK_SENT)
async_result = self.task.apply_async(task_id=self.id)
self.async_result = RemoteWorkflowTaskResult(self, async_result)
except exceptions.NonRecoverableError as e:
self.set_state(TASK_FAILED)
self.workflow_context.internal\
.send_task_event(TASK_FAILED, self, {'exception': e})
self.error = e
self.async_result = RemoteWorkflowNotExistTaskResult(self)
return self.async_result
def is_local(self):
return False
def _duplicate(self):
dup = RemoteWorkflowTask(task=self.task,
cloudify_context=self.cloudify_context,
workflow_context=self.workflow_context,
task_id=None, # we want a new task id
info=self.info,
on_success=self.on_success,
on_failure=self.on_failure,
total_retries=self.total_retries,
retry_interval=self.retry_interval,
send_task_events=self.send_task_events)
dup.cloudify_context['task_id'] = dup.id
return dup
@property
def name(self):
"""The task name"""
return self.cloudify_context['task_name']
@property
def cloudify_context(self):
return self._cloudify_context
@property
def target(self):
"""The task target (queue name)"""
return self.cloudify_context['task_target']
def _verify_task_registered(self):
verify_task_registered(self.name, self.target, self._get_registered)
def _get_registered(self):
# import here because this only applies in remote execution
# environments
from cloudify.celery import celery
worker_name = 'celery@{0}'.format(self.target)
inspect = celery.control.inspect(destination=[worker_name])
registered = inspect.registered() or {}
result = registered.get(worker_name, set())
return set(result)
class LocalWorkflowTask(WorkflowTask):
"""A WorkflowTask wrapping a local callable"""
def __init__(self,
local_task,
workflow_context,
node=None,
info=None,
on_success=None,
on_failure=retry_failure_handler,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS,
kwargs=None,
task_id=None,
name=None):
"""
:param local_task: A callable
:param workflow_context: the CloudifyWorkflowContext instance
:param node: The CloudifyWorkflowNode instance (if in node context)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
:param total_retries: Maximum retry attempt for this task, in case
the handlers return a retry attempt.
:param retry_interval: Number of seconds to wait between retries
:param kwargs: Local task keyword arguments
:param name: optional parameter (default: local_task.__name__)
"""
super(LocalWorkflowTask, self).__init__(
info=info,
on_success=on_success,
on_failure=on_failure,
total_retries=total_retries,
retry_interval=retry_interval,
task_id=task_id,
workflow_context=workflow_context,
send_task_events=send_task_events)
self.local_task = local_task
self.node = node
self.kwargs = kwargs or {}
self._name = name or local_task.__name__
def dump(self):
super_dump = super(LocalWorkflowTask, self).dump()
super_dump.update({
'name': self._name
})
return super_dump
def apply_async(self):
"""
Execute the task in the local task thread pool
:return: A wrapper for the task result
"""
def local_task_wrapper():
try:
self.workflow_context.internal.send_task_event(TASK_STARTED,
self)
result = self.local_task(**self.kwargs)
self.workflow_context.internal.send_task_event(
TASK_SUCCEEDED, self, event={'result': str(result)})
self.async_result._holder.result = result
self.set_state(TASK_SUCCEEDED)
except BaseException as e:
new_task_state = TASK_RESCHEDULED if isinstance(
e, exceptions.OperationRetry) else TASK_FAILED
exc_type, exception, tb = sys.exc_info()
self.workflow_context.internal.send_task_event(
new_task_state, self, event={'exception': str(exception)})
self.async_result._holder.error = (exception, tb)
self.set_state(new_task_state)
self.async_result = LocalWorkflowTaskResult(self)
self.workflow_context.internal.send_task_event(TASK_SENDING, self)
self.set_state(TASK_SENT)
self.workflow_context.internal.add_local_task(local_task_wrapper)
return self.async_result
def is_local(self):
return True
def _duplicate(self):
dup = LocalWorkflowTask(local_task=self.local_task,
workflow_context=self.workflow_context,
node=self.node,
info=self.info,
on_success=self.on_success,
on_failure=self.on_failure,
total_retries=self.total_retries,
retry_interval=self.retry_interval,
send_task_events=self.send_task_events,
kwargs=self.kwargs,
name=self.name)
return dup
@property
def name(self):
"""The task name"""
return self._name
@property
def cloudify_context(self):
return self.kwargs.get('__cloudify_context')
# NOP tasks class
class NOPLocalWorkflowTask(LocalWorkflowTask):
def __init__(self, workflow_context):
super(NOPLocalWorkflowTask, self).__init__(lambda: None,
workflow_context)
@property
def name(self):
"""The task name"""
return 'NOP'
def apply_async(self):
self.set_state(TASK_SUCCEEDED)
return LocalWorkflowTaskResult(self)
def is_nop(self):
return True
class WorkflowTaskResult(object):
"""A base wrapper for workflow task results"""
def __init__(self, task):
self.task = task
def _process(self, retry_on_failure):
if self.task.workflow_context.internal.graph_mode:
return self._get()
task_graph = self.task.workflow_context.internal.task_graph
while True:
self._wait_for_task_terminated()
handler_result = self.task.handle_task_terminated()
task_graph.remove_task(self.task)
try:
result = self._get()
if handler_result.action != HandlerResult.HANDLER_RETRY:
return result
except:
if (not retry_on_failure or
handler_result.action == HandlerResult.HANDLER_FAIL):
raise
self._sleep(handler_result.retry_after)
self.task = handler_result.retried_task
task_graph.add_task(self.task)
self._check_execution_cancelled()
self.task.apply_async()
self._refresh_state()
@staticmethod
def _check_execution_cancelled():
if api.has_cancel_request():
raise api.ExecutionCancelled()
def _wait_for_task_terminated(self):
while True:
self._check_execution_cancelled()
try:
self.task.wait_for_terminated(timeout=1)
break
except Queue.Empty:
continue
def _sleep(self, seconds):
while seconds > 0:
self._check_execution_cancelled()
sleep_time = 1 if seconds > 1 else seconds
time.sleep(sleep_time)
seconds -= sleep_time
def get(self, retry_on_failure=True):
"""
Get the task result.
Will block until the task execution ends.
:return: The task result
"""
return self._process(retry_on_failure)
def _get(self):
raise NotImplementedError('Implemented by subclasses')
def _refresh_state(self):
raise NotImplementedError('Implemented by subclasses')
class RemoteWorkflowNotExistTaskResult(WorkflowTaskResult):
def __init__(self, task):
super(RemoteWorkflowNotExistTaskResult, self).__init__(task)
self.task = task
def _get(self):
raise self.task.error
@property
def result(self):
return self.task.error
class RemoteWorkflowTaskResult(WorkflowTaskResult):
"""A wrapper for celery's AsyncResult"""
def __init__(self, task, async_result):
super(RemoteWorkflowTaskResult, self).__init__(task)
self.async_result = async_result
def _get(self):
return self.async_result.get()
def _refresh_state(self):
self.async_result = self.task.async_result.async_result
@property
def result(self):
return self.async_result.result
class LocalWorkflowTaskResult(WorkflowTaskResult):
"""A wrapper for local workflow task results"""
class ResultHolder(object):
def __init__(self, result=None, error=None):
self.result = result
self.error = error
def __init__(self, task):
"""
:param task: The LocalWorkflowTask instance
"""
super(LocalWorkflowTaskResult, self).__init__(task)
self._holder = self.ResultHolder()
def _get(self):
if self._holder.error is not None:
exception, traceback = self._holder.error
raise exception, None, traceback
return self._holder.result
def _refresh_state(self):
self._holder = self.task.async_result._holder
@property
def result(self):
if self._holder.error:
return self._holder.error[0]
else:
return self._holder.result
class HandlerResult(object):
HANDLER_RETRY = 'handler_retry'
HANDLER_FAIL = 'handler_fail'
HANDLER_IGNORE = 'handler_ignore'
HANDLER_CONTINUE = 'handler_continue'
def __init__(self,
action,
ignore_total_retries=False,
retry_after=None):
self.action = action
self.ignore_total_retries = ignore_total_retries
self.retry_after = retry_after
# this field is filled by handle_terminated_task() below after
# duplicating the task and updating the relevant task fields
self.retried_task = None
@classmethod
def retry(cls, ignore_total_retries=False, retry_after=None):
return HandlerResult(cls.HANDLER_RETRY,
ignore_total_retries=ignore_total_retries,
retry_after=retry_after)
@classmethod
def fail(cls):
return HandlerResult(cls.HANDLER_FAIL)
@classmethod
def cont(cls):
return HandlerResult(cls.HANDLER_CONTINUE)
@classmethod
def ignore(cls):
return HandlerResult(cls.HANDLER_IGNORE)
def verify_task_registered(name, target, get_registered):
cache = RemoteWorkflowTask.cache
registered = cache.get(target, set())
if name not in registered:
registered = get_registered()
cache[target] = registered
if name not in registered:
raise exceptions.NonRecoverableError(
'Missing task: {0} in worker celery.{1} \n'
'Registered tasks are: {2}'
.format(name, target, registered))
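# Minimal usage sketch (added for illustration; not part of the original
# module). The task name, queue name and lookup callable below are made up:
# verify_task_registered consults RemoteWorkflowTask.cache first and only
# calls get_registered on a cache miss.
def _example_verify_task_registered():
    verify_task_registered(
        name='plugin.tasks.install',
        target='agent_queue',
        get_registered=lambda: {'plugin.tasks.install'})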
| apache-2.0 | -3,628,943,667,350,454,300 | 34.888889 | 79 | 0.571187 | false |
akintolga/superdesk-aap | server/aap/commands/import_text_archive.py | 2 | 15489 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from bson import ObjectId
import superdesk
import urllib3
import urllib
import xml.etree.ElementTree as etree
import pytz
from pytz import NonExistentTimeError, AmbiguousTimeError
from superdesk import config
from superdesk.io.iptc import subject_codes
from datetime import datetime
import time
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE, FORMAT, FORMATS
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
from apps.archive.common import generate_unique_id_and_name
import json
from eve.utils import ParsedRequest
# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
'International Sport': 's',
'Racing': 'r',
'Parliamentary Press Releases': 'p',
'Features': 'c',
'Financial News': 'f',
'General': 'a',
'aap Features': 'c',
'aap International News': 'i',
'aap Australian Sport': 't',
'Australian General News': 'a',
'Asia Pulse Full': 'i',
'AFR Summary': 'a',
'Australian Sport': 't',
'PR Releases': 'j',
'Entertainment News': 'e',
'Special Events': 'y',
'Asia Pulse': 'i',
'aap International Sport': 's',
'Emergency Services': 'a',
'BRW Summary': 'a',
'FBM Summary': 'a',
'aap Australian General News': 'a',
'International News': 'i',
'aap Financial News': 'f',
'Asia Pulse Basic': 'i',
'Political News': 'p',
'Advisories': 'v'
}
class AppImportTextArchiveCommand(superdesk.Command):
option_list = (
superdesk.Option('--start', '-strt', dest='start_id', required=True),
superdesk.Option('--user', '-usr', dest='user', required=True),
superdesk.Option('--password', '-pwd', dest='password', required=True),
superdesk.Option('--url_root', '-url', dest='url', required=True),
superdesk.Option('--query', '-qry', dest='query', required=True),
superdesk.Option('--count', '-c', dest='limit', required=False),
superdesk.Option('--direction', '-d', dest='direction', required=False)
)
BATCH_SIZE = 500
def run(self, start_id, user, password, url, query, limit, direction):
print('Starting text archive import at {}'.format(start_id))
self._user = user
self._password = password
self._id = int(start_id)
self._url_root = url
self._query = urllib.parse.quote(query)
# direction True is forwards
self._direction = True
if direction is not None:
if direction.lower()[0] == 'r':
self._direction = False
if limit is not None:
self._limit = int(limit)
else:
self._limit = None
self._api_login()
x = self._get_bunch(self._id)
while x:
self._process_bunch(x)
x = self._get_bunch(self._id)
if self._limit is not None and self._limit <= 0:
break
            if limit is None and (x is None or int(x.find('doc_count').text) == 0):
print('Complete')
break
print('finished text archive import')
def _api_login(self):
self._http = urllib3.PoolManager()
credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
url = self._url_root + credentials
r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
self._headers = {'cookie': r.getheader('set-cookie')}
self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
def _get_bunch(self, id):
url = self._url_root
if self._direction:
d = '>'
else:
d = '<'
url += 'archives/txtarch?search_docs[struct_query]=(DCDATA_ID{0}{1})&search_docs[query]='.format(d, id)
url += self._query
url += '&search_docs[format]=full&search_docs[pagesize]={0}&search_docs[page]=1'.format(self.BATCH_SIZE)
if self._direction:
url += '&search_docs[sortorder]=DCDATA_ID%20ASC'
else:
url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
print('Getting batch from DC url [{0}]'.format(url))
retries = 3
while retries > 0:
s = time.time()
r = self._http.request('GET', url, headers=self._headers)
print('DC returned in {:.2f} seconds'.format(time.time() - s))
if r.status == 200:
e = etree.fromstring(r.data)
# print(str(r.data))
count = int(e.find('doc_count').text)
if count > 0:
print('count : {}'.format(count))
return e
else:
self._api_login()
retries -= 1
return None
def _get_head_value(self, doc, field):
el = doc.find('dcdossier/document/head/' + field)
if el is not None:
return el.text
return None
def _addkeywords(self, key, doc, item):
code = self._get_head_value(doc, key)
if code:
if 'keywords' not in item:
item['keywords'] = []
item['keywords'].append(code)
def _process_bunch(self, x):
# x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
items = []
for doc in x.findall('dc_rest_docs/dc_rest_doc'):
try:
# print(doc.get('href'))
id = doc.find('dcdossier').get('id')
if self._direction:
if int(id) > self._id:
self._id = int(id)
else:
if int(id) < self._id:
self._id = int(id)
item = {}
item['guid'] = doc.find('dcdossier').get('guid')
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
format = self._get_head_value(doc, 'Format')
if format == 't':
item[FORMAT] = FORMATS.PRESERVED
else:
item[FORMAT] = FORMATS.HTML
# item[FORMAT] = FORMATS.HTML
# if the item has been modified in the archive then it is due to a kill
# there is an argument that this item should not be imported at all
if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
# item[ITEM_STATE] = CONTENT_STATE.KILLED
continue
else:
item[ITEM_STATE] = CONTENT_STATE.PUBLISHED
value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
local_tz = pytz.timezone('Australia/Sydney')
try:
aus_dt = local_tz.localize(value, is_dst=None)
except NonExistentTimeError as ex:
aus_dt = local_tz.localize(value, is_dst=True)
except AmbiguousTimeError:
aus_dt = local_tz.localize(value, is_dst=False)
item['firstcreated'] = aus_dt.astimezone(pytz.utc)
item['versioncreated'] = item['firstcreated']
generate_unique_id_and_name(item)
item['ingest_id'] = id
last_line = None
el = doc.find('dcdossier/document/body/BodyText')
if el is not None:
story = el.text
lines = story.split('\n')
if len(lines) > 0:
last_line = lines[-1]
if item.get(FORMAT) == FORMATS.HTML:
story = story.replace('\n ', '<p></p>')
story = story.replace('\n', '<br>')
item['body_html'] = '<p>' + story + '</p>'
else:
item['body_html'] = '<pre>' + story + '</pre>'
try:
item['word_count'] = get_text_word_count(item['body_html'])
except:
pass
else:
# Items with no body are ignored
continue
item['source'] = self._get_head_value(doc, 'Agency')
# if the source document contains no agency then by definition it is unknown
if item['source'] is None:
item['source'] = 'UNKNOWN'
else:
# check if the source of the document was Newscentre
dc_unique = doc.find('dcdossier').get('unique')
if dc_unique.startswith('NC.') and last_line is not None:
# The AFR summary articles all have agency values 25 chars long
if len(item['source']) == 25:
item['source'] = 'AAP'
# is it a numeric Agency
elif self._get_head_value(doc, 'Agency').isdigit():
sign_off = last_line.split(' ')
if len(sign_off) > 0:
item['source'] = sign_off[0].upper()
else:
                                item['source'] = last_line.upper()
# clean up what we have extracted
if item['source'].startswith('AAP'):
item['source'] = 'AAP'
else:
# make sure it is one of the known values
if item['source'] not in {'AAP', 'AP', 'REUT', 'Asia Pulse', 'DPA', 'AFP', 'RAW', 'NZA',
'NZPA', 'KRT', 'PA', 'PAA', 'SNI', 'REUTERS'}:
print('Source : {}'.format(item['source']))
item['source'] = 'UNKNOWN'
# self._addkeywords('AsiaPulseCodes', doc, item)
byline = self._get_head_value(doc, 'Byline')
if byline:
item['byline'] = byline
# item['service'] = self._get_head_value(doc,'Service')
category = self._get_head_value(doc, 'Category')
if not category:
publication_name = self._get_head_value(doc, 'PublicationName')
if publication_name in pubnames:
category = pubnames[publication_name]
if category:
anpacategory = {}
anpacategory['qcode'] = category
for anpa_category in self._anpa_categories['items']:
if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
break
item['anpa_category'] = [anpacategory]
self._addkeywords('CompanyCodes', doc, item)
item['keyword'] = self._get_head_value(doc, 'Keyword')
item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')
                original_source = self._get_head_value(doc, 'Author')
                if original_source:
                    item['original_source'] = original_source
item['headline'] = self._get_head_value(doc, 'Headline')
code = self._get_head_value(doc, 'SubjectRefNum')
if code and len(code) == 7:
code = '0' + code
if code and code in subject_codes:
item['subject'] = []
item['subject'].append({'qcode': code, 'name': subject_codes[code]})
try:
process_iptc_codes(item, None)
except:
pass
slug = self._get_head_value(doc, 'SLUG')
if slug:
item['slugline'] = slug
else:
item['slugline'] = self._get_head_value(doc, 'Keyword')
take_key = self._get_head_value(doc, 'Takekey')
if take_key:
item['anpa_take_key'] = take_key
self._addkeywords('Topic', doc, item)
# self._addkeywords('Selectors', doc, item)
item['pubstatus'] = 'usable'
# this is required for the archived service additional lookup
item['item_id'] = item['guid']
item[config.VERSION] = 1
item['flags'] = {'marked_archived_only': True}
# item['_id'] = ObjectId(id.rjust(24,'0'))
item['_id'] = ObjectId()
items.append(item)
if self._limit:
self._limit -= 1
# print(item)
except Exception as ex:
                print('Exception parsing DC document {}'.format(id))
pass
try:
res = superdesk.get_resource_service('archived')
s = time.time()
res.post(items)
print('Post to Batch to Superdesk took {:.2f}'.format(time.time() - s))
except Exception as ex:
if ex.code == 409:
                print('Key clash exception detected')
# create a list of the guids we tried to post
guids = [g['guid'] for g in items]
# create a query for all those id's
query = {
'size': self.BATCH_SIZE,
'query': {
'filtered': {
'filter': {
"terms": {
"guid": [guids]
}
}
}
}
}
req = ParsedRequest()
repos = 'archived'
req.args = {'source': json.dumps(query), 'repo': repos}
search_res = superdesk.get_resource_service('search')
existing = search_res.get(req=req, lookup=None)
existing_guids = [e['guid'] for e in existing]
not_existing = [g for g in guids if g not in existing_guids]
for missing_guid in not_existing:
i = [m for m in items if m['guid'] == missing_guid]
original = res.find_one(req=None, guid=i[0]['guid'])
if not original:
try:
s = time.time()
res.post(i)
print('Post single item to Superdesk in {:.2f} seconds'.format(time.time() - s))
except Exception as ex:
print('Exception posting single item')
else:
print('Exception posting batch')
superdesk.command('app:import_text_archive', AppImportTextArchiveCommand())
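# Hypothetical invocation sketch (not part of the original file). The command
# registered above is typically run through the superdesk manager script; only
# the option names below come from option_list, every value is a placeholder:
#
#   python manage.py app:import_text_archive --start 1 --user admin \
#       --password secret --url_root "https://dc.example.com/rest/" \
#       --query "category:a" --count 1000 --direction forward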
| agpl-3.0 | -5,094,927,837,455,075,000 | 39.97619 | 120 | 0.486797 | false |
dawran6/zulip | zerver/management/commands/generate_invite_links.py | 22 | 2688 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zerver.models import UserProfile, PreregistrationUser, \
get_user_profile_by_email, get_realm, email_allowed_for_realm
class Command(BaseCommand):
help = "Generate activation links for users and print them to stdout."
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--realm',
dest='string_id',
type=str,
help='The realm in which to generate the invites (use for open realms).')
parser.add_argument('--force',
dest='force',
action="store_true",
default=False,
help='Override that the domain is restricted to external users.')
parser.add_argument('emails', metavar='<email>', type=str, nargs='*',
help='email of user to generate an activation link for')
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
duplicates = False
for email in options['emails']:
try:
get_user_profile_by_email(email)
print(email + ": There is already a user registered with that address.")
duplicates = True
continue
except UserProfile.DoesNotExist:
pass
if duplicates:
return
realm = None
string_id = options["string_id"]
if string_id:
realm = get_realm(string_id)
if not realm:
print("The realm %s doesn't exist yet, please create it first." % (string_id,))
print("Don't forget default streams!")
exit(1)
for email in options['emails']:
if realm:
if not email_allowed_for_realm(email, realm) and not options["force"]:
print("You've asked to add an external user (%s) to a closed realm (%s)." % (
email, string_id))
print("Are you sure? To do this, pass --force.")
exit(1)
else:
prereg_user = PreregistrationUser(email=email, realm=realm)
else:
prereg_user = PreregistrationUser(email=email)
prereg_user.save()
print(email + ": " + Confirmation.objects.get_link_for_object(prereg_user, host=realm.host))
| apache-2.0 | 8,779,287,674,726,566,000 | 40.353846 | 104 | 0.547991 | false |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/test/test_anydbm.py | 93 | 2288 | #! /usr/bin/env python
"""Test script for the anydbm module
based on testdumbdbm.py
"""
import os
import unittest
import glob
from test import test_support
_fname = test_support.TESTFN
# Silence Py3k warning
anydbm = test_support.import_module('anydbm', deprecated=True)
def _delete_files():
# we don't know the precise name the underlying database uses
# so we use glob to locate all names
for f in glob.glob(_fname + "*"):
try:
os.unlink(f)
except OSError:
pass
class AnyDBMTestCase(unittest.TestCase):
_dict = {'0': '',
'a': 'Python:',
'b': 'Programming',
'c': 'the',
'd': 'way',
'f': 'Guido',
'g': 'intended'
}
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def test_anydbm_creation(self):
f = anydbm.open(_fname, 'c')
self.assertEqual(f.keys(), [])
for key in self._dict:
f[key] = self._dict[key]
self.read_helper(f)
f.close()
def test_anydbm_modification(self):
self.init_db()
f = anydbm.open(_fname, 'c')
self._dict['g'] = f['g'] = "indented"
self.read_helper(f)
f.close()
def test_anydbm_read(self):
self.init_db()
f = anydbm.open(_fname, 'r')
self.read_helper(f)
f.close()
def test_anydbm_keys(self):
self.init_db()
f = anydbm.open(_fname, 'r')
keys = self.keys_helper(f)
f.close()
def read_helper(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key])
def init_db(self):
f = anydbm.open(_fname, 'n')
for k in self._dict:
f[k] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = f.keys()
keys.sort()
dkeys = self._dict.keys()
dkeys.sort()
self.assertEqual(keys, dkeys)
return keys
def tearDown(self):
_delete_files()
def setUp(self):
_delete_files()
def test_main():
try:
test_support.run_unittest(AnyDBMTestCase)
finally:
_delete_files()
if __name__ == "__main__":
test_main()
| mit | 8,362,809,782,611,743,000 | 22.587629 | 65 | 0.523601 | false |
stevemao/brackets-shell | gyp/pylib/gyp/generator/msvs.py | 17 | 118480 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_shard',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
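# Worked example (illustrative; assumes the module-level fixpath_prefix has
# not been set yet): forward slashes become backslashes, and paths that start
# with a '$(...)' macro are left unprefixed.
def _ExampleFixPaths():
  assert _FixPaths(['a/b/c.cc', '$(OutDir)/gen/foo.h']) == [
      'a\\b\\c.cc', '$(OutDir)\\gen\\foo.h']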
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
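# Minimal sketch of how _ToolAppend accumulates list settings (illustrative
# only; the tool and flag names below are hypothetical):
def _ExampleToolAppend():
  tools = {}
  _ToolAppend(tools, 'VCCLCompilerTool', 'AdditionalOptions', ['/MP'])
  _ToolAppend(tools, 'VCCLCompilerTool', 'AdditionalOptions', ['/bigobj'])
  # tools == {'VCCLCompilerTool': {'AdditionalOptions': ['/MP', '/bigobj']}}
  return tools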
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
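# Worked example with made-up configuration data (illustrative only): a gyp
# configuration named 'Debug_x64' carrying 'msvs_configuration_platform': 'x64'
# yields 'Debug|x64'; without the key the platform defaults to Win32.
def _ExampleConfigFullName():
  assert _ConfigFullName('Debug_x64',
                         {'msvs_configuration_platform': 'x64'}) == 'Debug|x64'
  assert _ConfigFullName('Debug', {}) == 'Debug|Win32'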
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd, do_setup_env):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = ''
if do_setup_env:
cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
cmd += 'set CYGWIN=nontsec&& '
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Add call before command to ensure that commands can be tied together one
# after the other without aborting in Incredibuild, since IB makes a bat
# file out of the raw command string, and some commands (like python) are
# actually batch files themselves.
command.insert(0, 'call')
# Fix the paths
# TODO(quote): This is a really ugly heuristic, and will miss path fixing
# for arguments like "--arg=path" or "/opt:path".
# If the argument starts with a slash or dash, it's probably a command line
# switch
arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)','%INPUTDIR%') for i in arguments]
arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
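# Illustrative sketch (hypothetical rule command; assumes the module-level
# fixpath_prefix has not been set): with cygwin disabled, a rule command is
# turned into a single 'call ...' batch line with fixed, quoted paths.
def _ExampleBuildCommandLineForRuleRaw(spec):
  line = _BuildCommandLineForRuleRaw(spec, ['python', 'tools/gen.py',
                                            '$(InputPath)'],
                                     cygwin_shell=False, has_input_path=True,
                                     quote_cmd=True, do_setup_env=False)
  # line == 'call python "tools\\gen.py" "$(InputPath)"'
  return line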
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd, do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
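# Worked example (illustrative): an argument that itself contains quotes is
# backslash-escaped and then wrapped in quotes so that CommandLineToArgv
# reproduces the original string.
def _ExampleEscapeCommandLineArgumentForMSVS():
  assert (_EscapeCommandLineArgumentForMSVS('say "hello"') ==
          '"say \\"hello\\""')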
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1))/2*4)*'\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Done if not processing outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set(_FixPaths(inputs))
outputs = set(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
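# Example (hypothetical, assuming _FixPaths leaves these names unchanged):
# with excluded_sources = ['a.c', 'gen.py'] and actions_to_add keyed on
# 'gen.py', the result is ['a.c']; 'gen.py' is taken out of the exclusion
# list so its attached action still runs.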
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, project_dir, sources, excluded_sources, list_excluded))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platform names.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
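# Example of the generated file name (hypothetical values): for proj_path
# 'foo.vcproj', domain 'CORP' and username 'alice', the .user file is named
# 'foo.vcproj.CORP.alice.user'.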
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function adds the main part of the vcproj file that is configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
A pair of lists: (include directory paths, resource include directory paths).
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of library names to link against.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on Windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, keeping only the last occurrence of each
# library, while preserving order.
found = set()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
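# Example (hypothetical spec): libraries ['-lfoo', 'bar.lib', 'foo'] become
# ['bar.lib', 'foo.lib'] - the '-l' prefix is stripped, '.lib' is appended
# when there is no extension, and only the last occurrence of 'foo' survives.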
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
msbuild: True when generating for MSBuild (.vcxproj); False for MSVS (.vcproj).
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
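# Example (hypothetical spec, msbuild=False): for type 'executable' with
# product_name 'foo', the function returns roughly
# ('$(OutDir)\foo.exe', 'VCLinkerTool', 'Link'); a 'static_library' target
# would instead land in '$(OutDir)lib\' with a '.lib' suffix.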
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
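# Example (hypothetical config): defines [['FOO', 1], 'BAR'] produce
# ['FOO=1', 'BAR'] - list entries are joined with '=', plain entries are
# passed through as strings.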
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
else:
intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
generator_flags: Dictionary of generator-specific flags.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The directory that contains the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
list_excluded: Whether excluded source files should still be listed in the
project file.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 means 'create the precompiled header' from this stub.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
# Don't setup_env every time. When all the actions are run together in one
# batch file in VS, the PATH will grow too long.
# Membership in this set means that the cygwin environment has been set up,
# and does not need to be set up again.
have_setup_env = set()
for a in actions:
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
attached_to = inputs[0]
need_setup_env = attached_to not in have_setup_env
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
do_setup_env=need_setup_env)
have_setup_env.add(attached_to)
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'xcopy /e /f /y "%s\\%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
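# Example (hypothetical, relative path): given root = {},
# _GetPathDict(root, 'chrome/common') grows root into
# {'chrome': {'common': {}}} and returns the inner {} for 'common', so the
# caller can attach projects at that node.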
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
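# Example (hypothetical tree): _CollapseSingles('base', {'base.vcproj': proj})
# returns proj directly, flattening a folder that would otherwise contain a
# single project of the same name; any other dict is returned with its
# children collapsed recursively.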
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A dict of created projects, keyed by qualified target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
# Set project toolset if any (MS build only)
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%d' % (parts[0], number)
return '#'.join(parts)
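# Example (hypothetical names): _ShardName('foo#target', 1) -> 'foo_1#target',
# and _ShardName('bar', 2) -> 'bar_2' (no '#' to split on).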
def _ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
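# Example of the source striping above (hypothetical target): with
# 'msvs_shard': 2 and sources ['a.cc', 'b.cc', 'c.cc', 'd.cc'], shard 0 gets
# ['a.cc', 'c.cc'] and shard 1 gets ['b.cc', 'd.cc']; dependencies on the
# original target are rewritten to depend on every shard.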
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print 'Building [%s]: %s' % (config, arguments)
rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
params: Dictionary of global generator parameters (options, flags, version).
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise Exception(error_message)
else:
print >>sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
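# Example (hypothetical sources): 'foo.cc' maps to ('compile', 'ClCompile'),
# 'res.rc' to ('resource', 'ResourceCompile') and 'api.idl' to
# ('midl', 'Midl'), unless a rule claims the extension first.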
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
inputs: The name of the _inputs element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
# Ensure the rule name contains only word characters (letters, digits and
# underscores).
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.inputs = self.rule_name + '_inputs'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
do_setup_env=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
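# Example (hypothetical configuration): for name 'Debug_x64' with
# settings {'msvs_configuration_platform': 'x64'}, the pair is
# ('Debug', 'x64') and the condition string is
# "'$(Configuration)|$(Platform)'=='Debug|x64'".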
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
additional_props = {}
props_specified = False
for name, settings in sorted(configurations.iteritems()):
configuration = _GetConfigurationCondition(name, settings)
if settings.has_key('msbuild_props'):
additional_props[configuration] = _FixPaths(settings['msbuild_props'])
props_specified = True
else:
additional_props[configuration] = ''
if not props_specified:
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
else:
sheets = []
for condition, props in additional_props.iteritems():
import_group = [
'ImportGroup',
{'Label': 'PropertySheets',
'Condition': condition
},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
for props_file in props:
import_group.append(['Import', {'Project':props_file}])
sheets.append(import_group)
return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
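# Example (values taken from the tables above): _ConvertMSVSCharacterSet('1')
# returns 'Unicode' and _ConvertMSVSConfigurationType('2') returns
# 'DynamicLibrary'; non-numeric values pass through unchanged.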
def _GetMSBuildAttributes(spec, config, build_file):
if 'msbuild_configuration_attributes' not in config:
msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
else:
config_type = _GetMSVSConfigurationType(spec, build_file)
config_type = _ConvertMSVSConfigurationType(config_type)
msbuild_attributes = config.get('msbuild_configuration_attributes', {})
msbuild_attributes.setdefault('ConfigurationType', config_type)
output_dir = msbuild_attributes.get('OutputDirectory',
'$(SolutionDir)$(Configuration)')
msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in msbuild_attributes:
intermediate = _FixPath('$(Configuration)') + '\\'
msbuild_attributes['IntermediateDirectory'] = intermediate
if 'CharacterSet' in msbuild_attributes:
msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
msbuild_attributes['CharacterSet'])
if 'TargetName' not in msbuild_attributes:
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
msbuild_attributes['TargetName'] = target_name
# Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
# (depending on the tool used) to avoid MSB8012 warning.
msbuild_tool_map = {
'executable': 'Link',
'shared_library': 'Link',
'loadable_module': 'Link',
'static_library': 'Lib',
}
msbuild_tool = msbuild_tool_map.get(spec['type'])
if msbuild_tool:
msbuild_settings = config['finalized_msbuild_settings']
out_file = msbuild_settings[msbuild_tool].get('OutputFile')
if out_file:
msbuild_attributes['TargetPath'] = _FixPath(out_file)
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
_AddConditionalProperty(properties, condition, 'TargetName',
attributes['TargetName'])
if attributes.get('TargetPath'):
_AddConditionalProperty(properties, condition, 'TargetPath',
attributes['TargetPath'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
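# A small illustrative sketch (hypothetical condition strings, not taken from a
# real project): repeated calls for the same property value simply accumulate
# their conditions, e.g.
#
#   props = {}
#   _AddConditionalProperty(props, "'$(Configuration)'=='Debug'",
#                           'CharacterSet', 'Unicode')
#   _AddConditionalProperty(props, "'$(Configuration)'=='Release'",
#                           'CharacterSet', 'Unicode')
#   # props == {'CharacterSet': {'Unicode': ["'$(Configuration)'=='Debug'",
#   #                                        "'$(Configuration)'=='Release'"]}}
#
# _GetMSBuildPropertyGroup below can then collapse a value that holds a
# condition for every configuration into a single unconditional element.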
# Regex for msvs variable references (i.e. $(FOO)).
MSVS_VARIABLE_REFERENCE = re.compile('\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
edges = set()
for value in sorted(properties[node].keys()):
# Add to edges all $(...) references to variables.
#
      # Variable references that refer to names not in properties are excluded.
      # These can exist, for instance, to refer to built-in definitions like
      # $(SolutionDir).
#
# Self references are ignored. Self reference is used in a few places to
# append to the default value. I.e. PATH=$(PATH);other_path
edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
if v in properties and v != node]))
return edges
properties_ordered = gyp.common.TopologicallySorted(
properties.keys(), GetEdges)
# Walk properties in the reverse of a topological sort on
# user_of_variable -> used_variable as this ensures variables are
# defined before they are used.
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
for name in reversed(properties_ordered):
values = properties[name]
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
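# Rough shape of the returned structure (hypothetical conditions and values):
# for the 'Configuration' label used by _GetMSBuildConfigurationDetails, a
# property that differs per configuration yields one conditional child per
# value, roughly
#   [['PropertyGroup', {'Label': 'Configuration'},
#     ['ConfigurationType', {'Condition': cond_debug}, 'Application'],
#     ['ConfigurationType', {'Condition': cond_release}, 'DynamicLibrary']]]
# which easy_xml later serialises into conditional XML elements.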
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the setting with
    # the inherited %(Name) value, so append it explicitly.
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalDependencies',
'AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
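# A short illustrative example (hypothetical values, and assuming
# ConvertVCMacrosToMSBuild leaves plain tokens unchanged): list settings are
# joined with ';' and, for the setting names listed above, a trailing
# '%(Name)' reference is appended so MSBuild extends rather than replaces the
# inherited value:
#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions',
#                                ['WIN32', '_DEBUG'])
#   # -> 'WIN32;_DEBUG;%(PreprocessorDefinitions)'
# 'AdditionalOptions' for ClCompile, Link and Lib is joined with spaces instead.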
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
if not source in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
msbuildproj_dir = os.path.dirname(project.path)
if msbuildproj_dir and not os.path.exists(msbuildproj_dir):
os.makedirs(msbuildproj_dir)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded))
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
missing_sources = _VerifySourcesExist(sources, project_dir)
for configuration in configurations.itervalues():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += _GetMSBuildLocalProperties(project.msbuild_toolset)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
return missing_sources
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = set()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
# We can't join the commands simply with && because the command line will
# get too long. See also _AddActions: cygwin's setup_env mustn't be called
# for every invocation or the command that sets the PATH will grow too
# long.
command = (
'\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands))
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
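# Illustrative note (hypothetical commands): two merged actions such as
#   ['call python gen_a.py', 'call python gen_b.py']
# are joined into a single CustomBuild command of the form
#   'call python gen_a.py\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'
#   'call python gen_b.py'
# so a failing step stops the chain without building one huge '&&' command line.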
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| mit | -6,071,213,355,596,386,000 | 36.387188 | 134 | 0.641602 | false |
dulems/hue | desktop/core/ext-py/configobj/validate.py | 42 | 46768 | # validate.py
# A Validator object
# Copyright (C) 2005 Michael Foord, Mark Andrews, Nicola Larosa
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mark AT la-la DOT com
# nico AT tekNico DOT net
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# For information about bugfixes, updates and support, please join the
# ConfigObj mailing list:
# http://lists.sourceforge.net/lists/listinfo/configobj-develop
# Comments, suggestions and bug reports welcome.
"""
The Validator object is used to check that supplied values
conform to a specification.
The value can be supplied as a string - e.g. from a config file.
In this case the check will also *convert* the value to
the required type. This allows you to add validation
as a transparent layer to access data stored as strings.
The validation checks that the data is correct *and*
converts it to the expected type.
Some standard checks are provided for basic data types.
Additional checks are easy to write. They can be
provided when the ``Validator`` is instantiated or
added afterwards.
The standard functions work with the following basic data types :
* integers
* floats
* booleans
* strings
* ip_addr
plus lists of these datatypes
Adding additional checks is done through coding simple functions.
The full set of standard checks are :
* 'integer': matches integer values (including negative)
Takes optional 'min' and 'max' arguments : ::
integer()
integer(3, 9) # any value from 3 to 9
integer(min=0) # any positive value
integer(max=9)
* 'float': matches float values
Has the same parameters as the integer check.
* 'boolean': matches boolean values - ``True`` or ``False``
Acceptable string values for True are :
true, on, yes, 1
Acceptable string values for False are :
false, off, no, 0
Any other value raises an error.
* 'ip_addr': matches an Internet Protocol address, v.4, represented
by a dotted-quad string, i.e. '1.2.3.4'.
* 'string': matches any string.
Takes optional keyword args 'min' and 'max'
to specify min and max lengths of the string.
* 'list': matches any list.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the list. (Always returns a list.)
* 'tuple': matches any tuple.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the tuple. (Always returns a tuple.)
* 'int_list': Matches a list of integers.
Takes the same arguments as list.
* 'float_list': Matches a list of floats.
Takes the same arguments as list.
* 'bool_list': Matches a list of boolean values.
Takes the same arguments as list.
* 'ip_addr_list': Matches a list of IP addresses.
Takes the same arguments as list.
* 'string_list': Matches a list of strings.
Takes the same arguments as list.
* 'mixed_list': Matches a list with different types in
specific positions. List size must match
the number of arguments.
Each position can be one of :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So to specify a list with two strings followed
by two integers, you write the check as : ::
mixed_list('string', 'string', 'integer', 'integer')
* 'pass': This check matches everything! It never fails
and the value is unchanged.
It is also the default if no check is specified.
* 'option': This check matches any from a list of options.
You specify this check with : ::
option('option 1', 'option 2', 'option 3')
You can supply a default value (returned if no value is supplied)
using the default keyword argument.
You specify a list argument for default using a list constructor syntax in
the check : ::
checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
A badly formatted set of arguments will raise a ``VdtParamError``.
"""
__docformat__ = "restructuredtext en"
__version__ = '1.0.0'
__revision__ = '$Id: validate.py 123 2005-09-08 08:54:28Z fuzzyman $'
__all__ = (
'__version__',
'dottedQuadToNum',
'numToDottedQuad',
'ValidateError',
'VdtUnknownCheckError',
'VdtParamError',
'VdtTypeError',
'VdtValueError',
'VdtValueTooSmallError',
'VdtValueTooBigError',
'VdtValueTooShortError',
'VdtValueTooLongError',
'VdtMissingValue',
'Validator',
'is_integer',
'is_float',
'is_boolean',
'is_list',
'is_tuple',
'is_ip_addr',
'is_string',
'is_int_list',
'is_bool_list',
'is_float_list',
'is_string_list',
'is_ip_addr_list',
'is_mixed_list',
'is_option',
'__docformat__',
)
import re
_list_arg = re.compile(r'''
(?:
([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
)
\)
)
''', re.VERBOSE | re.DOTALL) # two groups
_list_members = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
''', re.VERBOSE | re.DOTALL) # one group
_paramstring = r'''
(?:
(
(?:
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
\)
)|
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?)| # unquoted
(?: # keyword argument
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
)
)
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
)
'''
_matchstring = '^%s*' % _paramstring
# Python pre 2.2.1 doesn't have bool
try:
bool
except NameError:
def bool(val):
"""Simple boolean equivalent function. """
if val:
return 1
else:
return 0
def dottedQuadToNum(ip):
"""
Convert decimal dotted quad string to long integer
>>> int(dottedQuadToNum('1 '))
1
>>> int(dottedQuadToNum(' 1.2'))
16777218
>>> int(dottedQuadToNum(' 1.2.3 '))
16908291
>>> int(dottedQuadToNum('1.2.3.4'))
16909060
>>> dottedQuadToNum('1.2.3. 4')
16909060
>>> dottedQuadToNum('255.255.255.255')
4294967295L
>>> dottedQuadToNum('255.255.255.256')
Traceback (most recent call last):
ValueError: Not a good dotted-quad IP: 255.255.255.256
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
try:
return struct.unpack('!L',
socket.inet_aton(ip.strip()))[0]
except socket.error:
# bug in inet_aton, corrected in Python 2.3
if ip.strip() == '255.255.255.255':
return 0xFFFFFFFFL
else:
raise ValueError('Not a good dotted-quad IP: %s' % ip)
return
def numToDottedQuad(num):
"""
Convert long int to dotted quad string
>>> numToDottedQuad(-1L)
Traceback (most recent call last):
ValueError: Not a good numeric IP: -1
>>> numToDottedQuad(1L)
'0.0.0.1'
>>> numToDottedQuad(16777218L)
'1.0.0.2'
>>> numToDottedQuad(16908291L)
'1.2.0.3'
>>> numToDottedQuad(16909060L)
'1.2.3.4'
>>> numToDottedQuad(4294967295L)
'255.255.255.255'
>>> numToDottedQuad(4294967296L)
Traceback (most recent call last):
ValueError: Not a good numeric IP: 4294967296
"""
# import here to avoid it when ip_addr values are not used
import socket, struct
# no need to intercept here, 4294967295L is fine
if num > 4294967295L or num < 0:
raise ValueError('Not a good numeric IP: %s' % num)
try:
return socket.inet_ntoa(
struct.pack('!L', long(num)))
except (socket.error, struct.error, OverflowError):
raise ValueError('Not a good numeric IP: %s' % num)
class ValidateError(Exception):
"""
This error indicates that the check failed.
It can be the base class for more specific errors.
Any check function that fails ought to raise this error.
(or a subclass)
>>> raise ValidateError
Traceback (most recent call last):
ValidateError
"""
class VdtMissingValue(ValidateError):
"""No value was supplied to a check that needed one."""
class VdtUnknownCheckError(ValidateError):
"""An unknown check function was requested"""
def __init__(self, value):
"""
>>> raise VdtUnknownCheckError('yoda')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
"""
ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
class VdtParamError(SyntaxError):
"""An incorrect parameter was passed"""
def __init__(self, name, value):
"""
>>> raise VdtParamError('yoda', 'jedi')
Traceback (most recent call last):
VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
"""
SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
class VdtTypeError(ValidateError):
"""The value supplied was of the wrong type"""
def __init__(self, value):
"""
>>> raise VdtTypeError('jedi')
Traceback (most recent call last):
VdtTypeError: the value "jedi" is of the wrong type.
"""
ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
class VdtValueError(ValidateError):
"""The value supplied was of the correct type, but was not an allowed value."""
def __init__(self, value):
"""
>>> raise VdtValueError('jedi')
Traceback (most recent call last):
VdtValueError: the value "jedi" is unacceptable.
"""
ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
class VdtValueTooSmallError(VdtValueError):
"""The value supplied was of the correct type, but was too small."""
def __init__(self, value):
"""
>>> raise VdtValueTooSmallError('0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "0" is too small.
"""
ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
class VdtValueTooBigError(VdtValueError):
"""The value supplied was of the correct type, but was too big."""
def __init__(self, value):
"""
>>> raise VdtValueTooBigError('1')
Traceback (most recent call last):
VdtValueTooBigError: the value "1" is too big.
"""
ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
class VdtValueTooShortError(VdtValueError):
"""The value supplied was of the correct type, but was too short."""
def __init__(self, value):
"""
>>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short.
"""
ValidateError.__init__(
self,
'the value "%s" is too short.' % (value,))
class VdtValueTooLongError(VdtValueError):
"""The value supplied was of the correct type, but was too long."""
def __init__(self, value):
"""
>>> raise VdtValueTooLongError('jedie')
Traceback (most recent call last):
VdtValueTooLongError: the value "jedie" is too long.
"""
ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
class Validator(object):
"""
Validator is an object that allows you to register a set of 'checks'.
These checks take input and test that it conforms to the check.
This can also involve converting the value from a string into
the correct datatype.
The ``check`` method takes an input string which configures which
check is to be used and applies that check to a supplied value.
An example input string would be:
'int_range(param1, param2)'
You would then provide something like:
>>> def int_range_check(value, min, max):
... # turn min and max from strings to integers
... min = int(min)
... max = int(max)
... # check that value is of the correct type.
... # possible valid inputs are integers or strings
... # that represent integers
... if not isinstance(value, (int, long, basestring)):
... raise VdtTypeError(value)
... elif isinstance(value, basestring):
... # if we are given a string
... # attempt to convert to an integer
... try:
... value = int(value)
... except ValueError:
... raise VdtValueError(value)
... # check the value is between our constraints
... if not min <= value:
... raise VdtValueTooSmallError(value)
... if not value <= max:
... raise VdtValueTooBigError(value)
... return value
>>> fdict = {'int_range': int_range_check}
>>> vtr1 = Validator(fdict)
>>> vtr1.check('int_range(20, 40)', '30')
30
>>> vtr1.check('int_range(20, 40)', '60')
Traceback (most recent call last):
VdtValueTooBigError: the value "60" is too big.
New functions can be added with : ::
>>> vtr2 = Validator()
>>> vtr2.functions['int_range'] = int_range_check
Or by passing in a dictionary of functions when Validator
is instantiated.
Your functions *can* use keyword arguments,
but the first argument should always be 'value'.
If the function doesn't take additional arguments,
the parentheses are optional in the check.
It can be written with either of : ::
keyword = function_name
keyword = function_name()
The first program to utilise Validator() was Michael Foord's
ConfigObj, an alternative to ConfigParser which supports lists and
can validate a config file using a config schema.
For more details on using Validator with ConfigObj see:
http://www.voidspace.org.uk/python/configobj.html
"""
# this regex does the initial parsing of the checks
_func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL)
# this regex takes apart keyword arguments
_key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL)
# this regex finds keyword=list(....) type values
_list_arg = _list_arg
# this regex takes individual values out of lists - in one pass
_list_members = _list_members
# These regexes check a set of arguments for validity
# and then pull the members out
_paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
_matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
def __init__(self, functions=None):
"""
>>> vtri = Validator()
"""
self.functions = {
'': self._pass,
'integer': is_integer,
'float': is_float,
'boolean': is_boolean,
'ip_addr': is_ip_addr,
'string': is_string,
'list': is_list,
'tuple': is_tuple,
'int_list': is_int_list,
'float_list': is_float_list,
'bool_list': is_bool_list,
'ip_addr_list': is_ip_addr_list,
'string_list': is_string_list,
'mixed_list': is_mixed_list,
'pass': self._pass,
'option': is_option,
'force_list': force_list,
}
if functions is not None:
self.functions.update(functions)
# tekNico: for use by ConfigObj
self.baseErrorClass = ValidateError
self._cache = {}
def check(self, check, value, missing=False):
"""
Usage: check(check, value)
Arguments:
check: string representing check to apply (including arguments)
value: object to be checked
Returns value, converted to correct type if necessary
If the check fails, raises a ``ValidateError`` subclass.
>>> vtor.check('yoda', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('yoda()', '')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
>>> vtor.check('string(default="")', '', missing=True)
''
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if missing:
if default is None:
# no information needed here - to be handled by caller
raise VdtMissingValue()
value = self._handle_none(default)
if value is None:
return None
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _handle_none(self, value):
if value == 'None':
value = None
elif value in ("'None'", '"None"'):
# Special case a quoted None
value = self._unquote(value)
return value
def _parse_with_caching(self, check):
if check in self._cache:
fun_name, fun_args, fun_kwargs, default = self._cache[check]
# We call list and dict below to work with *copies* of the data
# rather than the original (which are mutable of course)
fun_args = list(fun_args)
fun_kwargs = dict(fun_kwargs)
else:
fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
fun_kwargs = dict((str(key), value) for (key, value) in fun_kwargs.items())
self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
return fun_name, fun_args, fun_kwargs, default
def _check_value(self, value, fun_name, fun_args, fun_kwargs):
try:
fun = self.functions[fun_name]
except KeyError:
raise VdtUnknownCheckError(fun_name)
else:
return fun(value, *fun_args, **fun_kwargs)
def _parse_check(self, check):
fun_match = self._func_re.match(check)
if fun_match:
fun_name = fun_match.group(1)
arg_string = fun_match.group(2)
arg_match = self._matchfinder.match(arg_string)
if arg_match is None:
# Bad syntax
raise VdtParamError('Bad syntax in check "%s".' % check)
fun_args = []
fun_kwargs = {}
# pull out args of group 2
for arg in self._paramfinder.findall(arg_string):
# args may need whitespace removing (before removing quotes)
arg = arg.strip()
listmatch = self._list_arg.match(arg)
if listmatch:
key, val = self._list_handle(listmatch)
fun_kwargs[key] = val
continue
keymatch = self._key_arg.match(arg)
if keymatch:
val = keymatch.group(2)
if not val in ("'None'", '"None"'):
# Special case a quoted None
val = self._unquote(val)
fun_kwargs[keymatch.group(1)] = val
continue
fun_args.append(self._unquote(arg))
else:
# allows for function names without (args)
return check, (), {}, None
# Default must be deleted if the value is specified too,
# otherwise the check function will get a spurious "default" keyword arg
try:
default = fun_kwargs.pop('default', None)
except AttributeError:
# Python 2.2 compatibility
default = None
try:
default = fun_kwargs['default']
del fun_kwargs['default']
except KeyError:
pass
return fun_name, fun_args, fun_kwargs, default
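    # Illustrative decomposition (values shown as the strings the check
    # functions actually receive): a check such as
    #   "integer(0, max=9, default=5)"
    # is parsed by _parse_check into roughly
    #   fun_name   = 'integer'
    #   fun_args   = ['0']
    #   fun_kwargs = {'max': '9'}
    #   default    = '5'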
def _unquote(self, val):
"""Unquote a value if necessary."""
if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
val = val[1:-1]
return val
def _list_handle(self, listmatch):
"""Take apart a ``keyword=list('val, 'val')`` type string."""
out = []
name = listmatch.group(1)
args = listmatch.group(2)
for arg in self._list_members.findall(args):
out.append(self._unquote(arg))
return name, out
def _pass(self, value):
"""
Dummy check that always passes
>>> vtor.check('', 0)
0
>>> vtor.check('', '0')
'0'
"""
return value
def get_default_value(self, check):
"""
Given a check, return the default value for the check
(converted to the right type).
If the check doesn't specify a default value then a
``KeyError`` will be raised.
"""
fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
if default is None:
raise KeyError('Check "%s" has no default value.' % check)
value = self._handle_none(default)
if value is None:
return value
return self._check_value(value, fun_name, fun_args, fun_kwargs)
def _is_num_param(names, values, to_float=False):
"""
Return numbers from inputs or raise VdtParamError.
Lets ``None`` pass through.
Pass in keyword argument ``to_float=True`` to
use float for the conversion rather than int.
>>> _is_num_param(('', ''), (0, 1.0))
[0, 1]
>>> _is_num_param(('', ''), (0, 1.0), to_float=True)
[0.0, 1.0]
>>> _is_num_param(('a'), ('a'))
Traceback (most recent call last):
VdtParamError: passed an incorrect value "a" for parameter "a".
"""
fun = to_float and float or int
out_params = []
for (name, val) in zip(names, values):
if val is None:
out_params.append(val)
elif isinstance(val, (int, long, float, basestring)):
try:
out_params.append(fun(val))
except ValueError, e:
raise VdtParamError(name, val)
else:
raise VdtParamError(name, val)
return out_params
# built in checks
# you can override these by setting the appropriate name
# in Validator.functions
# note: if the params are specified wrongly in your input string,
# you will also raise errors.
def is_integer(value, min=None, max=None):
"""
A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2')
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9)
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35')
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35)
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0
"""
(min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
if not isinstance(value, (int, long, basestring)):
raise VdtTypeError(value)
if isinstance(value, basestring):
# if it's a string - does it represent an integer ?
try:
value = int(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
def is_float(value, min=None, max=None):
"""
A check that tests that a given value is a float
(an integer will be accepted), and optionally - that it is between bounds.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
This can accept negative values.
>>> vtor.check('float', '2')
2.0
From now on we multiply the value to avoid comparing decimals
>>> vtor.check('float', '-6.8') * 10
-68.0
>>> vtor.check('float', '12.2') * 10
122.0
>>> vtor.check('float', 8.4) * 10
84.0
>>> vtor.check('float', 'a')
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('float(10.1)', '10.2') * 10
102.0
>>> vtor.check('float(max=20.2)', '15.1') * 10
151.0
>>> vtor.check('float(10.0)', '9.0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "9.0" is too small.
>>> vtor.check('float(max=20.0)', '35.0')
Traceback (most recent call last):
VdtValueTooBigError: the value "35.0" is too big.
"""
(min_val, max_val) = _is_num_param(
('min', 'max'), (min, max), to_float=True)
if not isinstance(value, (int, long, float, basestring)):
raise VdtTypeError(value)
if not isinstance(value, float):
# if it's a string - does it represent a float ?
try:
value = float(value)
except ValueError:
raise VdtTypeError(value)
if (min_val is not None) and (value < min_val):
raise VdtValueTooSmallError(value)
if (max_val is not None) and (value > max_val):
raise VdtValueTooBigError(value)
return value
bool_dict = {
True: True, 'on': True, '1': True, 'true': True, 'yes': True,
False: False, 'off': False, '0': False, 'false': False, 'no': False,
}
def is_boolean(value):
"""
Check if the value represents a boolean.
>>> vtor.check('boolean', 0)
0
>>> vtor.check('boolean', False)
0
>>> vtor.check('boolean', '0')
0
>>> vtor.check('boolean', 'off')
0
>>> vtor.check('boolean', 'false')
0
>>> vtor.check('boolean', 'no')
0
>>> vtor.check('boolean', 'nO')
0
>>> vtor.check('boolean', 'NO')
0
>>> vtor.check('boolean', 1)
1
>>> vtor.check('boolean', True)
1
>>> vtor.check('boolean', '1')
1
>>> vtor.check('boolean', 'on')
1
>>> vtor.check('boolean', 'true')
1
>>> vtor.check('boolean', 'yes')
1
>>> vtor.check('boolean', 'Yes')
1
>>> vtor.check('boolean', 'YES')
1
>>> vtor.check('boolean', '')
Traceback (most recent call last):
VdtTypeError: the value "" is of the wrong type.
>>> vtor.check('boolean', 'up')
Traceback (most recent call last):
VdtTypeError: the value "up" is of the wrong type.
"""
if isinstance(value, basestring):
try:
return bool_dict[value.lower()]
except KeyError:
raise VdtTypeError(value)
# we do an equality test rather than an identity test
    # this ensures Python 2.2 compatibility
# and allows 0 and 1 to represent True and False
if value == False:
return False
elif value == True:
return True
else:
raise VdtTypeError(value)
def is_ip_addr(value):
"""
Check that the supplied value is an Internet Protocol address, v.4,
represented by a dotted-quad string, i.e. '1.2.3.4'.
>>> vtor.check('ip_addr', '1 ')
'1'
>>> vtor.check('ip_addr', ' 1.2')
'1.2'
>>> vtor.check('ip_addr', ' 1.2.3 ')
'1.2.3'
>>> vtor.check('ip_addr', '1.2.3.4')
'1.2.3.4'
>>> vtor.check('ip_addr', '0.0.0.0')
'0.0.0.0'
>>> vtor.check('ip_addr', '255.255.255.255')
'255.255.255.255'
>>> vtor.check('ip_addr', '255.255.255.256')
Traceback (most recent call last):
VdtValueError: the value "255.255.255.256" is unacceptable.
>>> vtor.check('ip_addr', '1.2.3.4.5')
Traceback (most recent call last):
VdtValueError: the value "1.2.3.4.5" is unacceptable.
>>> vtor.check('ip_addr', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, basestring):
raise VdtTypeError(value)
value = value.strip()
try:
dottedQuadToNum(value)
except ValueError:
raise VdtValueError(value)
return value
def is_list(value, min=None, max=None):
"""
Check that the value is a list of values.
You can optionally specify the minimum and maximum number of members.
It does no check on list members.
>>> vtor.check('list', ())
[]
>>> vtor.check('list', [])
[]
>>> vtor.check('list', (1, 2))
[1, 2]
>>> vtor.check('list', [1, 2])
[1, 2]
>>> vtor.check('list(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
[1, 2, 3, 4]
>>> vtor.check('list', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('list', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
if isinstance(value, basestring):
raise VdtTypeError(value)
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return list(value)
def is_tuple(value, min=None, max=None):
"""
Check that the value is a tuple of values.
You can optionally specify the minimum and maximum number of members.
It does no check on members.
>>> vtor.check('tuple', ())
()
>>> vtor.check('tuple', [])
()
>>> vtor.check('tuple', (1, 2))
(1, 2)
>>> vtor.check('tuple', [1, 2])
(1, 2)
>>> vtor.check('tuple(3)', (1, 2))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2)" is too short.
>>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
>>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
(1, 2, 3, 4)
>>> vtor.check('tuple', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('tuple', '12')
Traceback (most recent call last):
VdtTypeError: the value "12" is of the wrong type.
"""
return tuple(is_list(value, min, max))
def is_string(value, min=None, max=None):
"""
Check that the supplied value is a string.
You can optionally specify the minimum and maximum number of members.
>>> vtor.check('string', '0')
'0'
>>> vtor.check('string', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
>>> vtor.check('string(2)', '12')
'12'
>>> vtor.check('string(2)', '1')
Traceback (most recent call last):
VdtValueTooShortError: the value "1" is too short.
>>> vtor.check('string(min=2, max=3)', '123')
'123'
>>> vtor.check('string(min=2, max=3)', '1234')
Traceback (most recent call last):
VdtValueTooLongError: the value "1234" is too long.
"""
if not isinstance(value, basestring):
raise VdtTypeError(value)
(min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
try:
num_members = len(value)
except TypeError:
raise VdtTypeError(value)
if min_len is not None and num_members < min_len:
raise VdtValueTooShortError(value)
if max_len is not None and num_members > max_len:
raise VdtValueTooLongError(value)
return value
def is_int_list(value, min=None, max=None):
"""
Check that the value is a list of integers.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an integer.
>>> vtor.check('int_list', ())
[]
>>> vtor.check('int_list', [])
[]
>>> vtor.check('int_list', (1, 2))
[1, 2]
>>> vtor.check('int_list', [1, 2])
[1, 2]
>>> vtor.check('int_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_integer(mem) for mem in is_list(value, min, max)]
def is_bool_list(value, min=None, max=None):
"""
Check that the value is a list of booleans.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a boolean.
>>> vtor.check('bool_list', ())
[]
>>> vtor.check('bool_list', [])
[]
>>> check_res = vtor.check('bool_list', (True, False))
>>> check_res == [True, False]
1
>>> check_res = vtor.check('bool_list', [True, False])
>>> check_res == [True, False]
1
>>> vtor.check('bool_list', [True, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_boolean(mem) for mem in is_list(value, min, max)]
def is_float_list(value, min=None, max=None):
"""
Check that the value is a list of floats.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a float.
>>> vtor.check('float_list', ())
[]
>>> vtor.check('float_list', [])
[]
>>> vtor.check('float_list', (1, 2.0))
[1.0, 2.0]
>>> vtor.check('float_list', [1, 2.0])
[1.0, 2.0]
>>> vtor.check('float_list', [1, 'a'])
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
"""
return [is_float(mem) for mem in is_list(value, min, max)]
def is_string_list(value, min=None, max=None):
"""
Check that the value is a list of strings.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is a string.
>>> vtor.check('string_list', ())
[]
>>> vtor.check('string_list', [])
[]
>>> vtor.check('string_list', ('a', 'b'))
['a', 'b']
>>> vtor.check('string_list', ['a', 1])
Traceback (most recent call last):
VdtTypeError: the value "1" is of the wrong type.
>>> vtor.check('string_list', 'hello')
Traceback (most recent call last):
VdtTypeError: the value "hello" is of the wrong type.
"""
if isinstance(value, basestring):
raise VdtTypeError(value)
return [is_string(mem) for mem in is_list(value, min, max)]
def is_ip_addr_list(value, min=None, max=None):
"""
Check that the value is a list of IP addresses.
You can optionally specify the minimum and maximum number of members.
Each list member is checked that it is an IP address.
>>> vtor.check('ip_addr_list', ())
[]
>>> vtor.check('ip_addr_list', [])
[]
>>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
['1.2.3.4', '5.6.7.8']
>>> vtor.check('ip_addr_list', ['a'])
Traceback (most recent call last):
VdtValueError: the value "a" is unacceptable.
"""
return [is_ip_addr(mem) for mem in is_list(value, min, max)]
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
    A minimum greater than one will fail if the user only supplies a
string.
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max)
fun_dict = {
'integer': is_integer,
'float': is_float,
'ip_addr': is_ip_addr,
'string': is_string,
'boolean': is_boolean,
}
def is_mixed_list(value, *args):
"""
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
This test requires an elaborate setup, because of a change in error string
    output from the interpreter between Python 2.2 and 2.3.
>>> res_seq = (
... 'passed an incorrect value "',
... 'yoda',
... '" for parameter "mixed_list".',
... )
>>> res_str = "'".join(res_seq)
>>> try:
... vtor.check('mixed_list("yoda")', ('a'))
... except VdtParamError, err:
... str(err) == res_str
1
"""
try:
length = len(value)
except TypeError:
raise VdtTypeError(value)
if length < len(args):
raise VdtValueTooShortError(value)
elif length > len(args):
raise VdtValueTooLongError(value)
try:
return [fun_dict[arg](val) for arg, val in zip(args, value)]
except KeyError, e:
raise VdtParamError('mixed_list', e)
def is_option(value, *options):
"""
This check matches the value to any of a set of options.
>>> vtor.check('option("yoda", "jedi")', 'yoda')
'yoda'
>>> vtor.check('option("yoda", "jedi")', 'jed')
Traceback (most recent call last):
VdtValueError: the value "jed" is unacceptable.
>>> vtor.check('option("yoda", "jedi")', 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
"""
if not isinstance(value, basestring):
raise VdtTypeError(value)
if not value in options:
raise VdtValueError(value)
return value
def _test(value, *args, **keywargs):
"""
A function that exists for test purposes.
>>> checks = [
... '3, 6, min=1, max=3, test=list(a, b, c)',
... '3',
... '3, 6',
... '3,',
... 'min=1, test="a b c"',
... 'min=5, test="a, b, c"',
... 'min=1, max=3, test="a, b, c"',
... 'min=-100, test=-99',
... 'min=1, max=3',
... '3, 6, test="36"',
... '3, 6, test="a, b, c"',
... '3, max=3, test=list("a", "b", "c")',
... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
... "test='x=fish(3)'",
... ]
>>> v = Validator({'test': _test})
>>> for entry in checks:
... print v.check(('test(%s)' % entry), 3)
(3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
(3, ('3',), {})
(3, ('3', '6'), {})
(3, ('3',), {})
(3, (), {'test': 'a b c', 'min': '1'})
(3, (), {'test': 'a, b, c', 'min': '5'})
(3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
(3, (), {'test': '-99', 'min': '-100'})
(3, (), {'max': '3', 'min': '1'})
(3, ('3', '6'), {'test': '36'})
(3, ('3', '6'), {'test': 'a, b, c'})
(3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
(3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
(3, (), {'test': 'x=fish(3)'})
>>> v = Validator()
>>> v.check('integer(default=6)', '3')
3
>>> v.check('integer(default=6)', None, True)
6
>>> v.get_default_value('integer(default=6)')
6
>>> v.get_default_value('float(default=6)')
6.0
>>> v.get_default_value('pass(default=None)')
>>> v.get_default_value("string(default='None')")
'None'
>>> v.get_default_value('pass')
Traceback (most recent call last):
KeyError: 'Check "pass" has no default value.'
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
['1', '2', '3', '4']
>>> v = Validator()
>>> v.check("pass(default=None)", None, True)
>>> v.check("pass(default='None')", None, True)
'None'
>>> v.check('pass(default="None")', None, True)
'None'
>>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
['1', '2', '3', '4']
Bug test for unicode arguments
>>> v = Validator()
>>> v.check(u'string(min=4)', u'test')
u'test'
>>> v = Validator()
>>> v.get_default_value(u'string(min=4, default="1234")')
u'1234'
>>> v.check(u'string(min=4, default="1234")', u'test')
u'test'
>>> v = Validator()
>>> default = v.get_default_value('string(default=None)')
>>> default == None
1
"""
return (value, args, keywargs)
def _test2():
"""
>>>
>>> v = Validator()
>>> v.get_default_value('string(default="#ff00dd")')
'#ff00dd'
>>> v.get_default_value('integer(default=3) # comment')
3
"""
def _test3():
r"""
>>> vtor.check('string(default="")', '', missing=True)
''
>>> vtor.check('string(default="\n")', '', missing=True)
'\n'
>>> print vtor.check('string(default="\n")', '', missing=True),
<BLANKLINE>
>>> vtor.check('string()', '\n')
'\n'
>>> vtor.check('string(default="\n\n\n")', '', missing=True)
'\n\n\n'
>>> vtor.check('string()', 'random \n text goes here\n\n')
'random \n text goes here\n\n'
>>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
... '', missing=True)
' \nrandom text\ngoes \n here\n\n '
>>> vtor.check("string(default='\n\n\n')", '', missing=True)
'\n\n\n'
>>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
'\n'
>>> vtor.check("string_list()", ['foo', '\n', 'bar'])
['foo', '\n', 'bar']
>>> vtor.check("string_list(default=list('\n'))", '', missing=True)
['\n']
"""
if __name__ == '__main__':
# run the code tests in doctest format
import sys
import doctest
m = sys.modules.get('__main__')
globs = m.__dict__.copy()
globs.update({
'vtor': Validator(),
})
doctest.testmod(m, globs=globs)
| apache-2.0 | -7,835,903,823,894,607,000 | 30.923549 | 104 | 0.545095 | false |
wuzhy/autotest | client/tests/linus_stress/linus_stress.py | 5 | 1435 | import os
from autotest_lib.client.bin import test, utils
class linus_stress(test.test):
version = 1
def setup(self):
os.mkdir(self.srcdir)
os.chdir(self.bindir)
utils.system('cp linus_stress.c src/')
os.chdir(self.srcdir)
utils.system(utils.get_cc() + ' linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')
def initialize(self):
self.job.require_gcc()
def run_the_test(self, iterations):
utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
cmd = os.path.join(self.srcdir, 'linus_stress')
args = "%d" % (utils.memtotal() / 32)
profilers = self.job.profilers
if profilers.present():
profilers.start(self)
for i in range(iterations):
utils.system(cmd + ' ' + args)
if profilers.present():
profilers.stop(self)
profilers.report(self)
def execute(self, iterations = 1):
dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
try:
self.run_the_test(iterations)
finally:
utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
| gpl-2.0 | -4,460,709,582,484,501,500 | 30.195652 | 97 | 0.604878 | false |
garbear/EventGhost | plugins/RadioSure/__init__.py | 1 | 248696 | version="0.2.13"
# plugins/RadioSure/__init__.py
#
# Copyright (C) 2009, 2010, 2011 Pako ([email protected])
#
# This file is a plugin for EventGhost.
#
# EventGhost is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# EventGhost is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EventGhost; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Changelog (in reverse chronological order):
# -------------------------------------------
# 0.2.13 by Pako 2011-04-06 11:07 UTC+1
# - Bug fix - bad name of language file
# 0.2.12 by Pako 2011-04-05 17:38 UTC+1
# - Added first version of Favorites manager
# - Added "menu browser"
# - Added many new actions
# 0.2.11 by Pako 2011-03-03 09:08 UTC+1
# - The cursor is changed to indicate the existence of a context menu
# - If the file "contextCursor.cur" exists, it is used as the cursor wherever a context menu is available
# 0.2.10 by Pako 2011-02-12 09:53 UTC+1
# - FixedTimeCtrl replaced by eg.TimeCtrl
# 0.2.9 by Pako 2011-01-15 11:50 UTC+1
# - a different cursor shape over the table of schedules indicates that a context menu is available
# 0.2.8 by Pako 2011-01-11 14:25 UTC+1
# - if logging is turned on, the whole command line is written to the log file
# 0.2.7 by Pako 2011-01-07 18:39 UTC+1
# - fixed bug - the Scheduler window now opens even if Scheduler.xml does not contain the attribute "Position"
# (this can happen when you upgrade from version 0.2.0 and lower)
# 0.2.6 by Pako 2011-01-07 11:39 UTC+1
# - fixed bug - incorrect reading of favorites when the new structure of the RadioSure.xml file is used
# 0.2.5 by Pako 2010-12-28 16:02 UTC+1
# - added popup menu and features "Move schedule up/down"
# 0.2.4 by Pako 2010-12-24 12:08 UTC+1
# - there is no need to reinstall this plugin when the Radio?Sure! installation (especially its paths) changes
# 0.2.3 by Pako 2010-12-24 08:30 UTC+1
# - the scheduler dialog now opens even if there is no "Favorites" node in RadioSure.xml
# 0.2.2 by Pako 2010-12-19 15:54 UTC+1
# - changed the way of paths settings to the RadioSure.exe and RadioSure.xml
# 0.2.1 by Pako 2010-12-19 08:19 UTC+1
# - scheduler dialog remembers its position even after closing EventGhost
# - bugfix - "Add schedule" enable buttons, when schedule list is empty
# 0.2.0 by Pako 2010-12-14 11:13 UTC+1
# - a comprehensive rework according to the plugin SchedulGhost:
# - addded new types of schedule
# - changed format of "Scheduler.xml" file
# - added ability to affect certain types of schedules according to public holidays
# - added option to select the first day of the week (Sunday or Monday)
# - scheduler dialog remembers its position
# - scheduler dialog is not modal and can be minimized
# - added Apply button (scheduler dialog)
# - added new actions - "Run schedule immediately"
# 0.1.9 by Pako 2010-12-09 13:52 UTC+1
# - corrections to previous versions (additionally, one pseudo-private method is redefined)
# 0.1.8 by Pako 2010-12-06 20:10 UTC+1
# - wx.lib.masked.TimeCtrl workaround (see http://trac.wxwidgets.org/ticket/11171)
# 0.1.7 by Pako 2010-07-22 20:27 GMT+1
# - bugfix
# 0.1.6 by Pako 2010-07-22 10:30 GMT+1
# - added wx.ComboBox for Scheduler actions
# 0.1.5 by Pako 2010-07-10 08:21 GMT+1
# - added Scheduler
# - added guid attribute
# 0.1.4 by Pako 2010-03-23 11:20 GMT+1
# - added action Random favorite
# 0.1.3 by Pako 2010-03-22 09:09 GMT+1
# - added actions Start and Stop observation of titlebar
#===============================================================================
eg.RegisterPlugin(
name = "RadioSure",
author = "Pako",
version = version,
kind = "program",
guid = "{84703620-87B4-4982-A9AB-DA1B3F8D22EA}",
description = ur'''<rst>
Adds actions to control the `Radio?Sure!`_
.. _Radio?Sure!: http://www.radiosure.com/ ''',
createMacrosOnAdd = True,
url = "http://www.eventghost.net/forum/viewtopic.php?f=9&t=2359",
icon = (
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAADAFBMVEUA//+Gh4ju7/Ds"
"7e7q6+zo6evm5+nk5efi4+Xg4ePd3+Db3N7Z2tzW2Nrr7e5+g4bo6erm5+js7e3a3N7Y"
"2dt3fYDT1dfp6uzn6Orl5uhIS03V1tjS1NbQ0tTn6Onl5ufi5OXS09XP0dPNz9HR09XP"
"0NPMztDKzM7h4+Tf4OLd3uCVmp1OUlQZGhoYGRlLTlCHjZDMzdDJy87Hycve4OHc3t+d"
"oqQyNDU3OjtSVlgpKywqLC2IjpHHyMvExsnc3d/Z29xWWlyBh4p2fH9pbnFfZGYsLi9L"
"T1HExsjCxMYbHB2XnJ5MUFJKTU9yeHtVWVvBw8a/wcTW19kcHR6UmZypra9RVVeGjI9l"
"am0aGxu/wcO9v8JcYWNeY2W5vL6xtLamqqyboKK9vsG7vcA9QEG6vL+5u76LkJPIycyy"
"tbddYmRYXV+jqKqDiYy3ubzFx8nDxcfBw8W4ur22uLsAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQcfgAAAAAAAXQciD0AAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAABAAgAAAAAAAAAAAAAAAAAAAAAAAAAAABGa1gAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAEAAAAAAAAAAAAPAAAAAAEAAAEAAADQckT/C08AAAAAAAAAAAAAAAMAAADf"
"BnAAAAAAAAAAAAAAAAAAAAQAAQEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACnZAh6AAAA"
"AXRSTlMAQObYZgAAAAlwSFlzAAALEgAACxIB0t1+/AAAAKdJREFUeNpjYGBgRAVoAl9A"
"ArwQCbDMayYgg5OTk5GBg4PhPzs7OwNIAESDARsbGwNICxtQKQeQLwSkb4EE1JEMPQfS"
"wgMGjNwgANZix8h4UwOqYgdIxdmznGKcIPAMaBVIReARW6DcNW2QimUgAWlGe7DynTY8"
"jPOYwNYzs/+//vqB3ANmZrAWVUeg9EsGCcafHIyTQQKGjDZAAUYOJt4/rH0M6N4HAFCJ"
"GrcTFgV2AAAAAElFTkSuQmCC"
),
)
#===============================================================================
import wx.grid as gridlib
import subprocess
import wx.calendar as wxCal
from wx.lib.masked import EVT_TIMEUPDATE
from subprocess import Popen
from os import listdir, remove, rename
from os.path import abspath, join, dirname, split, isfile, exists
from calendar import day_name, month_name, monthrange
from wx.lib.mixins.listctrl import CheckListCtrlMixin
from _winreg import OpenKey, HKEY_CURRENT_USER, EnumValue, QueryValueEx, CloseKey
from time import sleep, mktime, strptime, localtime
from datetime import datetime as dt
from datetime import timedelta as td
from copy import deepcopy as cpy
from xml.dom import minidom as miniDom
from threading import Timer, Thread, Event
from eg.WinApi.Utils import GetMonitorDimensions
from eg.WinApi.Dynamic import CreateEvent, SetEvent, PostMessage
from eg.WinApi.Dynamic import SendMessage, ShowWindow, RegisterWindowMessage
from eg.WinApi import SendMessageTimeout
from win32gui import GetWindowText, GetWindow, GetDlgCtrlID, GetMenuItemCount
from win32gui import GetWindowPlacement, GetDlgItem, GetClassName, GetSubMenu
from win32file import GetFileAttributes
from random import randrange
from codecs import lookup
from codecs import open as openFile
from winsound import PlaySound, SND_ASYNC
from locale import strxfrm
from ctypes import c_long, c_ulong, c_int, byref, sizeof, Structure, c_buffer
from ctypes.wintypes import WinDLL
_kernel32 = WinDLL("kernel32")
_user32 = WinDLL("user32")
from sys import getfilesystemencoding
FSE = getfilesystemencoding()
if eg.Version.base >= "0.4.0":
from eg.Classes.MainFrame.TreeCtrl import DropTarget as EventDropTarget
IMAGES_DIR = eg.imagesDir
else:
from eg.Classes.MainFrame.TreeCtrl import EventDropTarget
IMAGES_DIR = eg.IMAGES_DIR
ARIAL_INFO = "0;-35;0;0;0;700;0;0;0;0;3;2;1;34;Arial"
TAHOMA_INFO = "0;-27;0;0;0;400;0;0;0;0;3;2;1;34;Tahoma"
PROCESS_TERMINATE = 1
WM_CLOSE = 16
WM_COMMAND = 273
WM_SYSCOMMAND = 274
TBM_GETPOS = 1024
TBM_SETPOS = 1029
SC_RESTORE = 61728
#SW_HIDE = 0
#SW_MINIMIZE = 6
SW_RESTORE = 9
GW_CHILD = 5
GW_HWNDNEXT = 2
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_SYSTEM = 4
SYS_VSCROLL_X = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
#===============================================================================
CUR_STRING = (
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAABnRSTlMA/wBmAADomHeP"
"AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAMK0lEQVR42gEgDN/zAQAAAOns8xZ6DQAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAApKDTXa9EAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA////AQEB1tLSKZQuAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAP///youLtds0gAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAD///8qLi7XbNIAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAACAAAAAAAA/wD+AAAA////Ki4u12zSAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAgAAAAAAAP7///8A/gAAAP///youLtds0wAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAIAAAAAAAAAAAD+////AP4AAAD//PwqLi3YbNQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAA"
"AAAAAAAAAAAA/gD/APwAAAMD9PjwKS4s2W3UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAA"
"AAABAAH6AQAAAPb69gkHC+ns9CgtLdlt1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAQAC+wLz"
"+/P0+/QB+QHw8fEWEwvs6ecoLS/VcN8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////vv+9/32AwMDAvsC"
"AQkABQEHAAAAAAAAAAAAAQEJ/2b4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAIFAgsIDObm8gYKBwUBBwEB"
"AQEBDQEBAQEBCAAA+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAADz9vsjIBicn6UAAAAAABTd2Nj/"
"ZgD/Zvn/ZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAABAAAA////8/X6DgwH/2YAAZoA////9fHwDBAR/2YAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAgAAAAAAAA4MB+Pk6wAAAAAAFMnFwgsPEAAEGLtQxAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAQAAAABAQHe3+YcghUAAADk59ocGRL////o4coYGx7/ZgAAAAAAAAAEoBoA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD8YOYAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAE/2YAAAAAAIcaAAAAAAAAG38SAAAQ8u7qFxodAAAAAAAAAAAAAAAAAAAAiodz+Pj4"
"DAwMAQEBBQUF/Pz8+Pj47e3tCAgIg4aaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAA"
"AAAAAAAAAAAAAAAAAAAAAPz/7wQBASEkOszP2ACXKAAAAAAAAAAAAFBQUCMjIwAAAAAA"
"AAAAAAAAAAAAAHJycoeHh4OGmgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAA"
"AAAAAAAAAAAAAAADZwH/ZgAAAMYAlygAAAAAAAAAAAAAAAAiIiLMzMwFBQULCwsAAAAB"
"AQHn5+cXFxcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeHh46OjoAAAABQUF6enp+fn5CAgI"
"BgYGISEhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAM7OzqysrA8PD4WFhYKCgvz8/AgICIKCgouL"
"iwMGGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH/ZgAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAEoBr9+uY2NjbKysogICDg4OAAAAAAAAAAAAAAAAADBhr8"
"YOYAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAwcHBlZWVAAAA39/fZWVlAAAAysrKnZ2ds7OzAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAD4+PqysrAAAAAAAAJqamgAAAKmpqRMTE0xMTAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAtLS0AAADb29sEBAQAAAAxMTHt7e0AAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAB/2YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABKAa"
"/PnlAAAADQ0NCQkJs7OzZmZm0dHRAAAAAAAABAcb/GDmAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMrKygEB"
"AfT09Ovr65aWlmRkZAEBAURERAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA2Njb/////////"
"///u7u79/f3n5+e8vLwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB/2YAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABKAaAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAA/GDmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf9mAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADPbTswenz69gAAAABJRU5ErkJggg=="
)
#===============================================================================
class ConfigData(eg.PersistentData):
pos = None
plcmnt = None
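# Note: attributes of an eg.PersistentData subclass are stored by EventGhost
# between sessions; "pos" and "plcmnt" are used further below to remember the
# scheduler and favorites-manager window placement.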
class Text:
label1 = "Radio?Sure! installation folder:"
label2 = "RadioSure.xml and Scheduler.xml folder location:"
# filemask = "RadioSure.exe|RadioSure.exe|All-Files (*.*)|*.*"
text1 = "Couldn't find RadioSure window !"
browseTitle = "Selected folder:"
toolTipFolder = "Press button and browse to select folder ..."
boxTitle = 'Folder "%s" is incorrect'
toolTipFile = "Press button and browse to select logfile ..."
browseFile = 'Select the logfile'
boxMessage1 = 'Missing file %s !'
logLabel = "Log scheduler events to following logfile:"
# nextRun = "Next run: %s"
none = "None"
execut = 'Schedule "%s" - execution. Next run: %s'
cmdLine = 'Commandline: %s'
cancAndDel = 'Schedule "%s" canceled and deleted'
cancAndDis = 'Schedule "%s" canceled (disabled)'
newSched = 'Schedule "%s" scheduled. Next run: %s'
re_Sched = 'Schedule "%s" re-scheduled. New next run: %s'
start = 'RadioSure plugin started. All valid schedules will be scheduled:'
    stop = 'RadioSure plugin stopped. All scheduled schedules will be canceled:'
canc = 'Schedule "%s" canceled'
launched = "Schedule.Launched"
holidButton = "Public holidays ..."
managerButton = "Show scheduler"
fixBoxLabel = "Fixed public holidays:"
varBoxLabel = "Variable public holidays:"
ok = "OK"
cancel = "Cancel"
yes = "Yes"
no = "No"
add = "Add"
delete = "Delete"
first_day = "The first day of the week:"
xmlComment = "Radio?Sure! scheduler configuration file. Updated at %s."
messBoxTit0 = "EventGhost - Radio?Sure! plugin"
messBoxTit1 = "Attention !"
message2 = """You can not start another instance of Radio?Sure!,
because the maximum number of instances %i is exhausted!"""
    message3 = '''You cannot start another instance of Radio?Sure!,
because the option "Allow only one instance" is enabled!'''
autoClose = "Auto close after %i s"
toolTip = "Drag-and-drop an event from the log into the box."
popup = (
"Delete item",
"Delete all items",
)
clear = "Clear all"
opened = "Opened"
closed = "Closed"
root = "Main (root) menu"
class OpenManager:
dialogTitle = "Radio?Sure! Favorites manager %s (plugin for EventGhost)"
toolTipDelete = "Delete item(s)"
toolTipUp = "Move item(s) up"
toolTipDown = "Move item(s) down"
moveTop = "Move item(s) top"
moveBottom = "Move item(s) bottom"
exportSel = "Export selected item(s) to XML file"
exportAll = "Export all items to XML file"
toolTipExport = "Export selected (if any) or all items to XML file"
toolTipImport = "Import from XML file"
toolTipImportSR = "Import from Screamer Radio"
sort = "Sort alphabetically"
play = "Play selected favorite just now !"
refresh = "Refresh favorites list from RadioSure.xml"
export = "Export"
imprt = "Import"
importSR = "Import SR"
lblSource = "Source:"
lblGenre = "Genre:"
lblLanguage = "Language:"
lblCountry = "Country:"
ok = "OK"
cancel = "Cancel"
apply = "Apply"
lblList = "Favorites list:"
xmlComment1 = "Radio?Sure! favorites backup file."
xmlComment2 = 'Saved at %s by EventGhost.'
choose = 'Choose a XML file to be import'
save = 'Backup favorites as XML file ...'
wildcard = "XML file (*.xml)|*.xml"
removeDupl = "Remove duplications"
messBoxTit2 = """Attention !
Radio?Sure! is running !"""
messBoxTit3 = """Attention !
Recording is in progress !"""
messBoxTit5 = "Congratulations!"
messBoxTit6 = "Announcement"
messBoxTit7 = "Warning"
message1 = """Your version of Radio?Sure! allows you to save only the first %i favorite stations !
Other favorites will be ignored."""
message2 = """If you want to save the modified list of favorite stations,
must be overwritten file RadioSure.xml.
You can not overwrite the file RadioSure.xml,
if the Radio?Sure! is currently running.
Otherwise, the favorites list is returned to its original condition.
Press button %s, if the program Radio?Sure! can be closed.
Press button %s, if the program Radio?Sure! can not be closed."""
message3 = "Failed to save data to the file RadioSure.xml !"
message4 = 'It is not possible to import because there is a problem.\n\
The file "%s" does not have the expected structure.'
message5 = "Your list of favorite stations has been successfully updated!"
message6 = "Failed to close Radio?Sure!"
message7 = "Your list of favorite stations has not been updated!"
message8 = """Your list of favorite stations contain (in sources) duplications!
They will be saved only unique items."""
message9 = "Failed to open Radio?Sure!"
class OpenScheduler:
dialogTitle = "Radio?Sure! Scheduler %s (plugin for EventGhost)"
header = (
"Enabled",
"Schedule title",
"Last run",
"Next run",
)
sched_type = (
"Only once (or yearly)",
"Daily",
"Weekly",
"Monthly / weekday",
"Monthly / day",
"Periodically",
)
toolTipFile = """Press button and browse to select file ...
File type (as .mp3) need not be completed. Will be added automatically."""
browseTitle = "Select a folder and enter file name (without file type):"
serial_num = (
"first",
"second",
"third",
"fourth",
"fifth",
"last"
)
the = "The"
in_ = "in"
buttons = (
"Add new",
"Duplicate",
"Delete",
"OK",
"Cancel",
"Apply"
)
type_label = "Schedule type:"
source = "Source URL:"
favorite = "Favorite station title:"
filename = "Destination file name (optional):"
chooseDay = "Choose day"
theEvery = "The every"
yearly = "Every year on the same day"
chooseTime = "Choose start time and duration (00:00 = constantly)"
choosePeriod = "Choose period"
andThenEvery = "Repeat every"
units = (
"hours",
"days",
"weeks",
"months",
"years",
)
start = "Start time (HH:MM:SS):"
length = "Duration (HH:MM):"
boxTitle = "Your setup is not properly configured !"
boxTexts = (
"Schedule title must not be an empty string !",
"Schedule title must be unique !",
'Determine the source URL, or set the mode "Do nothing" !',
'Not allowed to set "Do nothing" while also "None" event !',
'Must be chosen Schedule type !',
"The span must be shorter than the period !",
)
workModeLabel = "Radio?Sure! working mode:"
workModes = (
"Playing (audibly)",
"Recording (audibly)",
"Recording (soundlessly)",
"Do nothing"
)
windOpenLabel = "Window open:"
windOpenChoices =(
"Visible",
"Hidden"
)
triggEvtLabel = "Trigger an event:"
triggEvtChoices = (
"None",
"Schedule title",
"All parameters"
)
testButton = "Test now"
testRun = 'Schedule "%s" - TEST execution. Possible next run: %s'
holidCheck_1 = "Do not trigger events for a chosen day if it happens to be a holiday"
holidCheck_2 = "Do also trigger events for a non-chosen day if it happens to be a holiday"
popup = (
"Add schedule",
"Duplicate schedule",
"Delete schedule",
"Enable all schedules",
"Disable all schedules",
"Move schedule up",
"Move schedule down",
)
#===============================================================================
def my_list2cmdline(seq):
""" FIXING subprocess.list2cmdline
Workaround, because subprocess.list2cmdline does not work with arguments like:
filename="... ...". Ie, when we need quotes inside the string, and somewhere
inside is a space character. When you properly prepare all items
(including the quotes), it works!
There is also done simultaneously filesystemencode encoding
(otherwise there UnicodeDecodeError occurs...)"""
return ' '.join([arg.encode(FSE) if isinstance(arg, unicode) else arg for arg in seq])
subprocess.list2cmdline = my_list2cmdline
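# Illustrative sketch (added for clarity; the values are hypothetical): with
# the stock subprocess.list2cmdline an already quoted argument that contains a
# space, e.g. u'/filename="my file"', would get re-quoted and escaped, which
# Radio?Sure! cannot parse. The replacement above just joins the pre-quoted
# items and encodes unicode ones with the filesystem encoding, so for example
#   my_list2cmdline(['RadioSure.exe', '/record', u'/filename="C:\\My Files\\rec"'])
# yields the plain string
#   'RadioSure.exe /record /filename="C:\\My Files\\rec"'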
#===============================================================================
class MyDirBrowseButton(eg.DirBrowseButton):
    def GetTextCtrl(self): # now I can make the built-in textCtrl
return self.textControl # non-editable !!!
#===============================================================================
class MyFileBrowseButton(eg.FileBrowseButton):
    def GetTextCtrl(self): # now I can make the built-in textCtrl
return self.textControl # non-editable !!!
#===============================================================================
class MySpinIntCtrl(eg.SpinIntCtrl):
def SetNumCtrlId(self, id):
self.numCtrl.SetId(id)
#===============================================================================
newEVT_BUTTON_AFTER = wx.NewEventType()
EVT_BUTTON_AFTER = wx.PyEventBinder(newEVT_BUTTON_AFTER, 1)
newEVT_UPDATE_DIALOG = wx.NewEventType()
EVT_UPDATE_DIALOG = wx.PyEventBinder(newEVT_UPDATE_DIALOG, 1)
newEVT_CHECKLISTCTRL = wx.NewEventType()
EVT_CHECKLISTCTRL = wx.PyEventBinder(newEVT_CHECKLISTCTRL, 1)
#===============================================================================
class UserEvent(wx.PyCommandEvent):
def __init__(self, evtType, id):
wx.PyCommandEvent.__init__(self, evtType, id)
self.myVal = None
def SetValue(self, val):
self.myVal = val
def GetValue(self):
return self.myVal
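# Typical use of the custom event plumbing above (a sketch; "ctrl", "handler"
# and "someValue" are placeholders): the firing side does
#   evt = UserEvent(newEVT_BUTTON_AFTER, ctrl.GetId())
#   evt.SetValue(someValue)
#   ctrl.GetEventHandler().ProcessEvent(evt)
# and a consumer binds it with
#   ctrl.Bind(EVT_BUTTON_AFTER, handler)   # handler reads evt.GetValue()
# The same pattern applies to EVT_UPDATE_DIALOG and EVT_CHECKLISTCTRL.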
#===============================================================================
class ExtColourSelectButton(eg.ColourSelectButton):
def __init__(self,*args,**kwargs):
eg.ColourSelectButton.__init__(self, *args)
self.title = kwargs['title']
def OnButton(self, event):
colourData = wx.ColourData()
colourData.SetChooseFull(True)
colourData.SetColour(self.value)
for i, colour in enumerate(eg.config.colourPickerCustomColours):
colourData.SetCustomColour(i, colour)
dialog = wx.ColourDialog(self.GetParent(), colourData)
dialog.SetTitle(self.title)
if dialog.ShowModal() == wx.ID_OK:
colourData = dialog.GetColourData()
self.SetValue(colourData.GetColour().Get())
event.Skip()
eg.config.colourPickerCustomColours = [
colourData.GetCustomColour(i).Get() for i in range(16)
]
dialog.Destroy()
evt = UserEvent(newEVT_BUTTON_AFTER, self.GetId())
evt.SetValue(self.GetValue())
self.GetEventHandler().ProcessEvent(evt)
#===============================================================================
class ExtFontSelectButton(eg.FontSelectButton):
def OnButton(self, event):
fontData = wx.FontData()
fontData.EnableEffects(False)
if self.value is not None:
font = wx.FontFromNativeInfoString(self.value)
fontData.SetInitialFont(font)
else:
fontData.SetInitialFont(
wx.SystemSettings_GetFont(wx.SYS_ANSI_VAR_FONT)
)
dialog = wx.FontDialog(self.GetParent(), fontData)
if dialog.ShowModal() == wx.ID_OK:
fontData = dialog.GetFontData()
font = fontData.GetChosenFont()
self.value = font.GetNativeFontInfo().ToString()
event.Skip()
dialog.Destroy()
evt = UserEvent(newEVT_BUTTON_AFTER, self.GetId())
evt.SetValue(self.GetValue())
self.GetEventHandler().ProcessEvent(evt)
#===============================================================================
class MessageBoxDialog(wx.Dialog):
def __init__(
self,
parent,
message,
caption = eg.APP_NAME,
flags=wx.OK,
time=0,
plugin=None,
pos=wx.DefaultPosition
):
PlaySound('SystemExclamation', SND_ASYNC)
if parent is None and eg.document.frame:
parent = eg.document.frame
dialogStyle = wx.DEFAULT_DIALOG_STYLE
if flags & wx.STAY_ON_TOP:
dialogStyle |= wx.STAY_ON_TOP
wx.Dialog.__init__(self, parent, -1, caption, pos, style=dialogStyle)
self.SetTitle(plugin.text.messBoxTit0)
self.SetIcon(plugin.info.icon.GetWxIcon())
bttns = []
if flags:
art = None
if flags & wx.ICON_EXCLAMATION:
art = wx.ART_WARNING
elif flags & wx.ICON_ERROR:
art = wx.ART_ERROR
elif flags & wx.ICON_QUESTION:
art = wx.ART_QUESTION
elif flags & wx.ICON_INFORMATION:
art = wx.ART_INFORMATION
if art is not None:
bmp = wx.ArtProvider.GetBitmap(art, wx.ART_MESSAGE_BOX, (32,32))
icon = wx.StaticBitmap(self, -1, bmp)
icon2 = wx.StaticBitmap(self, -1, bmp)
else:
icon = (32,32)
icon2 = (32,32)
flag = True
if flags & wx.YES:
default = False
if not flags & wx.NO_DEFAULT:
default = True
flag = False
bttns.append((wx.ID_YES, plugin.text.yes, default))
if flags & wx.NO:
default = False
if flags & wx.NO_DEFAULT:
default = True
flag = False
bttns.append((wx.ID_NO, plugin.text.no, default))
if flags & wx.OK:
bttns.append((wx.ID_OK, plugin.text.ok, flag))
if flags & wx.CANCEL:
bttns.append((wx.ID_CANCEL, plugin.text.cancel, False))
if not flags & (wx.OK | wx.CANCEL | wx.YES | wx.NO):
bttns.append((wx.ID_OK, plugin.text.ok, True))
else:
bttns.append((wx.ID_OK, plugin.text.ok, True))
if caption:
caption = wx.StaticText(self, -1, caption)
# caption.SetFont(wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD))
caption.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
message = wx.StaticText(self, -1, message)
line = wx.StaticLine(self, -1, size=(1,-1), style = wx.LI_HORIZONTAL)
bottomSizer = wx.BoxSizer(wx.HORIZONTAL)
bottomSizer.Add((10, 1))
if time:
self.cnt = time
txt = plugin.text.autoClose % self.cnt
info = wx.StaticText(self, -1, txt)
info.Enable(False)
bottomSizer.Add(info, 0, wx.TOP, 3)
def UpdateInfoLabel(evt):
self.cnt -= 1
txt = plugin.text.autoClose % self.cnt
info.SetLabel(txt)
if not self.cnt:
self.Close()
self.Bind(wx.EVT_TIMER, UpdateInfoLabel)
self.timer = wx.Timer(self)
self.timer.Start(1000)
else:
self.timer = None
bottomSizer.Add((5,1),1,wx.EXPAND)
for bttn in bttns:
b = wx.Button(self, bttn[0], bttn[1])
if bttn[2]:
#b.SetDefault()
defBtn = b # SetDefault() workaround
bottomSizer.Add(b, 0, wx.LEFT, 5)
bottomSizer.Add((10, 1))
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(icon,0,wx.LEFT|wx.RIGHT,10)
topSizer.Add((1,1),1,wx.EXPAND)
topSizer.Add(caption,0,wx.TOP,5)
topSizer.Add((1,1),1,wx.EXPAND)
topSizer.Add(icon2,0,wx.LEFT|wx.RIGHT,10)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(topSizer, 0, wx.EXPAND|wx.TOP|wx.BOTTOM,10)
mainSizer.Add(message, 0, wx.EXPAND|wx.LEFT|wx.RIGHT,10)
mainSizer.Add(line, 0, wx.EXPAND|wx.ALL,5)
mainSizer.Add(bottomSizer, 0, wx.EXPAND|wx.BOTTOM,5)
        # SetDefault() workaround: give the chosen default button the focus instead of calling SetDefault()
defBtn.SetFocus()
def OnButton(evt):
self.SetReturnCode(evt.GetId())
self.Close()
evt.Skip()
wx.EVT_BUTTON(self, -1, OnButton)
def onClose(evt):
if self.GetReturnCode() not in (wx.ID_OK, wx.ID_CANCEL, wx.ID_YES, wx.ID_NO):
self.SetReturnCode(wx.ID_CANCEL)
if self.timer:
self.timer.Stop()
del self.timer
self.MakeModal(False)
self.GetParent().Raise()
self.Destroy()
self.Bind(wx.EVT_CLOSE, onClose)
self.SetSizer(mainSizer)
self.Fit()
def MessageBox(parent, message, caption='', flags=0, time = 0, plugin = None):
mssgbx = MessageBoxDialog(parent, message, caption, flags, time, plugin)
val = mssgbx.ShowModal()
return val
#===============================================================================
class MyTimer():
def __init__(self, t, plugin):
self.timer = Timer(t, self.Run)
self.plugin = plugin
self.timer.start()
def Run(self):
try:
self.plugin.menuDlg.Close()
self.plugin.menuDlg = None
except:
pass
def Cancel(self):
self.timer.cancel()
#===============================================================================
class HolidaysFrame(wx.Dialog):
fixWin = None
varWin = None
fixHolidays = []
varHolidays = []
def __init__(self, parent, plugin):
self.plugin = plugin
wx.Dialog.__init__(
self,
parent,
-1,
style = wx.DEFAULT_DIALOG_STYLE,
name = self.plugin.text.holidButton
)
self.SetIcon(self.plugin.info.icon.GetWxIcon())
self.panel = parent
self.fixHolidays, self.varHolidays = cpy(self.panel.holidays)
self.Bind(wxCal.EVT_CALENDAR_DAY, self.OnChangeDay)
def ShowHolidaysFrame(self):
text = self.plugin.text
self.SetTitle(self.plugin.text.holidButton)
self.fixWin = CalendarPopup(self, False, self.plugin.first_day)
self.varWin = CalendarPopup(self, True, self.plugin.first_day)
calW, calH = self.fixWin.GetWinSize()
fixLbl = wx.StaticText(self, -1, text.fixBoxLabel)
variableLbl = wx.StaticText(self, -1, text.varBoxLabel)
widthList = [self.GetTextExtent("30. %s 2000" % month)[0] +
SYS_VSCROLL_X for month in list(month_name)]
widthList.append(fixLbl.GetSize()[0])
widthList.append(variableLbl.GetSize()[0])
w = max(widthList) + 5
self.SetMinSize((w + calW + 30, 2 * calH + 128))
self.fixListBox = HolidaysBox(
self,
-1,
size = wx.Size(w, 130),
style = wx.LB_SINGLE|wx.LB_NEEDED_SB
)
self.fix_add_Btn = wx.Button(self, -1, text.add)
self.fix_del_Btn = wx.Button(self, -1, text.delete)
self.fix_del_Btn.Enable(False)
self.varListBox = HolidaysBox(
self,
-1,
size = wx.Size(w, 130),
style = wx.LB_SINGLE|wx.LB_NEEDED_SB
)
self.var_add_Btn = wx.Button(self, -1, text.add)
self.var_del_Btn = wx.Button(self, -1, text.delete)
self.var_del_Btn.Enable(False)
line = wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL)
sizer = wx.BoxSizer(wx.VERTICAL)
fixSizer = wx.GridBagSizer(2, 8)
fixSizer.SetMinSize((w + 8 + calW, -1))
varSizer = wx.GridBagSizer(2, 8)
varSizer.SetMinSize((w + 8 + calW, -1))
fixSizer.Add(fixLbl, (0, 0))
fixSizer.Add(self.fixListBox, (1, 0), (3, 1))
fixSizer.Add(self.fix_add_Btn, (1, 1))
fixSizer.Add((-1, 15), (2, 1))
fixSizer.Add(self.fix_del_Btn, (3, 1))
varSizer.Add(variableLbl, (0, 0))
varSizer.Add(self.varListBox, (1, 0), (3,1))
varSizer.Add(self.var_add_Btn, (1, 1))
varSizer.Add((-1, 15), (2, 1))
varSizer.Add(self.var_del_Btn, (3, 1))
sizer.Add(fixSizer, 0, wx.EXPAND|wx.ALL, 8)
sizer.Add((-1, 12))
sizer.Add(varSizer, 0, wx.EXPAND|wx.ALL, 8)
sizer.Add((1, 16))
btn1 = wx.Button(self, wx.ID_OK)
btn1.SetLabel(text.ok)
btn1.SetDefault()
btn2 = wx.Button(self, wx.ID_CANCEL)
btn2.SetLabel(text.cancel)
btnsizer = wx.StdDialogButtonSizer()
btnsizer.AddButton(btn1)
btnsizer.AddButton(btn2)
btnsizer.Realize()
sizer.Add(line, 0, wx.EXPAND)
sizer.Add((1,5))
sizer.Add(btnsizer, 0, wx.EXPAND|wx.RIGHT, 10)
sz = self.GetMinSize()
self.SetSize(sz)
self.fixListBox.Reset(self.fixHolidays)
self.varListBox.Reset(self.varHolidays)
self.Bind(wx.EVT_CLOSE, self.onClose)
btn2.Bind(wx.EVT_BUTTON, self.onCancel)
btn1.Bind(wx.EVT_BUTTON, self.onOK)
self.fix_add_Btn.Bind(wx.EVT_BUTTON, self.onFixAddBtn)
self.var_add_Btn.Bind(wx.EVT_BUTTON, self.onVarAddBtn)
self.fix_del_Btn.Bind(wx.EVT_BUTTON, self.onFixDelBtn)
self.var_del_Btn.Bind(wx.EVT_BUTTON, self.onVarDelBtn)
self.Bind(wx.EVT_LISTBOX, self.onHolBoxSel)
sizer.Layout()
self.SetSizer(sizer)
self.MakeModal(True)
self.Show(True)
def onClose(self, evt):
self.MakeModal(False)
self.GetParent().GetParent().Raise()
self.Destroy()
def onCancel(self, evt):
self.Close()
def onOK(self, evt):
self.panel.holidays = (self.fixHolidays, self.varHolidays)
self.Close()
def onHolBoxSel(self, evt):
if evt.GetId() == self.fixListBox.GetId():
self.fix_del_Btn.Enable(True)
else:
self.var_del_Btn.Enable(True)
evt.Skip()
def onFixAddBtn(self, evt):
pos = self.ClientToScreen(self.fix_add_Btn.GetPosition())
self.fixWin.PopUp(pos, self.fixHolidays)
def onVarAddBtn(self, evt):
pos = self.ClientToScreen(self.var_add_Btn.GetPosition())
self.varWin.PopUp(pos, self.varHolidays)
def onFixDelBtn(self, evt):
self.fixHolidays.pop(self.fixListBox.GetSelection())
if self.fixListBox.Reset(self.fixHolidays):
self.fix_del_Btn.Enable(False)
def onVarDelBtn(self, evt):
self.varHolidays.pop(self.varListBox.GetSelection())
if self.varListBox.Reset(self.varHolidays):
self.var_del_Btn.Enable(False)
def OnChangeDay(self, evt):
if evt.GetId() == self.fixWin.GetCalId():
self.fixListBox.Reset(self.fixHolidays)
else:
self.varListBox.Reset(self.varHolidays)
evt.Skip()
#===============================================================================
class HolidaysBox(wx.ListBox):
def __init__ (self, parent, id, size, style):
wx.ListBox.__init__(
self,
parent = parent,
id = id,
size = size,
style = style
)
self.sel = -1
self.Bind(wx.EVT_LISTBOX, self.onHolBoxSel)
def Reset(self, list):
tmpList = []
for item in list:
day = item[-1]
day = " %i" % day if day < 10 else "%i" % day
if len(item) == 2:
tmpList.append("%s. %s" % (day, month_name[item[0]]))
else:
tmpList.append("%s. %s %i" % (day, month_name[item[1]], item[0]))
self.Set(tmpList)
if self.sel > -1 and self.sel < self.GetCount():
self.SetSelection(self.sel)
return False
else:
return True
def onHolBoxSel(self, evt):
self.sel = evt.GetSelection()
evt.Skip()
#===============================================================================
class CalendarPopup(wx.PopupWindow):
yearChange = True
def __init__(self, parent, yearChange, first_day):
self.yearChange = yearChange
wx.PopupWindow.__init__(self, parent)
startDate = wx.DateTime()
startDate.Set(1, 0)
self.cal = wxCal.CalendarCtrl(
self,
-1,
startDate,
style = (wxCal.CAL_MONDAY_FIRST, wxCal.CAL_SUNDAY_FIRST)[first_day]
| wxCal.CAL_SHOW_HOLIDAYS
| wxCal.CAL_SEQUENTIAL_MONTH_SELECTION
| wxCal.CAL_SHOW_SURROUNDING_WEEKS
)
self.cal.EnableYearChange(yearChange)
sz = self.cal.GetBestSize()
self.SetSize(sz)
self.cal.Bind(wxCal.EVT_CALENDAR_DAY, self.OnChangeDay)
self.cal.Bind(wxCal.EVT_CALENDAR_MONTH, self.OnChangeMonth)
self.cal.Bind(wxCal.EVT_CALENDAR_YEAR, self.OnChangeMonth)
self.cal.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
def OnLeaveWindow(self, evt):
self.PopDown()
evt.Skip()
def GetCalId(self):
return self.cal.GetId()
def GetWinSize(self):
return self.GetSize()
def OnChangeDay(self, evt):
date = evt.GetDate()
day, month, year = date.GetDay(), 1 + date.GetMonth(), date.GetYear()
newHoliday = (year, month, day) if self.yearChange else (month, day)
if not newHoliday in self.holidays:
self.holidays.append(newHoliday)
self.holidays.sort()
date = self.cal.GetDate()
self.cal.SetHoliday(day)
date.AddDS(wx.DateSpan.Day())
self.cal.SetDate(date)
self.Refresh()
evt.Skip()
def OnChangeMonth(self, evt = None):
date = self.cal.GetDate()
cur_month = date.GetMonth() + 1 # convert wx.DateTime 0-11 => 1-12
if self.yearChange:
cur_year = date.GetYear()
for year, month, day in self.holidays:
if year == cur_year and month == cur_month:
self.cal.SetHoliday(day)
else:
for month, day in self.holidays:
if month == cur_month:
self.cal.SetHoliday(day)
def PopUp(self, position, holidays):
self.cal.EnableHolidayDisplay(False)
self.cal.EnableHolidayDisplay(True)
self.SetPosition(position)
self.holidays = holidays
self.OnChangeMonth()
self.Show(True)
def PopDown(self):
self.Show(False)
self.Close()
#===============================================================================
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin):
def __init__(self, parent, text, width):
wx.ListCtrl.__init__(
self,
parent,
-1,
size = (width, 164),
style = wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SINGLE_SEL
)
curFile = abspath(join(dirname(__file__), "contextCursor.cur"))
img = None
if exists(curFile):
img = wx.EmptyImage(32, 32)
img.LoadFile(curFile, wx.BITMAP_TYPE_CUR)
if not img or not img.IsOk():
from cStringIO import StringIO
from base64 import b64decode
stream = StringIO(b64decode(CUR_STRING))
img = wx.ImageFromStream(stream)
stream.close()
img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0)
img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0)
self.SetCursor(wx.CursorFromImage(img))
self.selRow = -1
self.back = self.GetBackgroundColour()
self.fore = self.GetForegroundColour()
self.selBack = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
self.selFore = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)
for i in range(len(text.header)):
self.InsertColumn(i, text.header[i])
self.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
self.SetColumnWidth(
1,
width - self.GetColumnWidth(0) - 2 * 116 - SYS_VSCROLL_X - self.GetWindowBorderSize()[0]
)
self.SetColumnWidth(2, 116)
self.SetColumnWidth(3, 116)
CheckListCtrlMixin.__init__(self)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
def OnItemSelected(self, evt):
self.SelRow(evt.m_itemIndex)
evt.Skip()
# this is called by the base class when an item is checked/unchecked !!!!!!!
def OnCheckItem(self, index, flag):
evt = UserEvent(newEVT_CHECKLISTCTRL, self.GetId())
evt.SetValue((index, flag))
self.GetEventHandler().ProcessEvent(evt)
def SelRow(self, row):
if row != self.selRow:
if self.selRow in range(self.GetItemCount()):
item = self.GetItem(self.selRow)
item.SetTextColour(self.fore)
item.SetBackgroundColour(self.back)
self.SetItem(item)
self.selRow = row
if self.GetItemBackgroundColour(row) != self.selBack:
item = self.GetItem(row)
item.SetTextColour(self.selFore)
item.SetBackgroundColour(self.selBack)
self.SetItem(item)
self.SetItemState(row, 0, wx.LIST_STATE_SELECTED)
def AppendRow(self):
ix = self.GetItemCount()
self.InsertStringItem(ix, "")
self.CheckItem(ix)
self.EnsureVisible(ix)
self.SelRow(ix)
#===============================================================================
class ManagerDialog(wx.Dialog):
def __init__(self, text, plugin):
wx.Dialog.__init__(
self,
None,
-1,
text.dialogTitle % version,
style = wx.DEFAULT_DIALOG_STYLE|wx.MINIMIZE_BOX|wx.CLOSE_BOX|wx.RESIZE_BORDER,
)
#self.plugin = eg.Utils.GetPlugin(self) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.plugin = plugin
statusRS = self.plugin.GetStatusRS()
self.idUp = wx.NewId()
self.idDown = wx.NewId()
self.idTop = wx.NewId()
self.idBottom = wx.NewId()
self.idSort = wx.NewId()
self.idRefr = wx.NewId()
self.idPlay = wx.NewId()
self.SetIcon(self.plugin.info.icon.GetWxIcon())
self.plugin.manager = self
self.text = text
self.Bind(wx.EVT_CLOSE, self.onClose)
statPath = self.plugin.RadioSurePath+"\\Stations"
rsd_files = [x for x in listdir(statPath) if x.endswith('.rsd') and x.startswith('stations-')]
stations = statPath+"\\"+rsd_files[0]
def unique(seq):
res = set(seq)
res = list(res)
res.sort()
return res
f = openFile(stations, encoding='utf-8', mode='r')
data = self.data = [item.split("\t") for item in f.readlines()]
genres = [item[2] for item in data]
genres = unique(genres)
countrys = [item[3] for item in data]
countrys = unique(countrys)
languages = [item[4] for item in data]
languages = unique(languages)
titles = [item[0] for item in data]
titles = unique(titles)
f.close()
curFile = abspath(join(dirname(__file__), "contextCursor.cur"))
img = None
if exists(curFile):
img = wx.EmptyImage(32, 32)
img.LoadFile(curFile, wx.BITMAP_TYPE_CUR)
if not img or not img.IsOk():
from cStringIO import StringIO
from base64 import b64decode
stream = StringIO(b64decode(CUR_STRING))
img = wx.ImageFromStream(stream)
stream.close()
img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0)
img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0)
self.grid = wx.ListCtrl(self, style = wx.LC_REPORT|wx.LC_NO_HEADER|wx.LC_HRULES|wx.LC_VRULES)
self.grid.SetCursor(wx.CursorFromImage(img))
self.grid.InsertColumn(0,"")
#Button UP
bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_OTHER, (16, 16))
btnUP = wx.BitmapButton(self, self.idUp, bmp)
btnUP.SetToolTipString(self.text.toolTipUp)
#Button DOWN
bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_DOWN, wx.ART_OTHER, (16, 16))
btnDOWN = wx.BitmapButton(self, self.idDown, bmp)
btnDOWN.SetToolTipString(self.text.toolTipDown)
#Button DEL
bmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_OTHER, (16, 16))
btnDEL = wx.BitmapButton(self, -1, bmp)
btnDEL.SetToolTipString(self.text.toolTipDelete)
btnExp = wx.Button(self, wx.ID_SAVEAS, self.text.export)
btnExp.SetToolTipString(self.text.toolTipExport)
btnImp = wx.Button(self, wx.ID_OPEN, self.text.imprt)
btnImp.SetToolTipString(self.text.toolTipImport)
btnImpSR = wx.Button(self, wx.ID_FILE, self.text.importSR)
btnImpSR.SetToolTipString(self.text.toolTipImportSR)
bmp = wx.ArtProvider.GetBitmap(wx.ART_HELP_SETTINGS, wx.ART_OTHER, (16, 16))
btnSort = wx.BitmapButton(self, self.idSort, bmp)
btnSort.SetToolTipString(self.text.sort)
bmp = wx.ArtProvider.GetBitmap(wx.ART_REDO, wx.ART_OTHER, (16, 16))
btnRefr = wx.BitmapButton(self, self.idRefr, bmp)
btnRefr.SetToolTipString(self.text.refresh)
def EnableCtrls():
first = self.grid.GetFirstSelected()
cnt = self.grid.GetSelectedItemCount()
subseq = True
for ix in range(first, first + cnt):
if not self.grid.IsSelected(ix):
subseq = False
break
one = cnt==1
self.menuFlagM = subseq
itemCnt = self.grid.GetItemCount()
upDown = cnt > 0 and cnt < itemCnt and subseq
sourceLabel.Enable(one)
genreLabel.Enable(one)
langLabel.Enable(one)
countryLabel.Enable(one)
sourceCtrl.Enable(one)
btnUP.Enable(upDown)
btnDOWN.Enable(upDown)
btnDEL.Enable(cnt > 0)
btnExp.Enable(itemCnt > 0)
btnSort.Enable(itemCnt > 1)
def ListSelection(event=None):
EnableCtrls()
first = self.grid.GetFirstSelected()
cnt = self.grid.GetSelectedItemCount()
if cnt == 1:
item = self.tmpFavs[first]
src = item[0]
sourceCtrl.Clear()
srcs = ()
i = -1
for ix in range(5, 11):
srcIx = [itm[ix] for itm in data]
if src in srcIx:
i = srcIx.index(src)
break
if i > -1:
srcs = data[i][5:]
sourceCtrl.AppendItems(srcs)
if not src in srcs:
sourceCtrl.Append(src)
sourceCtrl.SetStringSelection(src)
if item[2] in genres:
genreCtrl.SetStringSelection(item[2])
if item[3] in languages:
langCtrl.SetStringSelection(item[3])
if item[4] in countrys:
countryCtrl.SetStringSelection(item[4])
else:
sourceCtrl.SetSelection(-1)
genreCtrl.SetSelection(-1)
langCtrl.SetSelection(-1)
countryCtrl.SetSelection(-1)
if event:
event.Skip()
self.grid.Bind(wx.EVT_LIST_ITEM_SELECTED, ListSelection)
self.grid.Bind(wx.EVT_LIST_ITEM_DESELECTED, ListSelection)
def onRefresh(evt = None, seq = None):
self.favs = seq if seq else self.plugin.RefreshVariables()
self.tmpFavs = cpy(self.favs)
self.grid.DeleteAllItems()
for row in range(len(self.tmpFavs)):
self.grid.InsertStringItem(row, self.tmpFavs[row][1])
self.grid.SetColumnWidth(0, -1)
self.grid.SetColumnWidth(0, self.grid.GetColumnWidth(0) + 6)
ListSelection()
self.Diff()
EnableCtrls()
#evt.Skip
btnRefr.Bind(wx.EVT_BUTTON, onRefresh)
def onSort(evt):
self.tmpFavs = sorted(self.tmpFavs, key=lambda i: strxfrm(i[1].encode(eg.systemEncoding)))
self.grid.DeleteAllItems()
for row in range(len(self.tmpFavs)):
self.grid.InsertStringItem(row, self.tmpFavs[row][1])
ListSelection()
self.Diff()
self.Colour()
btnSort.Bind(wx.EVT_BUTTON, onSort)
sourceLabel = wx.StaticText(self, -1, self.text.lblSource)
genreLabel = wx.StaticText(self, -1, self.text.lblGenre)
langLabel = wx.StaticText(self, -1, self.text.lblLanguage)
countryLabel = wx.StaticText(self, -1, self.text.lblCountry)
sourceCtrl = wx.Choice(self, -1, choices=[])
genreCtrl = wx.Choice(self, -1, choices=genres)
langCtrl = wx.Choice(self, -1, choices=languages)
countryCtrl = wx.Choice(self, -1, choices=countrys)
genreCtrl.Enable(False)
langCtrl.Enable(False)
countryCtrl.Enable(False)
line = wx.StaticLine(self, -1, style=wx.LI_HORIZONTAL)
btn1 = wx.Button(self, wx.ID_OK, self.text.ok)
btn1.SetDefault()
btn2 = wx.Button(self, wx.ID_CANCEL, self.text.cancel)
btn3 = wx.Button(self, wx.ID_APPLY, self.text.apply)
btn1.Bind(wx.EVT_BUTTON, self.onBtn)
btn2.Bind(wx.EVT_BUTTON, self.onBtn)
btn3.Bind(wx.EVT_BUTTON, self.onBtn)
btnExp.Bind(wx.EVT_BUTTON, self.onBtnsInOut)
btnImp.Bind(wx.EVT_BUTTON, self.onBtnsInOut)
btnImpSR.Bind(wx.EVT_BUTTON, self.onBtnsInOut)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btnsizer.Add(btnExp,0,wx.LEFT)
btnsizer.Add((8,-1),0)
btnsizer.Add(btnImp,0,wx.CENTER)
btnsizer.Add((8,-1),0)
btnsizer.Add(btnImpSR,0,wx.CENTER)
btnsizer.Add((-1,-1),1)
btnsizer.Add(btn1,0,wx.CENTER)
btnsizer.Add((8,-1),0)
btnsizer.Add(btn2,0,wx.CENTER)
btnsizer.Add((8,-1),0)
btnsizer.Add(btn3,0,wx.RIGHT)
btnsizer.Layout()
w = btn1.GetSize()[0]+btn2.GetSize()[0]+btn3.GetSize()[0]+btnExp.GetSize()[0]+btnImp.GetSize()[0]+btnImpSR.GetSize()[0]+5*8
w1 = btnUP.GetSize()[0]+8
onRefresh()
self.grid.SetMinSize((w-w1,-1))
szr = wx.BoxSizer(wx.VERTICAL)
sizer = wx.GridBagSizer(1,5)
sizer.AddGrowableCol(1)
sizer.AddGrowableRow(7)
sizer.Add(wx.StaticText(self, -1, self.text.lblList),(0,0),(1,2))
sizer.Add(self.grid, (1,0), (7, 2), wx.EXPAND, 5)
sizer.Add(btnUP, (1,2), (1, 1),flag=wx.RIGHT)
sizer.Add(btnDOWN, (2,2), (1, 1),flag=wx.RIGHT)
sizer.Add(btnDEL, (3,2), (1, 1),flag=wx.RIGHT)
sizer.Add((5,20), (4,2), (1, 1),flag=wx.RIGHT)
sizer.Add(btnRefr, (5,2), (1, 1),flag=wx.RIGHT)
sizer.Add(btnSort, (6,2), (1, 1),flag=wx.RIGHT)
sizer.Add(sourceLabel, (8,0), (1, 1),wx.TOP, 10)
sizer.Add(sourceCtrl, (8,1), (1, 2), wx.EXPAND|wx.TOP, 5)
sizer.Add(genreLabel, (9,0), (1, 1),wx.TOP, 10)
sizer.Add(genreCtrl, (9,1), (1, 2), wx.EXPAND|wx.TOP, 5)
sizer.Add(langLabel, (10,0), (1, 1),wx.TOP, 10)
sizer.Add(langCtrl, (10,1), (1, 2), wx.EXPAND|wx.TOP, 5)
sizer.Add(countryLabel, (11,0), (1, 1),wx.TOP, 10)
sizer.Add(countryCtrl, (11,1), (1, 2), wx.EXPAND|wx.TOP, 5)
szr.Add(sizer, 1, wx.EXPAND|wx.ALL, 5)
szr.Add(line, 0, wx.EXPAND|wx.TOP, 3)
szr.Add(btnsizer, 0, wx.EXPAND|wx.ALL, 5)
self.SetSizer(szr)
self.Fit()
#Learn New MINSIZE:
#====================
if ConfigData.plcmnt:
# if 0:
self.SetPosition(ConfigData.plcmnt[0])
sz = ConfigData.plcmnt[1]
minsz = ConfigData.plcmnt[2]
else:
self.Center()
sz = (w+w1, self.GetSize()[1] + btn1.GetSize()[1] + 10)
minsz = sz
self.SetMinSize(minsz)
self.SetSize(sz)
self.Show(True)
def onSource(evt):
if self.grid.GetSelectedItemCount() == 1:
self.tmpFavs[self.grid.GetFirstSelected()][0] = evt.GetString()
self.Diff()
sourceCtrl.Bind(wx.EVT_CHOICE, onSource)
def Move(evt):
id = evt.GetId()
first = self.grid.GetFirstSelected()
cnt = self.grid.GetSelectedItemCount()
if id == self.idUp:
if first:
bit = self.tmpFavs.pop(first-1)
self.tmpFavs.insert(first-1+cnt, bit)
else:
id = self.idBottom
elif id == self.idDown:
if first+cnt < len(self.tmpFavs):
bit = self.tmpFavs.pop(first+cnt)
self.tmpFavs.insert(first, bit)
else:
id = self.idTop
if id in (self.idBottom, self.idTop):
p1=self.tmpFavs[:first]
p2=self.tmpFavs[first:first+cnt]
p3=self.tmpFavs[first+cnt:]
if id == self.idTop:
p2.extend(p1)
p2.extend(p3)
self.tmpFavs = p2
elif id == self.idBottom:
p1.extend(p3)
p1.extend(p2)
self.tmpFavs = p1
self.grid.DeleteAllItems()
for row in range(len(self.tmpFavs)):
self.grid.InsertStringItem(row, self.tmpFavs[row][1])
if id == self.idUp:
if first:
b, e = (first-1, first-1+cnt)
elif id == self.idDown:
if first+cnt < len(self.tmpFavs):
b, e = (first+1,first+1+cnt)
elif id == self.idBottom:
ln = len(self.tmpFavs)
b, e = (ln-cnt, ln)
elif id == self.idTop:
b, e = (0, cnt)
for ix in range(b, e):
self.grid.Select(ix, True)
self.grid.EnsureVisible(ix)
self.Diff()
self.Colour()
btnUP.Bind(wx.EVT_BUTTON, Move)
btnDOWN.Bind(wx.EVT_BUTTON, Move)
        def onRemDupl(evt):
            # dict(map(None, sources, indexes)) keeps only the last index for
            # each duplicated source URL; sorting the surviving indexes keeps
            # the remaining items in their original order
            indexes = dict(map(None, [item[0] for item in self.tmpFavs], range(len(self.tmpFavs)))).values()
indexes.sort()
tmp = []
for ix in indexes:
tmp.append(self.tmpFavs[ix])
onRefresh(None, tmp)
self.Diff()
self.Colour()
def onDelete(evt):
cnt = self.grid.GetItemCount()
for ix in range(cnt-1, -1, -1):
if self.grid.IsSelected(ix):
self.grid.DeleteItem(ix)
self.tmpFavs.pop(ix)
EnableCtrls()
self.Diff()
self.Colour()
btnDEL.Bind(wx.EVT_BUTTON, onDelete)
def onPlayNow(evt):
ix = self.grid.GetFirstSelected()
self.plugin.RefreshVariables()
sel = self.tmpFavs[ix][1]
src = sourceCtrl.GetStringSelection()
rsList = [item[1] for item in self.plugin.Favorites]
hwnds = HandleRS()
indx = None
if sel in [item[1] for item in self.plugin.Favorites]:
indx = rsList.index(sel)
if src != self.plugin.Favorites[indx][0]:
indx = None
if indx is not None: # start with favorite index
if not hwnds:
hwnds = self.plugin.GetNewHwnd()
if hwnds:
SendMessage(hwnds[0], WM_COMMAND, 4102+indx, 0)
else:
self.FailedToOpen()
else:
for hwnd in hwnds:
x, rec = self.plugin.GetStatusRS([hwnd])
if rec != 1:
SendMessage(hwnd, WM_COMMAND, 4102+indx, 0)
break
if rec or rec is None:
hwnds = self.plugin.GetNewHwnd(hwnds)
if hwnds:
SendMessage(hwnds[0], WM_COMMAND, 4102+indx, 0)
else:
self.FailedToOpen()
else: #start with source="blablabla"
if not hwnds:
hwnds = self.plugin.GetNewHwnd(hwnds, src=src)
if not hwnds:
self.FailedToOpen()
else:
for hwnd in hwnds:
x, rec = self.plugin.GetStatusRS([hwnd])
if rec != 1:
PostMessage(hwnd, WM_COMMAND, 1, 0) #close
i = 0
while hwnd in hwnds and i < 100:
hwnds = HandleRS()
i += 1
if i == 100:
self.PrintError(self.text.message6)
rec = 1
else:
hwnds = self.plugin.GetNewHwnd(hwnds, src=src)
if not hwnds:
self.FailedToOpen()
rec = 1
else:
break
if rec or rec is None:
hwnds = self.plugin.GetNewHwnd(hwnds, src=src)
if not hwnds:
self.FailedToOpen()
self.grid.Bind(wx.EVT_LIST_ITEM_ACTIVATED, onPlayNow)
def AreDuplications():
srcList = [item[0] for item in self.tmpFavs]
return len(srcList) > len(set(srcList))
def OnRightClick(evt):
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.Bind(wx.EVT_MENU, onDelete, id = self.popupID1)
self.Bind(wx.EVT_MENU, onRemDupl, id = self.popupID2)
self.Bind(wx.EVT_MENU, Move, id = self.idUp)
self.Bind(wx.EVT_MENU, Move, id = self.idDown)
self.Bind(wx.EVT_MENU, Move, id = self.idTop)
self.Bind(wx.EVT_MENU, Move, id = self.idBottom)
self.Bind(wx.EVT_MENU, onPlayNow, id = self.idPlay)
self.Bind(wx.EVT_MENU, onSort, id = self.idSort)
self.Bind(wx.EVT_MENU, onRefresh, id = self.idRefr)
self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_SAVE)
self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_FILE)
menu = wx.Menu()
if self.grid.GetSelectedItemCount() == 1:
menu.Append(self.idPlay, self.text.play)
menu.AppendSeparator()
menu.Append(self.popupID1, self.text.toolTipDelete)
if AreDuplications():
menu.Append(self.popupID2, self.text.removeDupl)
if self.grid.GetItemCount() > 1:
menu.Append(self.idSort, self.text.sort)
if self.menuFlagM:
menu.AppendSeparator()
menu.Append(self.idUp, self.text.toolTipUp)
menu.Append(self.idDown, self.text.toolTipDown)
menu.Append(self.idTop, self.text.moveTop)
menu.Append(self.idBottom, self.text.moveBottom)
menu.AppendSeparator()
menu.Append(self.idRefr, self.text.refresh)
menu.Append(wx.ID_SAVEAS, self.text.exportSel)
menu.Append(wx.ID_SAVE, self.text.exportAll)
menu.Append(wx.ID_OPEN, self.text.toolTipImport)
menu.Append(wx.ID_FILE, self.text.toolTipImportSR)
self.PopupMenu(menu)
menu.Destroy()
evt.Skip()
self.grid.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, OnRightClick)
def FailedToOpen(self):
return MessageBox(
self,
self.text.message9, #failed to open
self.text.messBoxTit6,
wx.ICON_EXCLAMATION,
15,
plugin = self.plugin,
)
def CreateFavorites(self, dom, node, itmList = None, save = False):
max = self.plugin.maxFav
mssgs = []
if save:
#Duplications check
indexes = dict(map(None,[item[0] for item in self.tmpFavs],range(len(self.tmpFavs)))).values()
indexes.sort()
tmp = []
for ix in indexes:
tmp.append(self.tmpFavs[ix])
itmList = range(len(tmp))
if len(self.tmpFavs) > len(tmp):
mssgs.append(self.text.message8)
else:
tmp = self.tmpFavs
flag = save and len(itmList) > max
if flag:
mssgs.append(self.text.message1 % self.plugin.maxFav)
if mssgs:
MessageBox(
self,
"\n".join(mssgs),
self.plugin.text.messBoxTit1,
wx.ICON_EXCLAMATION,
plugin = self.plugin,
)
elm = 0
for i in itmList:
elm += 1
if flag and elm > max:
break
item = tmp[i]
itemNode = dom.createElement(u'Item-%i' % elm)
sourceNode = dom.createElement(u'Source')
sourceText = dom.createTextNode(unicode(item[0]))
sourceNode.appendChild(sourceText)
itemNode.appendChild(sourceNode)
titleNode = dom.createElement(u'Title')
titleText = dom.createTextNode(unicode(item[1]))
titleNode.appendChild(titleText)
itemNode.appendChild(titleNode)
genreNode = dom.createElement(u'Genre')
genreText = dom.createTextNode(unicode(item[2]))
genreNode.appendChild(genreText)
itemNode.appendChild(genreNode)
languageNode = dom.createElement(u'Language')
languageText = dom.createTextNode(unicode(item[3]))
languageNode.appendChild(languageText)
itemNode.appendChild(languageNode)
countryNode = dom.createElement(u'Country')
countryText = dom.createTextNode(unicode(item[4]))
countryNode.appendChild(countryText)
itemNode.appendChild(countryNode)
node.appendChild(itemNode)
def UpdateRadioSureXml(self):
# create a backup of original file
new_file_name = u'%s\\RadioSure.xml' % self.plugin.xmlpath
old_file_name = new_file_name + "~"
if exists(old_file_name):
remove(old_file_name)
rename(new_file_name, old_file_name)
try:
# change Favorites node
doc = miniDom.parse(old_file_name)
node = doc.getElementsByTagName('XMLConfigSettings')[0]
oldFavorites = node.getElementsByTagName('Favorites')[0]
newFavorites = doc.createElement(u'Favorites')
self.CreateFavorites(doc, newFavorites, save = True)
node.replaceChild(newFavorites, oldFavorites)
# persist changes to new file
f = file(new_file_name, "wb")
writer = lookup('utf-8')[3](f)
doc.writexml(writer, encoding = 'utf-8')
f.close()
MessageBox(
self,
self.text.message5, #updated
self.text.messBoxTit5,
wx.ICON_INFORMATION,
15,
plugin = self.plugin,
)
return True
        except:
            # on failure: report the error and restore the backup created above
MessageBox(
self,
self.text.message3,
self.plugin.text.messBoxTit1,
wx.ICON_EXCLAMATION,
plugin = self.plugin,
)
if exists(new_file_name):
remove(new_file_name)
rename(old_file_name, new_file_name)
return False
def onBtn(self, evt):
def UpdateXml():
closeFlag = self.UpdateRadioSureXml()
rs = u'%s\\RadioSure.exe' % self.plugin.RadioSurePath
rs = rs.encode(FSE) if isinstance(rs, unicode) else rs
args = [rs]
if isfile(rs):
Popen(args)
return closeFlag
closeFlag = False
id = evt.GetId()
if id == wx.ID_APPLY or (id == wx.ID_OK and self.favs != self.tmpFavs):
hwnds = HandleRS()
rec = 0
for hwnd in hwnds:
rec = self.plugin.GetStatusRS([hwnd])[1]
if rec:
break
title = self.text.messBoxTit3 if rec else self.text.messBoxTit2
if hwnds:
# RS is running !
res = MessageBox(
self,
self.text.message2 % (self.plugin.text.yes, self.plugin.text.no),
title,
wx.ICON_EXCLAMATION|wx.YES_NO|wx.YES_DEFAULT,
plugin = self.plugin,
)
if res == wx.ID_YES:
for hwnd in hwnds:
rec = self.plugin.GetStatusRS([hwnd])[1]
if rec:
PostMessage(hwnd, WM_COMMAND, 1051, 0) # Stop Rec
i=0
while rec and i < 100:
i+=1
rec = self.plugin.GetStatusRS([hwnd])[1]
if not rec:
PostMessage(hwnd, WM_COMMAND, 1, 0) # Close
else:
PostMessage(hwnd, WM_COMMAND, 1, 0) # Close
i = 0
while hwnds and i < 100:
i += 1
hwnds = HandleRS()
if hwnds:
pid = eg.WinApi.Utils.PyGetWindowThreadProcessId(hwnd)[1]
handle = _kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
succ = _kernel32.TerminateProcess(handle, -1)
_kernel32.CloseHandle(handle)
if not succ:
MessageBox(
self,
self.text.message6, #failed to close
self.text.messBoxTit6,
wx.ICON_EXCLAMATION,
15,
plugin = self.plugin,
)
else:
closeFlag = UpdateXml()
else:
closeFlag = UpdateXml()
else:
MessageBox(
self,
self.text.message7, #no update
self.text.messBoxTit7,
wx.ICON_EXCLAMATION,
15,
plugin = self.plugin,
)
else:
closeFlag = self.UpdateRadioSureXml()
if id == wx.ID_APPLY and closeFlag:
self.favs = cpy(self.tmpFavs)
self.Diff()
if id != wx.ID_APPLY:
if id != wx.ID_OK or closeFlag or self.favs == self.tmpFavs:
self.Close()
#evt.Skip()
def Import(self, data):
# ToDo: Add check of duplications ???
self.tmpFavs.extend(data)
self.grid.DeleteAllItems()
for row in range(len(self.tmpFavs)):
self.grid.InsertStringItem(row, self.tmpFavs[row][1])
self.grid.SetColumnWidth(0, -1)
self.grid.SetColumnWidth(0, self.grid.GetColumnWidth(0) + 6)
self.grid.EnsureVisible(len(self.tmpFavs)-1)
self.grid.SetFocus()
self.Colour()
self.Diff()
def Colour(self):
maxF = self.plugin.maxFav
cnt = self.grid.GetItemCount()
fore = self.grid.GetTextColour()
for row in range(min(maxF, cnt)):
item = self.grid.GetItem(row)
item.SetTextColour(fore)
self.grid.SetItem(item)
if maxF >= cnt:
return
for row in range(maxF, cnt):
item = self.grid.GetItem(row)
item.SetTextColour("red")
self.grid.SetItem(item)
def onBtnsInOut(self, evt):
id = evt.GetId()
if id == wx.ID_SAVEAS or id == wx.ID_SAVE:
dlg = wx.FileDialog(
self,
message = self.text.save,
defaultDir = self.plugin.xmlpath,
defaultFile = "Favorites.xml",
wildcard = self.text.wildcard,
style=wx.SAVE
)
if dlg.ShowModal() == wx.ID_OK:
self.Export(dlg.GetPath(), id)
dlg.Destroy()
elif id == wx.ID_OPEN: # Import
dlg = wx.FileDialog(
self,
message = self.text.choose,
defaultDir = self.plugin.xmlpath,
defaultFile = "*.xml",
wildcard = self.text.wildcard,
style = wx.OPEN | wx.CHANGE_DIR
)
flg = True
filePath = None
if dlg.ShowModal() == wx.ID_OK:
filePath = dlg.GetPath()
dlg.Destroy()
xmldoc = miniDom.parse(filePath)
document = xmldoc.getElementsByTagName('Favorites')
if len(document) > 0:
stations = getStations(document[0])
if stations:
flg = False
self.Import(stations)
if flg and filePath:
MessageBox(
self,
self.text.message4 % split(filePath)[1],
self.plugin.text.messBoxTit1,
wx.ICON_EXCLAMATION,
plugin = self.plugin,
)
elif id == wx.ID_FILE: # Import SR
dlg = wx.FileDialog(
self,
message = self.text.choose,
defaultDir = eg.folderPath.ProgramFiles+'\\Screamer',
defaultFile = "favorites.xml",
wildcard = self.text.wildcard,
style = wx.OPEN | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
filePath = dlg.GetPath()
dlg.Destroy()
stations = self.ImportSR(filePath)
if not stations:
MessageBox(
self,
self.text.message4 % split(filePath)[1],
self.plugin.text.messBoxTit1,
wx.ICON_EXCLAMATION,
plugin = self.plugin,
)
else:
self.Import(stations)
evt.Skip()
return
def Diff(self):
wx.FindWindowById(wx.ID_APPLY).Enable(self.favs != self.tmpFavs)
def onClose(self, evt):
hwnd = self.GetHandle()
wp = GetWindowPlacement(hwnd)[4]
        #Note: GetPosition() returns (-32000, -32000) if the window is minimized !!!
plcmnt = (
(wp[0], wp[1]), # pos
(wp[2] - wp[0], wp[3] - wp[1]), # size
(self.GetMinSize().GetWidth(),self.GetMinSize().GetHeight()) # min size
)
if plcmnt != ConfigData.plcmnt:
ConfigData.plcmnt = plcmnt
#if not eg.document.IsDirty():
# wx.CallAfter(eg.Notify, "DocumentChange", True)
self.Show(False)
self.plugin.manager = None
self.Destroy()
evt.Skip()
def ImportSR(self, filePath):
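        # Parses a Screamer Radio favorites file and returns a list of
        # (source, title, genre, language, country) tuples, filling in the
        # details from self.data when the source URL is already known;
        # returns None if the file has an unexpected structure.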
xmldoc = miniDom.parse(filePath)
document = xmldoc.getElementsByTagName('Screamer')
if len(document) > 0:
res = []
stations = tuple(document[0].getElementsByTagName('Station'))
for station in stations:
if "title" in station.attributes.keys():
title = station.attributes["title"].value
else:
return None
src = station.getElementsByTagName('Source')
if len(src)>0:
src = src[0].firstChild.data
i = -1
for ix in range(5, 11):
srcIx = [itm[ix] for itm in self.data]
if src in srcIx:
i = srcIx.index(src)
break
if i > -1:
station = self.data[i]
itm = (src, station[0], station[2], station[4], station[3])
else:
itm = (src, title, "-", "-", "-")
res.append(itm)
else:
return None
return res
return None
def Export(self, path, id):
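        # Writes an XMLConfigSettings document whose Favorites node contains
        # either only the selected rows (wx.ID_SAVEAS with a selection) or
        # all rows of the grid.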
impl = miniDom.getDOMImplementation()
dom = impl.createDocument(None, u'XMLConfigSettings', None)
root = dom.documentElement
commentNode = dom.createComment(self.text.xmlComment1)
dom.insertBefore(commentNode, root)
commentNode = dom.createComment(self.text.xmlComment2 % str(dt.now())[:19])
dom.insertBefore(commentNode, root)
favNode = dom.createElement(u'Favorites')
root.appendChild(favNode)
if id == wx.ID_SAVEAS and self.grid.GetSelectedItemCount():
itmList = [itm for itm in range(len(self.tmpFavs)) if self.grid.IsSelected(itm)]
else:
itmList = range(len(self.tmpFavs))
self.CreateFavorites(dom, favNode, itmList)
f = file(path, 'wb')
writer = lookup('utf-8')[3](f)
dom.writexml(writer, encoding = 'utf-8')
f.close()
#===============================================================================
class SchedulerDialog(wx.Dialog):
lastRow = -1
applyBttn = None
def __init__(self, text, plugin):
wx.Dialog.__init__(
self,
None,
-1,
text.dialogTitle % version,
style = wx.DEFAULT_DIALOG_STYLE|wx.MINIMIZE_BOX|wx.CLOSE_BOX,
)
#import locale as l
#l.setlocale(l.LC_ALL, "us") # only for testing
bttns = []
self.ctrls=[]
self.plugin = plugin
self.SetIcon(self.plugin.info.icon.GetWxIcon())
self.plugin.dialog = self
self.tmpData = self.plugin.tmpData = cpy(self.plugin.data)
self.text = text
def fillDynamicSizer(type, data = None, old_type = 255):
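            # Builds (or refills) the controls specific to the selected schedule
            # type - date picker, weekday/serial/month check lists, start time and
            # duration spinners, period controls - inside dynamicSizer and loads
            # 'data' into them; returns the sizer's minimum width after a rebuild.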
flag = old_type != type
if flag:
dynamicSizer.Clear(True)
self.ctrls=[]
self.ctrls.append(wx.NewId())
self.ctrls.append(wx.NewId())
if type == -1:
return
if type != 1 and flag:
topSizer = wx.StaticBoxSizer(
wx.StaticBox(self, -1, self.text.chooseDay),
wx.HORIZONTAL
)
if type == 0:
if flag:
self.ctrls.append(wx.NewId())
dp = wx.DatePickerCtrl(self, self.ctrls[2], size = (86, -1),
style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY)
topSizer.Add(dp,0,wx.EXPAND)
self.ctrls.append(wx.NewId())
yearlyCtrl = wx.CheckBox(self, self.ctrls[3], self.text.yearly)
topSizer.Add(yearlyCtrl, 0, wx.EXPAND|wx.LEFT, 30)
dynamicSizer.Add(topSizer, 0, wx.EXPAND|wx.TOP, 2)
else:
dp = wx.FindWindowById(self.ctrls[2])
yearlyCtrl = wx.FindWindowById(self.ctrls[3])
if data:
if not data[2]:
val = wx.DateTime_Now()
data[2] = str(dt.now())[:10]
wxDttm = wx.DateTime()
wxDttm.Set(
int(data[2][8:10]),
int(data[2][5:7]) - 1,
int(data[2][:4])
)
dp.SetValue(wxDttm)
yearlyCtrl.SetValue(data[3])
elif type == 2:
if flag:
if self.plugin.first_day:
choices = list(day_name)[:-1]
choices.insert(0, list(day_name)[-1])
else:
choices = list(day_name)
self.ctrls.append(wx.NewId())
weekdayCtrl = wx.CheckListBox(
self,
self.ctrls[2],
choices = choices,
size=((-1,110)),
)
self.ctrls.append(wx.NewId())
holidCheck_2 = wx.CheckBox(
self,
self.ctrls[3],
self.text.holidCheck_2
)
self.ctrls.append(wx.NewId())
holidCheck_1 = wx.CheckBox(
self,
self.ctrls[4],
self.text.holidCheck_1
)
topSizer.Add((40,1), 0, wx.ALIGN_CENTER)
topSizer.Add(
wx.StaticText(
self,
-1,
self.text.theEvery
),
0,
wx.ALIGN_CENTER | wx.RIGHT, 10
)
topSizer.Add(weekdayCtrl, 0, wx.TOP)
dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP,2)
dynamicSizer.Add(holidCheck_1, 0, wx.TOP, 2)
dynamicSizer.Add(holidCheck_2, 0, wx.TOP, 2)
else:
weekdayCtrl = wx.FindWindowById(self.ctrls[2])
holidCheck_2 = wx.FindWindowById(self.ctrls[3])
holidCheck_1 = wx.FindWindowById(self.ctrls[4])
val = 127 if not data else data[2]
if self.plugin.first_day:
exp = [6, 0, 1, 2, 3, 4, 5]
else:
exp = [0, 1, 2, 3, 4, 5, 6]
for i in range(7):
weekdayCtrl.Check(i, bool(val & (2 ** exp[i])))
enable = val & 31 and not val & 96
holidCheck_1.Enable(enable)
check = 0 if (not data or not enable) else data[4]
holidCheck_1.SetValue(check)
enable = val & 96 and not val & 31
holidCheck_2.Enable(enable)
check = 0 if (not data or not enable) else data[3]
holidCheck_2.SetValue(check)
elif type == 3: # Monthly/weekday ...
if flag:
dateSizer = wx.BoxSizer(wx.HORIZONTAL)
dateSizer.Add(
wx.StaticText(
self,
-1,
self.text.the
),
0,
wx.ALIGN_CENTER
)
topSizer.Add(dateSizer, 0, wx.EXPAND)
dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP,2)
self.ctrls.append(wx.NewId())
serialCtrl = wx.CheckListBox(
self,
self.ctrls[2],
choices = self.text.serial_num,
size = ((-1, 95)),
)
dateSizer.Add(serialCtrl, 0, wx.ALIGN_CENTER | wx.LEFT, 10)
if self.plugin.first_day:
choices = list(day_name)[0:-1]
choices.insert(0, list(day_name)[-1])
else:
choices = list(day_name)
self.ctrls.append(wx.NewId())
weekdayCtrl = wx.CheckListBox(
self,
self.ctrls[3],
choices = choices,
size = ((-1, 110)),
)
dateSizer.Add(weekdayCtrl, 0, wx.ALIGN_CENTER | wx.LEFT, 10)
dateSizer.Add(
wx.StaticText(
self,
-1,
self.text.in_
),
0,
wx.ALIGN_CENTER | wx.LEFT, 10
)
self.ctrls.append(wx.NewId())
monthsCtrl_1 = wx.CheckListBox(
self,
self.ctrls[4],
choices = list(month_name)[1:7],
size = ((-1, 95)),
)
dateSizer.Add(monthsCtrl_1, 0, wx.ALIGN_CENTER | wx.LEFT, 10)
self.ctrls.append(wx.NewId())
monthsCtrl_2 = wx.CheckListBox(
self,
self.ctrls[5],
choices = list(month_name)[7:],
size = ((-1, 95)),
)
dateSizer.Add(monthsCtrl_2, 0, wx.ALIGN_CENTER | wx.LEFT, -1)
self.ctrls.append(wx.NewId())
holidCheck_1 = wx.CheckBox(
self,
self.ctrls[6],
self.text.holidCheck_1
)
dynamicSizer.Add(holidCheck_1, 0, wx.TOP, 2)
else:
serialCtrl = wx.FindWindowById(self.ctrls[2])
weekdayCtrl = wx.FindWindowById(self.ctrls[3])
monthsCtrl_1 = wx.FindWindowById(self.ctrls[4])
monthsCtrl_2 = wx.FindWindowById(self.ctrls[5])
holidCheck_1 = wx.FindWindowById(self.ctrls[6])
val = 0 if not data else data[2]
for i in range(6):
serialCtrl.Check(i, bool(val & (2 ** i)))
val = 0 if not data else data[3]
if self.plugin.first_day:
exp = [6, 0, 1, 2, 3, 4, 5]
else:
exp = [0, 1, 2, 3, 4, 5, 6]
for i in range(7):
weekdayCtrl.Check(i, bool(val & (2 ** exp[i])))
enable = val & 31 and not val & 96
holidCheck_1.Enable(enable)
val = 63 if not data else data[4]
for i in range(6):
monthsCtrl_1.Check(i, bool(val & (2 ** i)))
val = 63 if not data else data[5]
for i in range(6):
monthsCtrl_2.Check(i, bool(val & (2 ** i)))
check = 0 if (not data or not enable) else data[6]
holidCheck_1.SetValue(check)
elif type == 4: # Monthly/day ...
if flag:
dateSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(dateSizer, 0, wx.EXPAND)
dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP, 2)
self.ctrls.append(wx.NewId())
q_1_Ctrl = wx.CheckListBox(
self,
self.ctrls[2],
choices = [str(i) + '.' for i in range(1, 9)],
size = ((40, 125)),
)
dateSizer.Add(q_1_Ctrl, 0, wx.LEFT, 5)
self.ctrls.append(wx.NewId())
q_2_Ctrl = wx.CheckListBox(
self,
self.ctrls[3],
choices = [str(i) + '.' for i in range(9, 17)],
size = ((46, 125)),
)
dateSizer.Add(q_2_Ctrl, 0, wx.LEFT, -1)
self.ctrls.append(wx.NewId())
q_3_Ctrl = wx.CheckListBox(
self,
self.ctrls[4],
choices = [str(i) + '.' for i in range(17, 25)],
size = ((46, 125)),
)
dateSizer.Add(q_3_Ctrl, 0, wx.LEFT, -1)
self.ctrls.append(wx.NewId())
q_4_Ctrl = wx.CheckListBox(
self,
self.ctrls[5],
choices = [str(i) + '.' for i in range(25, 32)],
size = ((46, 125)),
)
dateSizer.Add(q_4_Ctrl, 0, wx.LEFT, -1)
dateSizer.Add((-1, 1), 1, wx.EXPAND)
self.ctrls.append(wx.NewId())
monthsCtrl_1 = wx.CheckListBox(
self,
self.ctrls[6],
choices = list(month_name)[1:7],
size = ((-1, 95)),
)
dateSizer.Add(monthsCtrl_1, 0, wx.ALIGN_CENTER | wx.LEFT, 10)
self.ctrls.append(wx.NewId())
monthsCtrl_2 = wx.CheckListBox(
self,
self.ctrls[7],
choices = list(month_name)[7:],
size = ((-1, 95)),
)
dateSizer.Add(monthsCtrl_2, 0, wx.ALIGN_CENTER | wx.LEFT, -1)
dateSizer.Add((5, 1), 0)
else:
q_1_Ctrl = wx.FindWindowById(self.ctrls[2])
q_2_Ctrl = wx.FindWindowById(self.ctrls[3])
q_3_Ctrl = wx.FindWindowById(self.ctrls[4])
q_4_Ctrl = wx.FindWindowById(self.ctrls[5])
monthsCtrl_1 = wx.FindWindowById(self.ctrls[6])
monthsCtrl_2 = wx.FindWindowById(self.ctrls[7])
val = 0 if not data else data[2]
for i in range(8):
q_1_Ctrl.Check(i, bool(val & (2 ** i)))
val = 0 if not data else data[3]
for i in range(8):
q_2_Ctrl.Check(i, bool(val & (2 ** i)))
val = 0 if not data else data[4]
for i in range(8):
q_3_Ctrl.Check(i, bool(val & (2 ** i)))
val = 0 if not data else data[5]
for i in range(7):
q_4_Ctrl.Check(i, bool(val & (2 ** i)))
val = 63 if not data else data[6]
for i in range(6):
monthsCtrl_1.Check(i, bool(val & (2 ** i)))
val = 63 if not data else data[7]
for i in range(6):
monthsCtrl_2.Check(i, bool(val & (2 ** i)))
elif type == 5:
if flag:
self.ctrls.append(wx.NewId())
dp = wx.DatePickerCtrl(self, self.ctrls[2], size = (86, -1),
style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY)
topSizer.Add(dp, 0, wx.EXPAND)
dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP, 2)
else:
dp = wx.FindWindowById(self.ctrls[2])
if data:
if not data[2]:
val = wx.DateTime_Now()
data[2] = str(dt.now())[:10]
wxDttm = wx.DateTime()
wxDttm.Set(
int(data[2][8:10]),
int(data[2][5:7])-1,
int(data[2][:4])
)
dp.SetValue(wxDttm)
#elif type == 1: # daily
# pass
if flag:
timeSizer = wx.GridBagSizer(0, 0)
bottomSizer = wx.StaticBoxSizer(
wx.StaticBox(self, -1, self.text.chooseTime6 if type == 6 else self.text.chooseTime),
wx.HORIZONTAL
)
dynamicSizer.Add(bottomSizer, 0, wx.EXPAND | wx.TOP, 16 if type != 2 else 5)
bottomSizer.Add(timeSizer, 0, wx.EXPAND)
stEvLbl = wx.StaticText(self, -1, self.text.start)
timeSizer.Add(stEvLbl, (0, 0), (1, 2))
durLabel = wx.StaticText(self, -1, self.text.length)
timeSizer.Add(durLabel, (0, 3), (1, 2))
spinBtn = wx.SpinButton(
self,
-1,
wx.DefaultPosition,
(-1, 22),
wx.SP_VERTICAL
)
initTime = wx.DateTime_Now()
initTime.SetSecond(0)
initTime.AddTS(wx.TimeSpan.Minute())
val = data[0] if data and data[0] else initTime
timeCtrl = eg.TimeCtrl(
self,
self.ctrls[0],
val,
fmt24hr = True,
spinButton = spinBtn
)
timeSizer.Add(timeCtrl, (1, 0), (1, 1))
timeSizer.Add(spinBtn, (1, 1), (1, 1))
timeSizer.Add((40, -1), (1, 2), (1, 1))
spinBtn2 = wx.SpinButton(
self,
-1,
wx.DefaultPosition,
(-1, 22),
wx.SP_VERTICAL
)
val = data[1] if data and data[1] else "00:00"
lenCtrl = eg.TimeCtrl_Duration(
self,
self.ctrls[1],
val,
fmt24hr = True,
spinButton = spinBtn2,
displaySeconds = False
)
timeSizer.Add(lenCtrl, (1, 3), (1, 1))
timeSizer.Add(spinBtn2, (1, 4), (1, 1))
bottomSizer.Add((-1,-1), 1, wx.EXPAND)
testBttn = wx.Button(
self,
-1 if len(bttns) == 0 else bttns[-1],
self.text.testButton
)
bottomSizer.Add(testBttn, 0, wx.EXPAND | wx.RIGHT)
else:
timeCtrl = wx.FindWindowById(self.ctrls[0])
val = data[0] if data and data[0] else wx.DateTime_Now()
timeCtrl.SetValue(val)
lenCtrl = wx.FindWindowById(self.ctrls[1])
val = data[1] if data and data[1] else "00:00"
lenCtrl.SetValue(val)
if type == 5: #periodically
if flag:
bottomSizer = wx.StaticBoxSizer(
wx.StaticBox(self, -1, self.text.choosePeriod),
wx.HORIZONTAL
)
self.ctrls.append(wx.NewId())
numCtrl = MySpinIntCtrl(self, -1, value = 1, min = 1)
numCtrl.SetNumCtrlId(self.ctrls[3])
bottomSizer.Add(
wx.StaticText(
self,
-1,
self.text.andThenEvery
),
0,
wx.ALIGN_CENTER
)
bottomSizer.Add(numCtrl, 0, wx.LEFT, 4)
self.ctrls.append(wx.NewId())
unitCtrl = wx.Choice(
self,
self.ctrls[4],
choices = self.text.units
)
bottomSizer.Add(unitCtrl, 0, wx.LEFT, 8)
dynamicSizer.Add(bottomSizer, 0, wx.EXPAND|wx.TOP, 16)
dynamicSizer.Layout()
else:
numCtrl = wx.FindWindowById(self.ctrls[3])
unitCtrl = wx.FindWindowById(self.ctrls[4])
if data:
numCtrl.SetValue(str(data[3]))
unitCtrl.SetSelection(data[4])
elif flag:
dynamicSizer.Layout()
if type == 6:
stEvLbl.Show(False)
timeCtrl.Show(False)
spinBtn.Show(False)
return dynamicSizer.GetMinSize()[0]
def Diff():
applyBttn = wx.FindWindowById(bttns[5])
flg = self.tmpData != self.plugin.data
applyBttn.Enable(flg)
def onCheckListBox(evt):
id = evt.GetId()
sel = evt.GetSelection()
box = self.FindWindowById(id)
ix = self.ctrls.index(id)
type = self.tmpData[self.lastRow][2]
cond = (type == 2 and ix == 2) or (type == 3 and ix == 3)
if cond and self.plugin.first_day:
exp = (6, 0, 1, 2, 3, 4, 5)[sel]
else:
exp = sel
if box.IsChecked(sel):
self.tmpData[self.lastRow][3][ix] |= 2 ** exp
else:
self.tmpData[self.lastRow][3][ix] &= 255 - 2 ** exp
if cond:
holidCheck_1 = wx.FindWindowById(self.ctrls[-1])
val = self.tmpData[self.lastRow][3][ix]
flg = val & 31 and not val & 96
holidCheck_1.Enable(flg)
if not flg:
holidCheck_1.SetValue(0)
self.tmpData[self.lastRow][3][-1] = 0
if type == 2:
holidCheck_2 = wx.FindWindowById(self.ctrls[3])
val = self.tmpData[self.lastRow][3][2]
flg = val & 96 and not val & 31
holidCheck_2.Enable(flg)
if not flg:
holidCheck_2.SetValue(0)
self.tmpData[self.lastRow][3][3] = 0
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
Diff()
def OnTimeChange(evt):
ix = self.ctrls.index(evt.GetId())
self.tmpData[self.lastRow][3][ix] = evt.GetValue()
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
Diff()
def onPeriodUnit(evt):
if len(self.ctrls) == 5 and evt.GetId() == self.ctrls[4]:
self.tmpData[self.lastRow][3][4] = evt.GetSelection()
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
else:
evt.Skip()
Diff()
def onDatePicker(evt):
val = str(dt.fromtimestamp(evt.GetDate().GetTicks()))[:10]
self.tmpData[self.lastRow][3][2] = val
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
Diff()
def onCheckBox(evt):
val = evt.IsChecked()
ix = self.ctrls.index(evt.GetId())
if self.tmpData[self.lastRow][2] == 2 and ix == 3:
self.tmpData[self.lastRow][3][3] = int(val)
else:
self.tmpData[self.lastRow][3][-1] = int(val)
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
Diff()
def OnUpdateDialog(evt):
if self.lastRow == evt.GetId():
OpenSchedule()
def OnSelectCell(evt):
self.lastRow = evt.m_itemIndex
OpenSchedule()
Diff()
evt.Skip() # necessary !!!
def enableBttns(value):
for i in (1, 2):
bttn = self.FindWindowById(bttns[i])
bttn.Enable(value)
Diff()
def ShowMessageBox(mess):
MessageBox(
self,
mess,
self.text.boxTitle,
wx.ICON_EXCLAMATION,
plugin = self.plugin
)
def FindNewTitle(title):
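            # Returns a unique copy name of the form "title (n)": n is 1 when no
            # numbered copy exists yet, otherwise one above the highest number used.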
tmpLst = []
for item in self.tmpData:
if item[1].startswith(title + " ("):
tmpLst.append(item[1][2 + len(title):])
if len(tmpLst) == 0:
return "%s (1)" % title
tmpLst2 = []
for item in tmpLst:
if item[-1] == ")":
try:
tmpLst2.append(int(item[:-1]))
except:
pass
if len(tmpLst2) == 0:
return "%s (1)" % title
else:
return "%s (%i)" % (title, 1 + max(tmpLst2))
def testValidity(data, test = False):
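            # Validates the schedule rows (name present and unique, type selected,
            # source or filename set depending on the work mode, period longer than
            # the duration) and shows the collected messages; returns True when at
            # least one problem was found.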
mssgs = []
tempDict = dict([(item[1].strip(), item[2]) for item in data])
if "" in tempDict.iterkeys():
mssgs.append(self.text.boxTexts[0])
if not test and len(tempDict) < len(data):
mssgs.append(self.text.boxTexts[1])
if -1 in tempDict.itervalues():
mssgs.append(self.text.boxTexts[4])
for item in data:
val = item[7]
if (val & 6) == 6: # = Do nothing
if not val & 24:
if not self.text.boxTexts[3] in mssgs:
mssgs.append(self.text.boxTexts[3])
else: # Not "Do nothing"
if not item[5]:
if not self.text.boxTexts[2] in mssgs:
mssgs.append(self.text.boxTexts[2])
if item[2] == 5 and item[3][4] < 2:
period = item[3][3] * (3600, 86400)[item[3][4]]
span = 60 * int(item[3][1][3:]) + 3600 * int(item[3][1][:2])
if period <= span:
if self.text.boxTexts[5] not in mssgs:
mssgs.append(self.text.boxTexts[5])
flag = len(mssgs) > 0
if flag:
ShowMessageBox("\n".join(mssgs))
return flag
def addSchedule(evt = None):
empty = [1, "", -1, [], " ", "", "", 5]
self.lastRow = len(self.tmpData)
self.tmpData.append(empty)
Tidy()
grid.AppendRow()
grid.SelRow(self.lastRow)
if not self.lastRow:
enableBttns(True)
EnableCtrls(True)
Diff()
def duplSchedule(evt = None):
lngth = len(self.tmpData)
item = cpy(self.tmpData[self.lastRow])
nxt = grid.GetItem(self.lastRow, 3).GetText()
item[4] = ""
self.lastRow = lngth
self.tmpData.append(item)
newTitle = FindNewTitle(self.tmpData[lngth][1])
self.tmpData[lngth][1] = newTitle
grid.AppendRow()
grid.SelRow(lngth)
grid.SetStringItem(lngth, 1, newTitle)
grid.SetStringItem(lngth, 3, nxt)
OpenSchedule()
Diff()
def delSchedule(evt = None):
self.tmpData.pop(self.lastRow)
grid.DeleteItem(self.lastRow)
if len(self.tmpData) > 0:
if self.lastRow == len(self.tmpData):
self.lastRow -= 1
OpenSchedule()
grid.SelRow(self.lastRow)
else:
self.lastRow = -1
Tidy()
EnableCtrls(False)
enableBttns(False)
Diff()
def Move(direction):
lst = cpy(self.tmpData)
index = self.lastRow
max = len(lst)-1
            #Last item wraps to the first position, the others shift down
if index == max and direction == 1:
self.tmpData[1:] = lst[:-1]
self.tmpData[0] = lst[max]
index2 = 0
            #First item wraps to the last position, the others shift up
elif index == 0 and direction == -1:
self.tmpData[:-1] = lst[1:]
self.tmpData[max] = lst[0]
index2 = max
else:
index2 = index + direction
self.tmpData[index] = lst[index2]
self.tmpData[index2] = lst[index]
del lst
return index2
def moveUp(evt = None):
newSel = Move(-1)
fillGrid(False)
self.grid.SelRow(newSel)
Diff()
def moveDown(evt = None):
newSel = Move(1)
fillGrid(False)
self.grid.SelRow(newSel)
Diff()
def onButton(evt):
id = evt.GetId()
if id == bttns[0]: # Add new
addSchedule()
elif id == bttns[1]: # Duplicate
duplSchedule()
elif id == bttns[2]: # Delete
delSchedule()
elif id == bttns[3]: # OK
if testValidity(self.tmpData):
evt.Skip()
return
self.plugin.data = cpy(self.tmpData)
self.tmpData = []
self.plugin.dataToXml()
self.plugin.UpdateEGscheduler()
self.Close()
elif id == bttns[4]: # Cancel
self.tmpData = []
self.Close()
elif id == bttns[5]: # Apply
applyBttn = wx.FindWindowById(bttns[5])
applyBttn.Enable(False)
if testValidity(self.tmpData):
evt.Skip()
return
self.plugin.data = cpy(self.tmpData)
self.plugin.dataToXml()
self.plugin.UpdateEGscheduler()
evt.Skip()
def EnableCtrls(value):
typeChoice.Enable(value)
schedulerName.Enable(value)
name_label.Enable(value)
type_label.Enable(value)
favorite_label.Enable(value)
workModeLbl.Enable(value)
triggEvtLbl.Enable(value)
windOpenLbl.Enable(value)
source_label.Enable(value)
filename_label.Enable(value)
favChoice.Enable(value)
sourceCtrl.Enable(value)
recordCtrl.Enable(value)
workModeCtrl.Enable(value)
triggEvtCtrl.Enable(value)
windOpenCtrl.Enable(value)
if not value:
workModeCtrl.SetSelection(-1)
triggEvtCtrl.SetSelection(-1)
windOpenCtrl.SetSelection(-1)
def OpenSchedule():
schedulerName.ChangeValue(self.tmpData[self.lastRow][1])
type = self.tmpData[self.lastRow][2]
fillDynamicSizer(
type,
self.tmpData[self.lastRow][3],
typeChoice.GetSelection()
)
typeChoice.SetSelection(type)
modes = self.tmpData[self.lastRow][7]
rsMode = (modes>>1)&3
workModeCtrl.SetSelection(rsMode)
recordCtrl.GetTextCtrl().ChangeValue(self.tmpData[self.lastRow][6])
sourceCtrl.SetValue(self.tmpData[self.lastRow][5])
if rsMode == 3:
windOpenCtrl.SetSelection(-1)
windOpenCtrl.Enable(False)
windOpenLbl.Enable(False)
else:
windOpenCtrl.SetSelection(modes&1)
windOpenCtrl.Enable(True)
windOpenLbl.Enable(True)
triggEvtCtrl.SetSelection((modes>>3)&3)
def Tidy():
favChoice.SetSelection(-1)
typeChoice.SetSelection(-1)
windOpenCtrl.SetSelection(1)
workModeCtrl.SetSelection(2)
triggEvtCtrl.SetSelection(0)
sourceCtrl.ChangeValue("")
recordCtrl.GetTextCtrl().ChangeValue("")
schedulerName.ChangeValue("")
fillDynamicSizer(-1)
filename_label.Enable(True)
recordCtrl.Enable(True)
def onCheckListCtrl(evt):
index, flag = evt.GetValue()
if self.tmpData[index][0] != int(flag):
self.tmpData[index][0] = int(flag)
Diff()
def onSchedulerTitle(evt):
txt = evt.GetString()
grid.SetStringItem(self.lastRow, 1, txt)
self.tmpData[self.lastRow][1] = txt
Diff()
def onPeriodNumber(evt):
if len(self.ctrls) == 5 and evt.GetId() == self.ctrls[3]:
self.tmpData[self.lastRow][3][3] = int(evt.GetString())
next = self.plugin.NextRun(
self.tmpData[self.lastRow][2],
self.tmpData[self.lastRow][3]
)
grid.SetStringItem(self.lastRow, 3, next)
Diff()
else:
evt.Skip()
def onTestButton(evt):
data = self.tmpData[self.lastRow]
if testValidity([data,], True):
return
ticks = mktime(localtime())
next, cmdline = self.plugin.Execute(data, True)
next = next[:19] if next else self.plugin.text.none
self.plugin.updateLogFile(self.text.testRun % (data[1], next))
self.plugin.updateLogFile(self.plugin.text.cmdLine % cmdline)
def OnRightClick(evt):
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.popupID6 = wx.NewId()
self.popupID7 = wx.NewId()
self.Bind(wx.EVT_MENU, addSchedule, id=self.popupID1)
self.Bind(wx.EVT_MENU, duplSchedule, id=self.popupID2)
self.Bind(wx.EVT_MENU, delSchedule, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.EnableAll, id=self.popupID4)
self.Bind(wx.EVT_MENU, self.DisableAll, id=self.popupID5)
self.Bind(wx.EVT_MENU, moveUp, id=self.popupID6)
self.Bind(wx.EVT_MENU, moveDown, id=self.popupID7)
# make a menu
menu = wx.Menu()
menu.Append(self.popupID1, self.text.popup[0])
menu.Append(self.popupID2, self.text.popup[1])
menu.Append(self.popupID3, self.text.popup[2])
menu.AppendSeparator()
menu.Append(self.popupID4, self.text.popup[3])
menu.Append(self.popupID5, self.text.popup[4])
menu.AppendSeparator()
menu.Append(self.popupID6, self.text.popup[5])
menu.Append(self.popupID7, self.text.popup[6])
self.PopupMenu(menu)
menu.Destroy()
evt.Skip()
def fillGrid(flag):
grid.DeleteAllItems()
rows = len(self.tmpData)
if rows > 0:
for row in range(rows):
grid.InsertStringItem(row, "")
if self.tmpData[row][0]:
grid.CheckItem(row)
grid.SetStringItem(row, 1, self.tmpData[row][1])
grid.SetStringItem(row, 2, self.tmpData[row][4])
next = self.plugin.NextRun(self.tmpData[row][2], self.tmpData[row][3])
grid.SetStringItem(row, 3, next)
if flag:
self.lastRow = 0
grid.SelRow(0)
OpenSchedule()
enableBttns(True)
else:
EnableCtrls(False)
grid.DeleteItem(0)
dynamicSizer = wx.BoxSizer(wx.VERTICAL)
wDynamic = fillDynamicSizer(3)
fillDynamicSizer(-1)
self.SetSize(wx.Size(wDynamic + 37, 684))
grid = self.grid = CheckListCtrl(self, text, wDynamic + 20)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(grid, 0, wx.ALL, 5)
favorite_label = wx.StaticText(self, -1, self.text.favorite)
workModeLbl = wx.StaticText(self, -1, self.text.workModeLabel)
workModeCtrl = wx.Choice(self, -1, choices = self.text.workModes)
triggEvtLbl = wx.StaticText(self, -1, self.text.triggEvtLabel)
triggEvtCtrl = wx.Choice(self, -1, choices = self.text.triggEvtChoices)
windOpenLbl = wx.StaticText(self, -1, self.text.windOpenLabel)
windOpenCtrl = wx.Choice(self, -1, choices = self.text.windOpenChoices)
source_label = wx.StaticText(self, -1, self.text.source)
self.favorites = self.plugin.RefreshVariables()
favChoice = wx.Choice(self, -1, choices = [item[1] for item in self.favorites])
sourceCtrl = wx.TextCtrl(self,-1,"")
filename_label = wx.StaticText(self, -1, self.text.filename)
schedulerName = wx.TextCtrl(self, -1, "")
typeChoice = wx.Choice(self, -1, choices = self.text.sched_type)
xmltoparse = u'%s\\RadioSure.xml' % self.plugin.xmlpath
xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse
xmldoc = miniDom.parse(xmltoparse)
recordings = xmldoc.getElementsByTagName('Recordings')
if not recordings:
folder = u'%s\\RadioSure Recordings' % self.plugin.xmlpath
else:
folder = recordings[0].getElementsByTagName('Folder')[0].firstChild.data
recordCtrl = MyFileBrowseButton(
self,
toolTip = self.text.toolTipFile,
dialogTitle = self.text.browseTitle,
buttonText = eg.text.General.browse,
startDirectory = folder
)
self.grid.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, OnRightClick)
def onSource(evt):
src = evt.GetString()
srcs = [item[0] for item in self.favorites]
if src in srcs:
ix = srcs.index(src)
else:
ix = -1
favChoice.SetSelection(ix)
self.tmpData[self.lastRow][5] = src
Diff()
evt.Skip()
sourceCtrl.Bind(wx.EVT_TEXT, onSource)
def onFavChoice(evt):
sel = evt.GetSelection()
txt = self.favorites[sel][0]
sourceCtrl.ChangeValue(txt)
self.tmpData[self.lastRow][5] = txt
Diff()
evt.Skip()
favChoice.Bind(wx.EVT_CHOICE, onFavChoice)
def onRecordCtrl(evt):
txt = evt.GetString()
self.tmpData[self.lastRow][6] = txt
Diff()
evt.Skip()
recordCtrl.GetTextCtrl().Bind(wx.EVT_TEXT, onRecordCtrl)
def onTypeChoice(evt):
type = evt.GetSelection()
if self.tmpData[self.lastRow][2] != type:
empty_data = [
["", "", 0, 0],
["", ""],
["", "", 127, 0, 0],
["", "", 0, 0, 63, 63, 0],
["", "", 0, 0, 0, 0, 63, 63],
["", "", 0, 1, 0],
]
self.tmpData[self.lastRow][2] = type
data = empty_data[self.tmpData[self.lastRow][2]]
self.tmpData[self.lastRow][3] = data
fillDynamicSizer(type, data)
Diff()
def onWorkMode(evt):
sel = evt.GetSelection()
if sel == 3:
windOpenCtrl.SetSelection(-1)
windOpenCtrl.Enable(False)
windOpenLbl.Enable(False)
if triggEvtCtrl.GetSelection() == 0:
ShowMessageBox(self.text.boxTexts[3])
else:
if windOpenCtrl.GetSelection() == -1:
windOpenCtrl.SetSelection(1)
windOpenCtrl.Enable(True)
windOpenLbl.Enable(True)
val = self.tmpData[self.lastRow][7]
val &= (255-6)
val |= (sel<<1)
self.tmpData[self.lastRow][7] = val
Diff()
workModeCtrl.Bind(wx.EVT_CHOICE, onWorkMode)
def onWindOpen(evt):
sel = evt.GetSelection()
val = self.tmpData[self.lastRow][7]
val &= (255-1)
val |= sel
self.tmpData[self.lastRow][7] = val
Diff()
windOpenCtrl.Bind(wx.EVT_CHOICE, onWindOpen)
def onTriggEvtCtrl(evt):
sel = evt.GetSelection()
workMode = workModeCtrl.GetSelection()
if sel == 0 and workMode == 3:
ShowMessageBox(self.text.boxTexts[3])
val = self.tmpData[self.lastRow][7]
val &= (255-24)
val |= (sel<<3)
self.tmpData[self.lastRow][7] = val
Diff()
triggEvtCtrl.Bind(wx.EVT_CHOICE, onTriggEvtCtrl)
bttnSizer = wx.BoxSizer(wx.HORIZONTAL)
bttnSizer.Add((5, -1))
i = 0
for bttn in self.text.buttons:
id = wx.NewId()
bttns.append(id)
b = wx.Button(self, id, bttn)
bttnSizer.Add(b,1)
if i in (1, 2, 5):
b.Enable(False)
if i == 3:
b.SetDefault()
if i == 5:
self.applyBttn = b
b.Bind(wx.EVT_BUTTON, onButton, id = id)
bttnSizer.Add((5, -1))
i += 1
sizer.Add(bttnSizer,0,wx.EXPAND)
id = wx.NewId() #testBttn
bttns.append(id)
self.Bind(wx.EVT_BUTTON, onTestButton, id = id)
wx.EVT_CHECKLISTBOX(self, -1, onCheckListBox)
EVT_TIMEUPDATE(self, -1, OnTimeChange)
wx.EVT_TEXT(self, -1, onPeriodNumber)
wx.EVT_CHOICE(self, -1, onPeriodUnit)
wx.EVT_DATE_CHANGED(self, -1, onDatePicker)
wx.EVT_CHECKBOX(self, -1, onCheckBox)
self.Bind(EVT_UPDATE_DIALOG, OnUpdateDialog)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, OnSelectCell)
typeChoice.Bind(wx.EVT_CHOICE, onTypeChoice)
schedulerName.Bind(wx.EVT_TEXT, onSchedulerTitle)
self.Bind(EVT_CHECKLISTCTRL, onCheckListCtrl)
nameSizer = wx.FlexGridSizer(2, 0, 0, 20)
nameSizer.AddGrowableCol(0,1)
name_label = wx.StaticText(self, -1, self.text.header[1] + ":")
nameSizer.Add(name_label)
type_label = wx.StaticText(self, -1, self.text.type_label)
nameSizer.Add(type_label)
nameSizer.Add(schedulerName, 0, wx.EXPAND)
nameSizer.Add(typeChoice)
typeSizer = wx.StaticBoxSizer(
wx.StaticBox(self, -1, ""),
wx.VERTICAL
)
dynamicSizer.SetMinSize((-1, 226))
typeSizer.Add(nameSizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5)
typeSizer.Add(dynamicSizer, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 5)
sizer.Add(typeSizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
sizer.Add(source_label, 0, wx.TOP|wx.LEFT, 5)
sizer.Add(sourceCtrl,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
sizer.Add((1,4))
sizer.Add(favorite_label, 0, wx.TOP|wx.LEFT, 5)
sizer.Add(favChoice,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
sizer.Add((1,4))
choicesSizer = wx.FlexGridSizer(2,3,0,10)
choicesSizer.Add(windOpenLbl,0)
choicesSizer.Add(workModeLbl,0)
choicesSizer.Add(triggEvtLbl,0)
choicesSizer.Add(windOpenCtrl,0,wx.EXPAND)
choicesSizer.Add(workModeCtrl,0,wx.EXPAND)
choicesSizer.Add(triggEvtCtrl,0,wx.EXPAND)
sizer.Add(choicesSizer,0,wx.ALL, 5)
sizer.Add(filename_label, 0, wx.LEFT, 5)
sizer.Add(recordCtrl,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
fillGrid(True)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.SetSizer(sizer)
sizer.Layout()
if ConfigData.pos:
self.SetPosition(ConfigData.pos)
else:
self.Center()
self.Show(True)
def EnableAll(self, flag):
if isinstance(flag, wx.CommandEvent):
schedule = self.tmpData[self.lastRow][1]
flag = 1
for ix in range(len(self.tmpData)):
self.tmpData[ix][0] = flag
if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]:
if flag:
self.grid.CheckItem(ix)
elif self.grid.IsChecked(ix):
self.grid.ToggleItem(ix)
self.applyBttn.Enable(self.tmpData != self.plugin.data)
def DisableAll(self, evt):
self.EnableAll(0)
def EnableSchedule(self, schedule, flag):
tmpList = [item[1] for item in self.tmpData]
if schedule in tmpList:
ix = tmpList.index(schedule)
self.tmpData[ix][0] = flag
if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]:
if flag:
self.grid.CheckItem(ix)
elif self.grid.IsChecked(ix):
self.grid.ToggleItem(ix)
def DeleteSchedule(self, schedule):
tmpList = [item[1] for item in self.tmpData]
if schedule in tmpList:
ix = tmpList.index(schedule)
if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]:
self.grid.DeleteItem(ix)
self.grid.Refresh()
self.tmpData.pop(ix)
def AddSchedule(self, schedule):
tmpList = [item[1] for item in self.tmpData]
if schedule[1] in tmpList:
ix = tmpList.index(schedule[1])
self.tmpData[ix] = schedule
if not self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]:
return
else:
ix = len(self.tmpData)
self.tmpData.append(schedule)
self.grid.InsertStringItem(ix, "")
if schedule[0]:
self.grid.CheckItem(ix)
elif self.grid.IsChecked(ix):
self.grid.ToggleItem(ix)
self.grid.SetStringItem(ix, 1, schedule[1])
next = self.plugin.NextRun(schedule[2], schedule[3])
self.grid.SetStringItem(ix, 3, next)
if self.lastRow == ix:
evt = wx.PyCommandEvent(newEVT_UPDATE_DIALOG, ix)
self.GetEventHandler().ProcessEvent(evt)
def RefreshGrid(self, ix, last, next):
if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]:
self.grid.SetStringItem(ix, 2, last)
self.grid.SetStringItem(ix, 3, next)
def onClose(self, evt):
hwnd = self.GetHandle()
wp = GetWindowPlacement(hwnd)[4]
        #Note: GetPosition() returns (-32000, -32000) if the window is minimized !!!
pos = (wp[0], wp[1])
if pos != ConfigData.pos:
ConfigData.pos = pos
#if not eg.document.IsDirty():
# wx.CallAfter(eg.Notify, "DocumentChange", True)
self.Show(False)
self.plugin.dialog = None
self.Destroy()
evt.Skip()
#===============================================================================
def HandleRS():
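    # Returns the window handles of running RadioSure main windows: dialogs of
    # class #32770 that contain a SysListView32 child with control id 1016.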
FindRS = eg.WindowMatcher(
u'RadioSure.exe',
None,
u'#32770',
None,
None,
None,
True,
0.0,
0
)
hwnds = FindRS()
res = []
for hwnd in hwnds:
try: #maybe already closed !!!
curhw = GetWindow(hwnd, GW_CHILD)
while curhw > 0:
if GetDlgCtrlID(curhw) == 1016 and GetClassName(curhw) == 'SysListView32':
res.append(hwnd)
break
curhw = GetWindow(curhw, GW_HWNDNEXT)
except:
pass
return res
#===============================================================================
class ObservationThread(Thread):
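    # Polls the RadioSure window title every 'period' seconds and triggers an
    # EventGhost event (prefix "RadioSure") whenever the title text changes,
    # ignoring the default "Radio? Sure!" title.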
def __init__(
self,
period,
evtName,
):
self.abort = False
self.aborted = False
self.oldData = ""
self.threadFlag = Event()
self.period = period
self.evtName = evtName
Thread.__init__(self, name = self.evtName.encode('unicode_escape')+'_Thread')
def run(self):
while 1:
hwnd = HandleRS()
if hwnd:
data = GetWindowText(hwnd[0]).decode(eg.systemEncoding)
if data != self.oldData and data != "Radio? Sure!":
self.oldData = data
eg.TriggerEvent(self.evtName, payload = data, prefix = "RadioSure")
if self.abort:
break
self.threadFlag.wait(self.period)
self.threadFlag.clear()
self.aborted = True
def AbortObservation(self):
self.abort = True
self.threadFlag.set()
#===============================================================================
def GetCtrlByID(id):
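    # Returns the handle of the child control with the given dialog id in the
    # first running RadioSure window, or None if it cannot be found.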
res = None
hwnds = HandleRS()
if hwnds:
try:
res = GetDlgItem(hwnds[0], id)
except:
pass
return res
#===============================================================================
def getPathFromReg():
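    # Reads the first value stored under HKEY_CURRENT_USER\Software\RadioSure
    # (the RadioSure path); returns None if the key is missing.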
try:
rs_reg = OpenKey(
HKEY_CURRENT_USER,
"Software\\RadioSure"
)
res = unicode(EnumValue(rs_reg,0)[1])
CloseKey(rs_reg)
except:
res = None
return res
#===============================================================================
def FindMonthDay(year, month, weekday, index):
"""weekday = what day of the week looking for (numbered 0-6, 0 = monday)
index = how many occurrence of looking for (numbered 0-4 and 5 for the last day)
Returns the day of the month (date) or 0 (if no such date exists)"""
first_wd, length = monthrange(year, month)
day = 1 + weekday - first_wd
if day < 1:
day += 7
if index == 5:
index = 4 if day <= length % 7 else 3
day += 7 * index
if day > length:
day = 0
return day
#===============================================================================
def getStations(nodelist):
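    # Extracts [source, title, genre, language, country] lists from the Item-*
    # child nodes of a Favorites/History element.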
tmp = []
for item in nodelist.childNodes:
if item.nodeName[:5] == "Item-":
title = item.getElementsByTagName('Title')[0].firstChild
if title:
title = title.data
source = item.getElementsByTagName('Source')[0].firstChild
if source:
source = source.data
genre = item.getElementsByTagName('Genre')[0].firstChild
if genre:
genre = genre.data
language = item.getElementsByTagName('Language')[0].firstChild
if language:
language = language.data
country = item.getElementsByTagName('Country')[0].firstChild
if country:
country = country.data
tmp.append([source, title, genre, language, country])
return tmp
#===============================================================================
class MenuGrid(gridlib.Grid):
def __init__(self, parent, lngth):
gridlib.Grid.__init__(self, parent)
self.SetRowLabelSize(0)
self.SetColLabelSize(0)
self.SetDefaultRowSize(16)
self.SetScrollLineX(1)
self.SetScrollLineY(1)
self.EnableEditing(False)
self.EnableDragColSize(False)
self.EnableDragRowSize(False)
self.EnableDragGridSize(False)
self.EnableGridLines(False)
self.SetColMinimalAcceptableWidth(8)
self.CreateGrid(lngth, 3)
attr = gridlib.GridCellAttr()
attr.SetAlignment(wx.ALIGN_LEFT, wx.ALIGN_CENTRE)
self.SetColAttr(1,attr)
self.SetSelectionMode(gridlib.Grid.wxGridSelectRows)
self.Bind(gridlib.EVT_GRID_CMD_SELECT_CELL, self.onGridSelectCell, self)
def SetBackgroundColour(self, colour):
self.SetDefaultCellBackgroundColour(colour)
def SetForegroundColour(self, colour):
self.SetDefaultCellTextColour(colour)
def SetFont(self, font):
self.SetDefaultCellFont(font)
def GetSelection(self):
return self.GetSelectedRows()[0]
def Set(self, choices):
oldLen = self.GetNumberRows()
newLen = len(choices)
h = self.GetDefaultRowSize()
if oldLen > newLen:
self.DeleteRows(0, oldLen-newLen, False)
elif oldLen < newLen:
self.AppendRows(newLen-oldLen, False)
for i in range(len(choices)):
chr = u"\u25a0" if choices[i][2] else ""
self.SetCellValue(i,0,chr)
self.SetCellValue(i,1," "+choices[i][0])
chr = u"\u25ba" if choices[i][3] == -1 else ""
self.SetCellValue(i,2, chr)
self.SetRowSize(i,h)
def onGridSelectCell(self, event):
row = event.GetRow()
self.SelectRow(row)
if not self.IsVisible(row,1):
self.MakeCellVisible(row,1)
event.Skip()
def MoveCursor(self, step):
max = self.GetNumberRows()
sel = self.GetSelectedRows()[0]
new = sel + step
if new < 0:
new += max
elif new > max-1:
new -= max
self.SetGridCursor(new, 1)
self.SelectRow(new)
#===============================================================================
class MyTextDropTarget(EventDropTarget):
def __init__(self, object):
EventDropTarget.__init__(self, object)
self.object = object
def OnDragOver(self, x, y, dragResult):
return wx.DragMove
def OnData(self, dummyX, dummyY, dragResult):
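        # Appends a dropped EventGhost event string to the target list control,
        # unless the event is already assigned to one of the event lists (in
        # which case the system exclamation sound is played).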
if self.GetData() and self.customData.GetDataSize() > 0:
txt = self.customData.GetData()
ix, evtList = self.object.GetEvtList()
flag = True
for lst in evtList:
if txt in lst:
flag = False
break
if flag:
self.object.InsertImageStringItem(len(evtList[ix]), txt, 0)
self.object.UpdateEvtList(ix, txt)
else:
PlaySound('SystemExclamation', SND_ASYNC)
def OnLeave(self):
pass
#===============================================================================
class EventListCtrl(wx.ListCtrl):
def __init__(self, parent, id, evtList, ix, plugin):
width = 205
wx.ListCtrl.__init__(self, parent, id, style=wx.LC_REPORT |
wx.LC_NO_HEADER | wx.LC_SINGLE_SEL, size = (width, -1))
self.parent = parent
self.id = id
self.evtList = evtList
self.ix = ix
self.plugin = plugin
self.sel = -1
self.il = wx.ImageList(16, 16)
self.il.Add(wx.BitmapFromImage(wx.Image(join(IMAGES_DIR, "event.png"), wx.BITMAP_TYPE_PNG)))
self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.InsertColumn(0, '')
self.SetColumnWidth(0, width - 5 - SYS_VSCROLL_X)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnSelect)
self.Bind(wx.EVT_SET_FOCUS, self.OnChange)
self.Bind(wx.EVT_LIST_INSERT_ITEM, self.OnChange)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnChange)
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick)
self.SetToolTipString(self.plugin.text.toolTip)
def OnSelect(self, event):
self.sel = event.GetIndex()
evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
evt.SetValue(self)
self.GetEventHandler().ProcessEvent(evt)
event.Skip()
def OnChange(self, event):
evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
evt.SetValue(self)
self.GetEventHandler().ProcessEvent(evt)
event.Skip()
def OnRightClick(self, event):
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnDeleteButton, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.OnDeleteAllButton, id=self.popupID2)
# make a menu
menu = wx.Menu()
# add some items
menu.Append(self.popupID1, self.plugin.text.popup[0])
menu.Append(self.popupID2, self.plugin.text.popup[1])
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(menu)
menu.Destroy()
def OnDeleteButton(self, event=None):
self.DeleteItem(self.sel)
self.evtList[self.ix].pop(self.sel)
evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
evt.SetValue(self)
self.GetEventHandler().ProcessEvent(evt)
if event:
event.Skip()
def OnDeleteAllButton(self, event=None):
self.DeleteAllItems()
evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
evt.SetValue(self)
self.GetEventHandler().ProcessEvent(evt)
self.evtList[self.ix] = []
if event:
event.Skip()
def GetEvtList(self):
return self.ix, self.evtList
def UpdateEvtList(self, ix, txt):
self.evtList[ix].append(txt)
def SetItems(self, evtList):
for i in range(len(evtList)):
self.InsertImageStringItem(i, evtList[i], 0)
#===============================================================================
class MenuEventsDialog(wx.MiniFrame):
def __init__(self, parent, plugin):
wx.MiniFrame.__init__(
self,
parent,
-1,
style=wx.CAPTION,
name="Menu events dialog"
)
self.panel = parent
self.plugin = plugin
self.evtList = cpy(self.panel.evtList)
self.SetBackgroundColour(wx.NullColour)
self.ctrl = None
self.sel = -1
def ShowMenuEventsDialog(self, title, labels):
self.panel.Enable(False)
self.panel.dialog.buttonRow.cancelButton.Enable(False)
self.panel.EnableButtons(False)
self.SetTitle(title)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.SetMinSize((450, 308))
topSizer=wx.GridBagSizer(2, 20)
textLbl_0=wx.StaticText(self, -1, labels[0])
id = wx.NewId()
eventsCtrl_0 = EventListCtrl(self, id, self.evtList, 0, self.plugin)
eventsCtrl_0.SetItems(self.evtList[0])
dt0 = MyTextDropTarget(eventsCtrl_0)
eventsCtrl_0.SetDropTarget(dt0)
textLbl_1=wx.StaticText(self, -1, labels[1])
id = wx.NewId()
eventsCtrl_1 = EventListCtrl(self, id, self.evtList, 1, self.plugin)
eventsCtrl_1.SetItems(self.evtList[1])
dt1 = MyTextDropTarget(eventsCtrl_1)
eventsCtrl_1.SetDropTarget(dt1)
textLbl_2=wx.StaticText(self, -1, labels[2])
id = wx.NewId()
eventsCtrl_2 = EventListCtrl(self, id, self.evtList, 2, self.plugin)
eventsCtrl_2.SetItems(self.evtList[2])
dt2 = MyTextDropTarget(eventsCtrl_2)
eventsCtrl_2.SetDropTarget(dt2)
textLbl_3=wx.StaticText(self, -1, labels[3])
id = wx.NewId()
eventsCtrl_3 = EventListCtrl(self, id, self.evtList, 3, self.plugin)
eventsCtrl_3.SetItems(self.evtList[3])
dt3 = MyTextDropTarget(eventsCtrl_3)
eventsCtrl_3.SetDropTarget(dt3)
textLbl_4=wx.StaticText(self, -1, labels[4])
id = wx.NewId()
eventsCtrl_4 = EventListCtrl(self, id, self.evtList, 4, self.plugin)
eventsCtrl_4.SetItems(self.evtList[4])
dt4 = MyTextDropTarget(eventsCtrl_4)
eventsCtrl_4.SetDropTarget(dt4)
deleteSizer = wx.BoxSizer(wx.VERTICAL)
delOneBtn = wx.Button(self, -1, self.plugin.text.popup[0])
delBoxBtn = wx.Button(self, -1, self.plugin.text.popup[1])
clearBtn = wx.Button(self, -1, self.plugin.text.clear)
deleteSizer.Add(delOneBtn, 1, wx.EXPAND)
deleteSizer.Add(delBoxBtn, 1, wx.EXPAND|wx.TOP,5)
deleteSizer.Add(clearBtn, 1, wx.EXPAND|wx.TOP,5)
topSizer.Add(textLbl_0, (0,0))
topSizer.Add(eventsCtrl_0, (1,0), flag = wx.EXPAND)
topSizer.Add(textLbl_1, (0,1))
topSizer.Add(eventsCtrl_1, (1,1), flag = wx.EXPAND)
topSizer.Add(textLbl_2, (2,0),flag = wx.TOP, border = 8)
topSizer.Add(eventsCtrl_2, (3,0), flag = wx.EXPAND)
topSizer.Add(textLbl_3, (2,1), flag = wx.TOP, border = 8)
topSizer.Add(eventsCtrl_3, (3,1), flag = wx.EXPAND)
topSizer.Add(textLbl_4, (4,0), flag = wx.TOP, border = 8)
topSizer.Add(eventsCtrl_4, (5,0), flag = wx.EXPAND)
topSizer.Add(deleteSizer, (5,1), flag = wx.EXPAND)
line = wx.StaticLine(self, -1, size=(20,-1),pos = (200,0), style=wx.LI_HORIZONTAL)
btn1 = wx.Button(self, wx.ID_OK)
btn1.SetLabel(self.plugin.text.ok)
btn1.SetDefault()
btn2 = wx.Button(self, wx.ID_CANCEL)
btn2.SetLabel(self.plugin.text.cancel)
btnsizer = wx.StdDialogButtonSizer()
btnsizer.AddButton(btn1)
btnsizer.AddButton(btn2)
btnsizer.Realize()
sizer.Add(topSizer,0,wx.ALL,10)
sizer.Add(line, 0, wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM,5)
sizer.Add(btnsizer, 0, wx.EXPAND|wx.RIGHT, 10)
sizer.Add((1,6))
self.SetSizer(sizer)
sizer.Fit(self)
def onFocus(evt):
ctrl = evt.GetValue()
if ctrl != self.ctrl:
if self.ctrl:
self.ctrl.SetItemState(-1, wx.LIST_MASK_STATE, wx.LIST_STATE_SELECTED)
self.ctrl = ctrl
sel = self.ctrl.sel
if sel != -1:
self.sel = sel
flag = self.ctrl.GetSelectedItemCount() > 0
delOneBtn.Enable(flag)
delBoxBtn.Enable(flag)
evt.Skip()
eventsCtrl_0.Bind(EVT_BUTTON_AFTER, onFocus)
eventsCtrl_1.Bind(EVT_BUTTON_AFTER, onFocus)
eventsCtrl_2.Bind(EVT_BUTTON_AFTER, onFocus)
eventsCtrl_3.Bind(EVT_BUTTON_AFTER, onFocus)
eventsCtrl_4.Bind(EVT_BUTTON_AFTER, onFocus)
def onDelOneBtn(evt):
self.ctrl.OnDeleteButton()
delOneBtn.Enable(False)
delBoxBtn.Enable(False)
evt.Skip()
delOneBtn.Bind(wx.EVT_BUTTON, onDelOneBtn)
def onDelBoxBtn(evt):
self.ctrl.OnDeleteAllButton()
delOneBtn.Enable(False)
delBoxBtn.Enable(False)
evt.Skip()
delBoxBtn.Bind(wx.EVT_BUTTON, onDelBoxBtn)
def onClearBtn(evt):
eventsCtrl_0.DeleteAllItems()
eventsCtrl_1.DeleteAllItems()
eventsCtrl_2.DeleteAllItems()
eventsCtrl_3.DeleteAllItems()
eventsCtrl_4.DeleteAllItems()
delOneBtn.Enable(False)
delBoxBtn.Enable(False)
self.evtList = [[],[],[],[],[]]
evt.Skip()
clearBtn.Bind(wx.EVT_BUTTON, onClearBtn)
def onClose(evt):
self.panel.Enable(True)
self.panel.dialog.buttonRow.cancelButton.Enable(True)
self.panel.EnableButtons(True)
self.GetParent().GetParent().Raise()
self.Destroy()
self.panel.setFocus()
self.Bind(wx.EVT_CLOSE, onClose)
def onCancel(evt):
self.panel.Enable(True)
self.panel.dialog.buttonRow.cancelButton.Enable(True)
self.panel.EnableButtons(True)
self.Close()
btn2.Bind(wx.EVT_BUTTON,onCancel)
def onOK(evt):
self.panel.evtList = self.evtList
self.Close()
btn1.Bind(wx.EVT_BUTTON,onOK)
sizer.Layout()
self.Raise()
self.Show()
#===============================================================================
class Menu(wx.Frame):
def __init__(self):
wx.Frame.__init__(
self,
None,
-1,
'MPC_menu',
style = wx.STAY_ON_TOP|wx.SIMPLE_BORDER
)
self.flag = False
self.monitor = 0
self.oldMenu = []
def DrawMenu(self, ix):
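        # Sizes the menu frame to fit the current item texts on the target
        # monitor, selects row 'ix' and shows the window centred on that monitor.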
self.Show(False)
self.menuGridCtrl.SetGridCursor(ix, 1)
self.menuGridCtrl.SelectRow(ix)
monDim = GetMonitorDimensions()
try:
x,y,ws,hs = monDim[self.monitor]
except IndexError:
x,y,ws,hs = monDim[0]
# menu height calculation:
h=self.GetCharHeight()+4
for i in range(len(self.choices)):
self.menuGridCtrl.SetRowSize(i,h)
self.menuGridCtrl.SetCellValue(i,1," "+self.choices[i])
if self.items[i][3] == -1:
self.menuGridCtrl.SetCellValue(i,2, u"\u25ba")
height0 = len(self.choices)*h
height1 = h*((hs-20)/h)
height = min(height0, height1)+6
# menu width calculation:
width_lst=[]
for item in self.choices:
width_lst.append(self.GetTextExtent(item+' ')[0])
width = max(width_lst)+8
self.menuGridCtrl.SetColSize(0,self.w0)
self.menuGridCtrl.SetColSize(1,width)
self.menuGridCtrl.SetColSize(2,self.w2)
self.menuGridCtrl.ForceRefresh()
width = width + self.w0 + self.w2
if height1 < height0:
width += SYS_VSCROLL_X
if width > ws-50:
if height + SYS_HSCROLL_Y < hs:
height += SYS_HSCROLL_Y
width = ws-50
width += 6
x_pos = x + (ws - width)/2
y_pos = y + (hs - height)/2
self.SetDimensions(x_pos,y_pos,width,height)
self.menuGridCtrl.SetDimensions(2, 2, width-6, height-6, wx.SIZE_AUTO)
self.Show(True)
self.Raise()
def ShowMenu(
self,
fore,
back,
foreSel,
backSel,
fontInfo,
flag,
plugin,
event,
monitor,
hWnd,
evtList,
ix,
):
self.fore = fore
self.back = back
self.foreSel = foreSel
self.backSel = backSel
self.fontInfo = fontInfo
self.flag = flag
self.plugin = plugin
self.monitor = monitor
self.hWnd = hWnd
self.evtList = evtList
eg.TriggerEvent("OnScreenMenu.%s" % self.plugin.text.opened, prefix = "RadioSure")
for evt in self.evtList[0]:
eg.Bind(evt, self.onUp)
for evt in self.evtList[1]:
eg.Bind(evt, self.onDown)
for evt in self.evtList[2]:
eg.Bind(evt, self.onLeft)
for evt in self.evtList[3]:
eg.Bind(evt, self.onRight)
for evt in self.evtList[4]:
eg.Bind(evt, self.onEscape)
self.menuHwnd, self.menu = self.plugin.GetRS_Menu(self.hWnd)
self.items = self.plugin.GetItemList(self.menuHwnd, self.menu)
self.choices = [item[0] for item in self.items]
self.menuGridCtrl = MenuGrid(self, len(self.choices))
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
mainSizer.Add(self.menuGridCtrl, 0, wx.EXPAND)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(gridlib.EVT_GRID_CMD_CELL_LEFT_DCLICK, self.onDoubleClick, self.menuGridCtrl)
self.Bind(wx.EVT_CHAR_HOOK, self.onFrameCharHook)
font = wx.FontFromNativeInfoString(fontInfo)
self.menuGridCtrl.SetFont(font)
arial = wx.FontFromNativeInfoString(ARIAL_INFO)
self.SetFont(font)
hght = self.GetTextExtent('X')[1]
for n in range(1,1000):
arial.SetPointSize(n)
self.SetFont(arial)
h = self.GetTextExtent(u"\u25a0")[1]
if h > hght:
break
arial.SetPointSize(2*n/3)
self.SetFont(arial)
self.w0 = 2 * self.GetTextExtent(u"\u25a0")[0]
attr = gridlib.GridCellAttr()
attr.SetFont(arial)
attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
self.menuGridCtrl.SetColAttr(0,attr)
for n in range(1,1000):
arial.SetPointSize(n)
self.SetFont(arial)
h = self.GetTextExtent(u"\u25ba")[1]
if h > hght:
break
arial.SetPointSize(n/2)
self.SetFont(arial)
self.w2 = 2 * self.GetTextExtent(u"\u25ba")[0]
attr = gridlib.GridCellAttr()
attr.SetFont(arial)
attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
self.menuGridCtrl.SetColAttr(2,attr)
self.SetFont(font)
self.SetBackgroundColour((0, 0, 0))
self.menuGridCtrl.SetBackgroundColour(self.back)
self.menuGridCtrl.SetForegroundColour(self.fore)
self.menuGridCtrl.SetSelectionBackground(self.backSel)
self.menuGridCtrl.SetSelectionForeground(self.foreSel)
if self.flag:
self.timer=MyTimer(t = 5.0, plugin = self.plugin)
self.menuGridCtrl.Set(self.items)
self.UpdateMenu(ix == 0, ix)
wx.Yield()
SetEvent(event)
def UpdateMenu(self, root = False, ix = 0):
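        # Re-reads the RadioSure context menu (the root menu when root is True,
        # otherwise submenu 'ix'), refills the grid and redraws the on-screen menu.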
if root:
self.menuHwnd, self.menu = self.plugin.GetRS_Menu(self.hWnd)
else:
self.menuHwnd, self.menu = self.GetSubMenuExt(self.hWnd, ix)
ix = 0
self.items = self.plugin.GetItemList(self.menuHwnd, self.menu)
if len(self.items)==0:
PlaySound('SystemExclamation', SND_ASYNC)
eg.PrintError("Please report: %i, %i, %i, %i" % (ix, int(root), self.menuHwnd, self.menu))
#self.menu,ix = self.oldMenu.pop()
#self.items = self.plugin.GetItemList(self.hWnd, self.menu)
self.choices = [item[0] for item in self.items]
self.menuGridCtrl.Set(self.items)
self.DrawMenu(ix)
def MoveCursor(self, step):
max=len(self.choices)
if max > 0:
self.menuGridCtrl.MoveCursor(step)
def onUp(self, event):
wx.CallAfter(self.menuGridCtrl.MoveCursor, -1)
def onDown(self, event):
wx.CallAfter(self.menuGridCtrl.MoveCursor, 1)
def onLeft(self, event):
if len(self.oldMenu) > 0:
ix = self.oldMenu.pop()
wx.CallAfter(self.UpdateMenu, True, ix)
else:
wx.CallAfter(self.destroyMenu)
def onRight(self, event):
wx.CallAfter(self.DefaultAction)
def onEscape(self, event):
wx.CallAfter(self.destroyMenu)
def GetSubMenuExt(self, hWnd, ix):
menu, hMenu = self.plugin.GetRS_Menu(hWnd)
if menu:
hMenu = GetSubMenu(hMenu, ix)
return (menu, hMenu)
def DefaultAction(self):
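        # Activates the selected row: closes the on-screen menu and sends the
        # item's WM_COMMAND id to RadioSure, or descends into the submenu when
        # the row has no command id.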
sel = self.menuGridCtrl.GetSelection()
item = self.items[sel]
id = item[3]
if id != -1:
self.destroyMenu()
SendMessage(self.hWnd, WM_COMMAND, id, 0)
else:
self.oldMenu.append(sel)
wx.CallAfter(self.UpdateMenu, False, item[1])
def onFrameCharHook(self, event):
keyCode = event.GetKeyCode()
if keyCode == wx.WXK_F4:
if event.AltDown():
self.destroyMenu()
elif keyCode == wx.WXK_RETURN or keyCode == wx.WXK_NUMPAD_ENTER:
self.DefaultAction()
elif keyCode == wx.WXK_RIGHT or keyCode == wx.WXK_NUMPAD_RIGHT:
self.DefaultAction()
elif keyCode == wx.WXK_ESCAPE:
self.destroyMenu()
elif keyCode == wx.WXK_UP or keyCode == wx.WXK_NUMPAD_UP:
self.menuGridCtrl.MoveCursor(-1)
elif keyCode == wx.WXK_DOWN or keyCode == wx.WXK_NUMPAD_DOWN:
self.menuGridCtrl.MoveCursor(1)
elif keyCode == wx.WXK_LEFT or keyCode == wx.WXK_NUMPAD_LEFT:
if len(self.oldMenu) > 0:
ix = self.oldMenu.pop()
wx.CallAfter(self.UpdateMenu, True, ix)
else:
self.destroyMenu()
else:
event.Skip()
def onDoubleClick(self, event):
self.DefaultAction()
event.Skip()
def onClose(self, event):
self.Show(False)
self.Destroy()
self.plugin.menuDlg = None
def destroyMenu(self, event = None):
for evt in self.evtList[0]:
eg.Unbind(evt, self.onUp)
for evt in self.evtList[1]:
eg.Unbind(evt, self.onDown)
for evt in self.evtList[2]:
eg.Unbind(evt, self.onLeft)
for evt in self.evtList[3]:
eg.Unbind(evt, self.onRight)
for evt in self.evtList[4]:
eg.Unbind(evt, self.onEscape)
if self.flag:
self.timer.Cancel()
eg.TriggerEvent("OnScreenMenu.%s" % self.plugin.text.closed, prefix = "RadioSure")
self.Close()
#===============================================================================
class RadioSure(eg.PluginBase):
text=Text
menuDlg = None
RadioSurePath = u''
xmlPath = u''
data = []
tmpData = []
dialog = None
manager = None
Favorites = []
History = []
Current = ['','']
FavIx = -1
HistIx = -1
List = None
maxFav = None
submenus = None
def GetRS_Menu(self, hwnd):
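        # Opens RadioSure's context menu by posting WM_CONTEXTMENU, waits for the
        # popup window (class #32768) and returns (menu window handle, HMENU)
        # obtained via GetMenuBarInfo, or (None, None) if the menu never appears.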
WM_CONTEXTMENU = 0x007B
OBJID_CLIENT = 0xFFFFFFFC
class RECT(Structure):
_fields_ = [
('left', c_long),
('top', c_long),
('right', c_long),
('bottom', c_long),
]
class MENUBARINFO(Structure):
_fields_ = [
('cbSize', c_ulong),
('rcBar', RECT), # rect of bar, popup, item
('hMenu', c_long), # real menu handle of bar, popup
('hwndMenu', c_long), # hwnd of item submenu if one
('fBarFocused', c_int, 1), # bar, popup has the focus
('fFocused', c_int, 1), # item has the focus
]
findMenu = eg.WindowMatcher(
u'RadioSure.exe',
None,
u'#32768',
None,
None,
None,
True,
0.0,
0
)
PostMessage(hwnd, WM_CONTEXTMENU, hwnd, 0x00010001)
menu = []
i = 0
while len(menu) == 0:
menu = findMenu()
i+=1
if i > 1000:
break
if menu:
menu = menu[0]
mbi = MENUBARINFO()
mbi.cbSize = sizeof(mbi)
if _user32.GetMenuBarInfo(
menu,
OBJID_CLIENT,
0,
byref(mbi)
):
return (menu, mbi.hMenu)
return (None, None)
def GetItemList(self, hWnd, hMenu):
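        # Returns a (label, position, checked, command id) tuple for every enabled
        # item of hMenu (greyed/disabled items and separators are skipped); sends
        # WM_INITMENUPOPUP first to refresh the state and closes the menu window
        # again afterwards.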
WM_INITMENUPOPUP = 0x0117
MF_BYPOSITION = 1024
MF_GRAYED = 1
MF_DISABLED = 2
MF_CHECKED = 8
MF_SEPARATOR = 2048
SendMessage(hWnd, WM_INITMENUPOPUP, hMenu, 0) #REFRESH MENU STATE !!!
itemList = []
itemName = c_buffer("\000" * 128)
count = GetMenuItemCount(hMenu)
for i in range(count):
_user32.GetMenuStringA(c_int(hMenu),
c_int(i),
itemName,
c_int(len(itemName)),
MF_BYPOSITION)
hMenuState = _user32.GetMenuState(c_int(hMenu),
c_int(i),
MF_BYPOSITION)
id = _user32.GetMenuItemID(c_int(hMenu), c_int(i))
# if hMenuState & (MF_GRAYED|MF_DISABLED|MF_SEPARATOR):
if hMenuState & (MF_GRAYED|MF_DISABLED):
continue
item = itemName.value.replace("&","").split("\t")[0]
if item == "" and id == 0:
continue
checked = bool(hMenuState & MF_CHECKED)
itemList.append((item, i, checked, id))
PostMessage(hWnd, WM_CLOSE, 0, 0)
return itemList
def GetLanguageXml(self):
xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath
xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse
xmldoc = miniDom.parse(xmltoparse)
general = xmldoc.getElementsByTagName('General')
        if general: #NOTE: don't use general[0].getElementsByTagName('Language') - it searches all descendants, not just direct children
langNodes = [node for node in general[0].childNodes if node.localName =="Language"]
if langNodes:
langFile = abspath(join(self.RadioSurePath+"\\Lang", langNodes[0].firstChild.data))
langFile = langFile.encode(FSE) if isinstance(langFile, unicode) else langFile
languageXml = miniDom.parse(langFile)
return languageXml
def GetOneInstance(self):
xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath
xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse
xmldoc = miniDom.parse(xmltoparse)
advanced = xmldoc.getElementsByTagName('Advanced')
if advanced:
oneInstance = advanced[0].getElementsByTagName('One_instance')[0].firstChild.data
return oneInstance
def GetStrings(self):
language = self.GetLanguageXml()
if language:
res = {}
mainWindow = language.getElementsByTagName('MainWindow')
res['stop'] = mainWindow[0].getElementsByTagName('Stop')[0].firstChild.data
res['unmute'] = mainWindow[0].getElementsByTagName('Unmute')[0].firstChild.data
res['stopRec'] = mainWindow[0].getElementsByTagName('StopRecording')[0].firstChild.data
#res['play'] = mainWindow[0].getElementsByTagName('Play')[0].firstChild.data
#res['mute'] = mainWindow[0].getElementsByTagName('Mute')[0].firstChild.data
#res['rec'] = mainWindow[0].getElementsByTagName('Recording')[0].firstChild.data
return res
def GetSubmenuStrings(self):
choices = [self.text.root]
language = self.GetLanguageXml()
if language:
mainWindow = language.getElementsByTagName('MainWindow')
favorites = language.getElementsByTagName('Favorites')
equaliser = language.getElementsByTagName('EQUALIZER')
sleeptimer = language.getElementsByTagName('SleepTimer')
choices.append(favorites[0].getElementsByTagName('Title')[0].firstChild.data)
choices.append(mainWindow[0].getElementsByTagName('Back')[0].firstChild.data)
choices.append(equaliser[0].getElementsByTagName('Title')[0].firstChild.data)
choices.append(mainWindow[0].getElementsByTagName('WindowMenu')[0].firstChild.data)
choices.append(mainWindow[0].getElementsByTagName('ClipboardMenu')[0].firstChild.data)
choices.append(sleeptimer[0].getElementsByTagName('Title')[0].firstChild.data)
choices.append(mainWindow[0].getElementsByTagName('Language')[0].firstChild.data)
return choices
def GetRS_Status(self, hwnd):
menu, hMenu = self.GetRS_Menu(hwnd)
if menu:
menuItems = self.GetItemList(menu, hMenu)
#PostMessage(menu, WM_CLOSE, 0, 0)
strings = self.GetStrings()
if menuItems and strings:
res = [
strings['stop'] == menuItems[0][0], # Playing
strings['unmute'] == menuItems[1][0], # Muted
strings['stopRec'] == menuItems[2][0], # Recording
menuItems[3][2] # Record only current track
]
return res
def GetMenuItem(self, hwnd, indx): # indx = 7 for Fav, 8 for Hist, 9 for Equalizer
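# Returns (index, label) of the checked item in the given submenu of the
# RadioSure context menu, or (-1, "") when nothing is checked.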
menu, hMenu = self.GetRS_Menu(hwnd)
if menu:
hMenu = GetSubMenu(hMenu, indx)
menuItems = self.GetItemList(menu, hMenu)
flags = [item[2] for item in menuItems]
if True in flags:
ix = flags.index(True)
return (ix, menuItems[ix][0])
return (-1, "")
def RefreshVariables(self):
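# Re-parses RadioSure.xml and refreshes self.Current (last played source and
# title), self.History, self.Favorites and the derived FavIx/HistIx indexes.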
xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath
xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse
if not exists(xmltoparse):
return
xmldoc = miniDom.parse(xmltoparse)
lastPlayed = xmldoc.getElementsByTagName('LastPlayed')
if lastPlayed:
lastPlayed=lastPlayed[0]
src = lastPlayed.getElementsByTagName('Source')
if src:
src = src[0].firstChild.data
else:
src = ""
ttl = lastPlayed.getElementsByTagName('Title')
if ttl:
ttl = ttl[0].firstChild.data
else:
ttl = ""
self.Current = [src, ttl]
else:
self.Current = ["", ""]
histNode = xmldoc.getElementsByTagName('History')
if histNode:
self.History = getStations(histNode[0])
else:
self.History = []
favNode = xmldoc.getElementsByTagName('Favorites')
if favNode:
self.Favorites = getStations(favNode[0])
else:
self.Favorites = []
tmp = [item[:2] for item in self.Favorites]
if self.Current in tmp:
self.FavIx = tmp.index(self.Current)
else:
self.FavIx = -1
tmp = [item[:2] for item in self.History]
if self.Current in tmp:
self.HistIx = tmp.index(self.Current)
else:
self.HistIx = -1
return self.Favorites
def NextRun(self, type, data):
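# Computes the next run time (as a string) for one schedule. 'type' selects
# the recurrence: 0 = once/yearly, 1 = daily, 2 = weekly, 3 = monthly by
# weekday, 4 = monthly by day, 5 = periodically; 'data' carries the
# type-specific parameters, data[0] being the "HH:MM:SS" start time.
# Returns "" when no future run exists.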
def FindRunDateTime(runList, cond):
runList.sort()
runDateTime = ""
if len(runList) > 0:
if not cond:
return runList[0]
found = False
for item in runList:
if item.weekday() > 4:
found = True
break
else:
if (item.month, item.day) in self.holidays[0]:
pass
elif (item.year, item.month, item.day) in self.holidays[1]:
pass
else:
found = True
break
if found:
runDateTime = item
return runDateTime
now = dt.now()
now = now.replace(microsecond = 0) + td(seconds = 2)
runTime = dt.strptime(data[0], "%H:%M:%S").time()
if type == 0: # once or yearly
runDate = dt.strptime(data[2], '%Y-%m-%d')
runDateTime = dt.combine(runDate, runTime)
if now < runDateTime:
return str(runDateTime)
elif not data[3]:
return ""
else:
if runDateTime.replace(year = now.year) < now:
return str(runDateTime.replace(year = now.year + 1))
else:
return str(runDateTime.replace(year = now.year))
elif type == 1: # daily
runDateTime = dt.combine(now.date(), runTime)
if now.time() > runTime:
runDateTime += td(days = 1)
return str(runDateTime)
elif type == 2: # weekly
if not data[2]:
return ""
runDateTime = dt.combine(now.date(), runTime)
weekdaysLower = []
weekdaysLarger = []
nowDay = now.weekday()
for weekday in range(7):
if 2**weekday & data[2]:
if weekday < nowDay or (weekday == nowDay and now.time() > runTime):
weekdaysLower.append(weekday)
else:
weekdaysLarger.append(weekday)
if not data[4] and not data[3]: # without holiday check
if len(weekdaysLarger) > 0:
delta = weekdaysLarger[0] - nowDay
return str(runDateTime + td(days = delta))
delta = 7 + weekdaysLower[0] - nowDay
return str(runDateTime + td(days = delta))
elif data[4]: # holiday check
found = False
shift = 0
while True:
for day in weekdaysLarger:
delta = day + shift - nowDay
tmpRunDT = runDateTime + td(days = delta)
if tmpRunDT.weekday() > 4: # weekend
found = True
break
else: # workday
if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]:
pass
elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]:
pass
else:
found = True
break
if found:
break
shift += 7
for day in weekdaysLower:
delta = day + shift - nowDay
tmpRunDT = runDateTime + td(days = delta)
if tmpRunDT.weekday() > 4: # weekend
found = True
break
else: # workday
if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]:
pass
elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]:
pass
else:
found = True
break
if found:
break
return str(tmpRunDT)
else: # holiday_2 check
if len(weekdaysLarger) > 0:
Delta = weekdaysLarger[0] - nowDay
else:
Delta = 7 + weekdaysLower[0] - nowDay
start = 0 if now.time() < runTime else 1
found = False
for delta in range(start, Delta):
tmpRunDT = runDateTime + td(days = delta)
if tmpRunDT.weekday() < 5:
if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]:
found = True
break
elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]:
found = True
break
return str(tmpRunDT if found else runDateTime + td(days = Delta))
elif type == 3: # monthly/weekday
if data[2] == 0 or data[3] == 0 or (data[4] + data[5]) == 0:
return ""
currMonth = now.month
currYear = now.year
monthsInt = data[4] + (data[5] << 6)
months = []
for month in range(1,13):
if 2 ** (month - 1) & monthsInt:
months.append(month)
if currMonth in months:
runList = []
for ix in range(6):
if 2 ** ix & data[2]:
for weekday in range(7):
if 2 ** weekday & data[3]:
day = FindMonthDay(currYear, currMonth, weekday, ix)
if day:
runDateTime = dt.combine(dt(currYear, currMonth, day).date(), runTime)
if now < runDateTime:
runList.append(runDateTime)
tmpRunDT = FindRunDateTime(runList, data[6])
if tmpRunDT:
return str(tmpRunDT)
lower = []
larger = []
for month in months:
if month > currMonth:
larger.append(month)
else: #month <= currMonth:
lower.append(month)
year = currYear
tmpRunDT = None
while True:
for month in larger:
runList = []
for ix in range(6):
if 2 ** ix & data[2]:
for weekday in range(7):
if 2 ** weekday & data[3]:
day = FindMonthDay(year, month, weekday, ix)
if day:
runDateTime = dt.combine(dt(year, month, day).date(), runTime)
runList.append(runDateTime)
tmpRunDT = FindRunDateTime(runList, data[6])
if tmpRunDT:
break
if tmpRunDT:
break
year += 1
for month in lower:
runList = []
for ix in range(6):
if 2 ** ix & data[2]:
for weekday in range(7):
if 2 ** weekday & data[3]:
day=FindMonthDay(year, month, weekday, ix)
if day:
runDateTime = dt.combine(dt(year, month, day).date(), runTime)
runList.append(runDateTime)
tmpRunDT = FindRunDateTime(runList, data[6])
if tmpRunDT:
break
if tmpRunDT:
break
return str(tmpRunDT)
elif type == 4: #monthly/day
if (data[2] + data[3] + data[4] + data[5]) == 0 or (data[6] + data[7]) == 0:
return ""
runList = []
currMonth = now.month
currYear = now.year
monthsInt = data[6] + (data[7] << 6)
daysInt = data[2] + (data[3] << 8) + (data[4] << 16) + (data[5] << 24)
days = []
for day in range(1, 32):
if 2 ** (day - 1) & daysInt:
days.append(day)
months = []
for month in range(1, 13):
if 2 ** (month - 1) & monthsInt:
months.append(month)
if currMonth in months:
for day in days:
if day > monthrange(currYear, currMonth)[1]:
break
runDateTime = dt.combine(dt(currYear, currMonth, day).date(), runTime)
if now < runDateTime:
runList.append(runDateTime)
if len(runList) == 0:
lower = []
larger = []
nextMonth = None
for month in months:
if month > currMonth:
larger.append(month)
else: #month<=currMonth:
lower.append(month)
if len(larger) > 0:
nextYear = currYear
for month in larger:
for day in days:
if day > monthrange(nextYear, month)[1]:
break
runDateTime = dt.combine(dt(nextYear, month, day).date(), runTime)
runList.append(runDateTime)
if len(runList) == 0 and len(lower) > 0:
nextYear = currYear + 1
for month in lower:
for day in days:
if day > monthrange(nextYear, month)[1]:
break
runDateTime = dt.combine(dt(nextYear, month, day).date(), runTime)
runList.append(runDateTime)
if len(runList) > 0:
return str(min(runList))
else:
return ""
else: #type == 5: #periodically
runDate = dt.strptime(data[2], '%Y-%m-%d')
runDateTime = dt.combine(runDate, runTime)
if now < runDateTime:
return str(runDateTime)
elif data[4] == 0: #unit = hour
period = data[3] * 3600
if period < 86400 and not 86400 % period:
if now.time() > runTime:
date = now.date()
else:
date = now.date() - td(days = 1)
runDateTime = dt.combine(date, runTime)
delta = now - runDateTime
delta = delta.seconds + 86400 * delta.days
share = delta / period
share += 1
delta = td(seconds = share * period)
return str(runDateTime + delta)
elif data[4] == 1 or data[4] == 2: #unit = day or week
period = data[3] if data[4] == 1 else 7 * data[3]
delta = (now - runDateTime).days
share = delta / period
if not delta % period:
if now.time() < runTime:
return str(dt.combine(now.date(), runTime))
share += 1
delta = td(days = share * period)
return str(runDateTime + delta)
elif data[4] == 3: #unit = month
period = data[3]
month = runDateTime.month
year = runDateTime.year
while now > runDateTime:
year += period / 12
m = month+period % 12
if m > 12:
year += 1
month = m % 12
else:
month = m
runDateTime = runDateTime.replace(year = year).replace(month = month)
return str(runDateTime)
else: # data[4] == 6: #unit = year
period = data[3]
year = runDateTime.year
while now > runDateTime:
year += period
runDateTime = runDateTime.replace(year = year)
return str(runDateTime)
def updateLogFile(self, line, blank = False):
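# Appends a timestamped line to the scheduler log file (UTF-8); does nothing
# when logging is disabled (self.logfile is empty).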
if not self.logfile:
return
f = openFile(self.logfile, encoding='utf-8', mode='a')
if blank:
f.write("\r\n")
f.write("%s %s\r\n" % (str(dt.now())[:19], line))
f.close()
def Execute(self, params, immed = False):
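# Launches RadioSure for one schedule item and, unless run immediately,
# re-registers the next run with eg.scheduler. As used below, 'modes'
# (params[7]) is a bit field: bit 0 = /hidden, bits 1-2 control
# play/record/mute (value 6 means do not launch at all), bits 3-4 select the
# TriggerEvent payload (schedule title or the whole schedule item).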
next = self.NextRun(params[2], params[3])
modes = params[7]
playRec = modes & 6
if playRec != 6:
args = [u'%s\\RadioSure.exe' % self.RadioSurePath,]
if playRec:
args.append("/record")
else:
args.append("/play")
if playRec == 4:
args.append("/mute")
if modes & 1:
args.append("/hidden")
args.append(u'/source="%s"' % params[5])
duration = 60*int(params[3][1][:2])+int(params[3][1][-2:])
if duration:
args.append('/duration=%i' % duration)
if params[6]:
recfile = eg.ParseString(params[6])
try:
recfile = eval(recfile)
except:
pass
args.append(u'/filename="%s"' % recfile)
elif playRec:
args.append(u'/filename="%s"' % params[1])
Popen(args)
if not immed and next: # new schedule, if valid next run time and not TEST/IMMEDIATELY run
startTicks = mktime(strptime(next, "%Y-%m-%d %H:%M:%S"))
eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, params[1])
triggEvt = modes & 24
if triggEvt == 8:
eg.TriggerEvent(self.text.launched, prefix = "RadioSure", payload = params[1])
elif triggEvt == 16:
eg.TriggerEvent(self.text.launched, prefix = "RadioSure", payload = params)
return (next, my_list2cmdline(args))
def RadioSureScheduleRun(self, schedule):
data = self.data
ix = [item[1] for item in data].index(schedule)
next, cmdline = self.Execute(data[ix])
last = str(dt.now())[:19]
self.data[ix][4] = last
if self.dialog:
tmpList = [item[1] for item in self.tmpData]
if schedule in tmpList:
ixTmp = tmpList.index(schedule)
self.tmpData[ixTmp][4] = last
self.dialog.RefreshGrid(ixTmp, last, next)
nxt = next[:19] if next else self.text.none
self.updateLogFile(self.text.execut % (data[ix][1], nxt))
self.updateLogFile(self.text.cmdLine % cmdline)
def UpdateEGscheduler(self):
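# Synchronises EventGhost's scheduler heap with self.data: cancels tasks for
# schedules that were removed or disabled, re-schedules entries whose next
# run time changed, and registers new enabled schedules.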
data = self.data
tmpList = []
sched_list = eg.scheduler.__dict__['heap']
for sched in sched_list:
if sched[1] == self.RadioSureScheduleRun:
if sched[2][0] in [item[1] for item in data]:
tmpList.append(sched)
else:
self.updateLogFile(self.text.cancAndDel % sched[2][0])
eg.scheduler.CancelTask(sched)
sched_list = tmpList
for schedule in data:
startMoment = self.NextRun(schedule[2], schedule[3])
if not startMoment:
continue
startTicks = mktime(strptime(startMoment,"%Y-%m-%d %H:%M:%S"))
nameList = [item[2][0] for item in sched_list]
if schedule[1] in nameList:
sched = sched_list[nameList.index(schedule[1])]
if not schedule[0]: # schedule is disabled !
eg.scheduler.CancelTask(sched)
self.updateLogFile(self.text.cancAndDis % schedule[1])
elif sched[0] != startTicks: #Re-schedule
self.updateLogFile(self.text.re_Sched % (schedule[1], startMoment))
eg.scheduler.CancelTask(sched)
eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, schedule[1])
elif schedule[0]: #New schedule
eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, schedule[1])
self.updateLogFile(self.text.newSched % (schedule[1], startMoment))
def dataToXml(self):
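# Serialises self.data into Scheduler.xml (UTF-8) in the configured xml path
# and returns True if the file was closed successfully.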
impl = miniDom.getDOMImplementation()
dom = impl.createDocument(None, u'Document', None)
root = dom.documentElement
commentNode = dom.createComment(self.text.xmlComment % str(dt.now())[:19])
dom.insertBefore(commentNode, root)
for item in self.data:
schedNode = dom.createElement(u'Schedule')
schedNode.setAttribute(u'Name', unicode(item[1]))
schedNode.setAttribute(u'Type', unicode(item[2]))
enableNode = dom.createElement(u'Enable')
enableText = dom.createTextNode(unicode(item[0]))
enableNode.appendChild(enableText)
schedNode.appendChild(enableNode)
last_runNode = dom.createElement(u'Last_run')
last_runText = dom.createTextNode(unicode(item[4]))
last_runNode.appendChild(last_runText)
schedNode.appendChild(last_runNode)
sourceNode = dom.createElement(u'Source')
sourceText = dom.createTextNode(unicode(item[5]))
sourceNode.appendChild(sourceText)
schedNode.appendChild(sourceNode)
filenameNode = dom.createElement(u'Filename')
filenameText = dom.createTextNode(unicode(item[6]))
filenameNode.appendChild(filenameText)
schedNode.appendChild(filenameNode)
modesNode = dom.createElement(u'Modes')
modesText = dom.createTextNode(unicode(item[7]))
modesNode.appendChild(modesText)
schedNode.appendChild(modesNode)
dateTimeNode = dom.createElement(u'Datetime')
start_timeNode = dom.createElement(u'Start_time')
start_timeText = dom.createTextNode(unicode(item[3][0]))
start_timeNode.appendChild(start_timeText)
dateTimeNode.appendChild(start_timeNode)
durationNode = dom.createElement(u'Duration')
durationText = dom.createTextNode(unicode(item[3][1]))
durationNode.appendChild(durationText)
dateTimeNode.appendChild(durationNode)
if item[2] == 0:
dateNode = dom.createElement(u'Date')
dateText = dom.createTextNode(unicode(item[3][2]))
dateNode.appendChild(dateText)
dateTimeNode.appendChild(dateNode)
yearlyNode = dom.createElement(u'Yearly')
yearlyText = dom.createTextNode(unicode(item[3][3]))
yearlyNode.appendChild(yearlyText)
dateTimeNode.appendChild(yearlyNode)
if item[2] == 2:
weekdayNode = dom.createElement(u'Weekday')
weekdayText = dom.createTextNode(unicode(item[3][2]))
weekdayNode.appendChild(weekdayText)
dateTimeNode.appendChild(weekdayNode)
holidayNode = dom.createElement(u'HolidayCheck')
holidayText = dom.createTextNode(unicode(item[3][4]))
holidayNode.appendChild(holidayText)
dateTimeNode.appendChild(holidayNode)
holiday2Node = dom.createElement(u'HolidayCheck_2')
holiday2Text = dom.createTextNode(unicode(item[3][3]))
holiday2Node.appendChild(holiday2Text)
dateTimeNode.appendChild(holiday2Node)
if item[2] == 3:
orderNode = dom.createElement(u'Order')
orderText = dom.createTextNode(unicode(item[3][2]))
orderNode.appendChild(orderText)
dateTimeNode.appendChild(orderNode)
weekdayNode = dom.createElement(u'Weekday')
weekdayText = dom.createTextNode(unicode(item[3][3]))
weekdayNode.appendChild(weekdayText)
dateTimeNode.appendChild(weekdayNode)
first_halfNode = dom.createElement(u'First_half')
first_halfText = dom.createTextNode(unicode(item[3][4]))
first_halfNode.appendChild(first_halfText)
dateTimeNode.appendChild(first_halfNode)
second_halfNode = dom.createElement(u'Second_half')
second_halfText = dom.createTextNode(unicode(item[3][5]))
second_halfNode.appendChild(second_halfText)
dateTimeNode.appendChild(second_halfNode)
holidayNode = dom.createElement(u'HolidayCheck')
holidayText = dom.createTextNode(unicode(item[3][6]))
holidayNode.appendChild(holidayText)
dateTimeNode.appendChild(holidayNode)
if item[2] == 4:
q_1_Node = dom.createElement(u'Q_1')
q_1_Text = dom.createTextNode(unicode(item[3][2]))
q_1_Node.appendChild(q_1_Text)
dateTimeNode.appendChild(q_1_Node)
q_2_Node = dom.createElement(u'Q_2')
q_2_Text = dom.createTextNode(unicode(item[3][3]))
q_2_Node.appendChild(q_2_Text)
dateTimeNode.appendChild(q_2_Node)
q_3_Node = dom.createElement(u'Q_3')
q_3_Text = dom.createTextNode(unicode(item[3][4]))
q_3_Node.appendChild(q_3_Text)
dateTimeNode.appendChild(q_3_Node)
q_4_Node = dom.createElement(u'Q_4')
q_4_Text = dom.createTextNode(unicode(item[3][5]))
q_4_Node.appendChild(q_4_Text)
dateTimeNode.appendChild(q_4_Node)
first_halfNode = dom.createElement(u'First_half')
first_halfText = dom.createTextNode(unicode(item[3][6]))
first_halfNode.appendChild(first_halfText)
dateTimeNode.appendChild(first_halfNode)
second_halfNode = dom.createElement(u'Second_half')
second_halfText = dom.createTextNode(unicode(item[3][7]))
second_halfNode.appendChild(second_halfText)
dateTimeNode.appendChild(second_halfNode)
if item[2] == 5:
dateNode = dom.createElement(u'Date')
dateText = dom.createTextNode(unicode(item[3][2]))
dateNode.appendChild(dateText)
dateTimeNode.appendChild(dateNode)
numberNode = dom.createElement(u'Number')
numberText = dom.createTextNode(unicode(item[3][3]))
numberNode.appendChild(numberText)
dateTimeNode.appendChild(numberNode)
unitNode = dom.createElement(u'Unit')
unitText = dom.createTextNode(unicode(item[3][4]))
unitNode.appendChild(unitText)
dateTimeNode.appendChild(unitNode)
schedNode.appendChild(dateTimeNode)
root.appendChild(schedNode)
f = file(u'%s\\Scheduler.xml' % self.xmlpath, 'wb')
writer = lookup('utf-8')[3](f)
dom.writexml(writer, encoding = 'utf-8')
f.close()
return f.closed
def xmlToData(self):
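# Inverse of dataToXml: parses Scheduler.xml and returns a list of schedule
# items in the form [enable, title, type, params, last_run, source,
# filename, modes].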
data = []
xmlfile = u'%s\\Scheduler.xml' % self.xmlpath
if not exists(xmlfile):
return data
xmldoc = miniDom.parse(xmlfile)
document = xmldoc.getElementsByTagName('Document')[0]
schedules = tuple(document.getElementsByTagName('Schedule'))
for schedule in schedules:
dataItem = []
enable = int(schedule.getElementsByTagName('Enable')[0].firstChild.data)
dataItem.append(enable)
name = schedule.attributes["Name"].value
dataItem.append(name)
schedType = int(schedule.attributes["Type"].value)
dataItem.append(schedType)
dateTime = schedule.getElementsByTagName('Datetime')[0]
params = []
start_time = dateTime.getElementsByTagName('Start_time')[0].firstChild.data
params.append(start_time)
duration = dateTime.getElementsByTagName('Duration')[0].firstChild.data
params.append(duration)
if schedType == 0:
date = dateTime.getElementsByTagName('Date')[0].firstChild.data
params.append(date)
date = int(dateTime.getElementsByTagName('Yearly')[0].firstChild.data)
params.append(date)
if schedType == 2:
weekday = int(dateTime.getElementsByTagName('Weekday')[0].firstChild.data)
params.append(weekday)
holiday2 = int(dateTime.getElementsByTagName('HolidayCheck_2')[0].firstChild.data)
params.append(holiday2)
holiday = int(dateTime.getElementsByTagName('HolidayCheck')[0].firstChild.data)
params.append(holiday)
if schedType == 3:
order = int(dateTime.getElementsByTagName('Order')[0].firstChild.data)
params.append(order)
weekday = int(dateTime.getElementsByTagName('Weekday')[0].firstChild.data)
params.append(weekday)
first_half = int(dateTime.getElementsByTagName('First_half')[0].firstChild.data)
params.append(first_half)
second_half = int(dateTime.getElementsByTagName('Second_half')[0].firstChild.data)
params.append(second_half)
holiday = int(dateTime.getElementsByTagName('HolidayCheck')[0].firstChild.data)
params.append(holiday)
if schedType == 4:
q_1 = int(dateTime.getElementsByTagName('Q_1')[0].firstChild.data)
params.append(q_1)
q_2 = int(dateTime.getElementsByTagName('Q_2')[0].firstChild.data)
params.append(q_2)
q_3 = int(dateTime.getElementsByTagName('Q_3')[0].firstChild.data)
params.append(q_3)
q_4 = int(dateTime.getElementsByTagName('Q_4')[0].firstChild.data)
params.append(q_4)
first_half = int(dateTime.getElementsByTagName('First_half')[0].firstChild.data)
params.append(first_half)
second_half = int(dateTime.getElementsByTagName('Second_half')[0].firstChild.data)
params.append(second_half)
if schedType == 5:
date = dateTime.getElementsByTagName('Date')[0].firstChild.data
params.append(date)
number = int(dateTime.getElementsByTagName('Number')[0].firstChild.data)
params.append(number)
unit = int(dateTime.getElementsByTagName('Unit')[0].firstChild.data)
params.append(unit)
dataItem.append(params)
last_run = schedule.getElementsByTagName('Last_run')[0].firstChild
last_run = last_run.data if last_run else " "
dataItem.append(last_run)
source = schedule.getElementsByTagName('Source')[0].firstChild
source = source.data if source else ""
dataItem.append(source)
filename = schedule.getElementsByTagName('Filename')[0].firstChild
filename = filename.data if filename else ""
dataItem.append(filename)
modes = schedule.getElementsByTagName('Modes')[0].firstChild.data
dataItem.append(int(modes))
data.append(dataItem)
return data
def GetStatusRS(self, hwnds = None):
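# Queries RadioSure windows with the registered window messages for the
# maximum number of favorites and the recording status; returns
# (maxFav, recording) or (None, None) if no window answers.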
hwnds = hwnds or HandleRS()
maxFav = None
recording = None
if hwnds:
for hwnd in hwnds:
try:
maxFav = SendMessageTimeout(hwnd, self.favMesg, 0, 0)
recording = SendMessageTimeout(hwnd, self.recMesg, 0, 0)
except:
#raise
pass
if maxFav is not None and recording is not None:
#pass
break
if maxFav is not None and recording is not None:
return (maxFav, recording)
else:
return (None, None)
def GetNewHwnd(self, oldHwnds = [], src = None, hid = False, mut = False):
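# Starts a new RadioSure.exe instance (optionally hidden, muted or tuned to
# 'src') and polls until a window handle appears that was not in oldHwnds.
# Refuses with a message box when 'one instance only' is enabled or the
# instance limit is reached; returns the list of new handles (possibly empty).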
hwnds = HandleRS()
if len(hwnds) > 0 and self.GetOneInstance():
wx.CallAfter(
MessageBox,
None,
self.text.message3,
self.text.messBoxTit1,
wx.ICON_EXCLAMATION,
15,
plugin = self,
)
return []
maxInst = 2 if self.maxFav == 30 else 10
if len(oldHwnds) >= maxInst:
wx.CallAfter(
MessageBox,
None,
self.text.message2 % maxInst,
self.text.messBoxTit1,
wx.ICON_EXCLAMATION,
15,
plugin = self,
)
return []
i = 0
hwnds = oldHwnds if oldHwnds else []
rs = u'%s\\RadioSure.exe' % self.RadioSurePath
rs = rs.encode(FSE) if isinstance(rs, unicode) else rs
args = [rs, "/play"]
if mut:
args.append("/mute")
if hid:
args.append("/hidden")
if src:
args.append(u'/source="%s"' % src)
if isfile(rs):
Popen(args)
while i < 100 and hwnds == oldHwnds:
i += 1
hwnds = HandleRS()
sleep(1.5)
return list(set(hwnds)-set(oldHwnds))
def SetMaxFavs(self):
maxFav = 30
hwnds = HandleRS()
if hwnds:
maxFav, rec = self.GetStatusRS(hwnds)
if not maxFav: # ToDo: kill process ???
hwnds = self.GetNewHwnd(hwnds, hid = True, mut = True)
if hwnds:
maxFav, rec = self.GetStatusRS(hwnds)
PostMessage(hwnds[0], WM_COMMAND, 1, 0) # Close
else:
hwnds = self.GetNewHwnd(hid = True, mut = True)
if hwnds:
maxFav, rec = self.GetStatusRS(hwnds)
PostMessage(hwnds[0], WM_COMMAND, 1, 0) # Close
self.maxFav = maxFav
def __init__(self):
self.observThread = None
text=Text
self.AddActionsFromList(ACTIONS)
def GetLabel(
self,
path = None,
xmlpath = None,
logfile = None,
holidays = [[], []],
first_day = 0,
):
if not self.submenus:
self.RadioSurePath = path
self.xmlpath = xmlpath
self.submenus = self.GetSubmenuStrings()
return self.name
def __start__(
self,
path = None,
xmlpath = None,
logfile = None,
holidays = [[], []],
first_day = 0,
):
self.recMesg = RegisterWindowMessage("WM_RADIOSURE_GET_RECORDING_STATUS")
self.favMesg = RegisterWindowMessage("WM_RADIOSURE_GET_MAX_FAVORITES")
if not self.submenus:
self.submenus = self.GetSubmenuStrings()
self.RadioSurePath = path
self.xmlpath = xmlpath
wx.CallAfter(self.SetMaxFavs)
self.logfile = logfile
self.holidays = holidays
self.first_day = first_day
self.data = []
self.tmpData = []
if self.xmlpath:
if exists(self.xmlpath):
self.data = self.xmlToData()
if logfile:
self.updateLogFile(self.text.start, True)
self.UpdateEGscheduler()
def __stop__(self):
if self.dataToXml():
self.updateLogFile("File Scheduler.xml saved")
if self.observThread:
ot = self.observThread
if ot.isAlive():
ot.AbortObservation()
del self.observThread
self.observThread = None
sched_list = eg.scheduler.__dict__['heap']
tmpLst = []
for sched in sched_list:
if sched[1] == self.RadioSureScheduleRun:
tmpLst.append(sched)
if len(tmpLst) > 0:
self.updateLogFile(self.text.stop)
for sched in tmpLst:
eg.scheduler.CancelTask(sched)
self.updateLogFile(self.text.canc % sched[2][0])
if self.dialog:
self.dialog.Close()
if self.manager:
self.manager.Close()
def __close__(self):
if self.observThread:
ot = self.observThread
if ot.isAlive():
ot.AbortObservation()
def Configure(
self,
path = "",
xmlpath = "",
logfile = None,
holidays = [[], []],
first_day = 0,
):
panel = eg.ConfigPanel(self)
panel.holidays = cpy(holidays)
del holidays
managerButton = wx.Button(panel, -1, self.text.managerButton)
if not path: #First run after plugin insert
managerButton.Enable(False)
self.RadioSurePath = path
self.xmlpath = xmlpath
self.logfile = logfile
self.first_day = first_day
label1Text = wx.StaticText(panel, -1, self.text.label1)
rsPathCtrl = MyDirBrowseButton(
panel,
toolTip = self.text.toolTipFolder,
dialogTitle = self.text.browseTitle,
buttonText = eg.text.General.browse
)
rsPathCtrl.GetTextCtrl().SetEditable(False)
label2Text = wx.StaticText(panel, -1, self.text.label2)
xmlPathCtrl = MyDirBrowseButton(
panel,
toolTip = self.text.toolTipFolder,
dialogTitle = self.text.browseTitle,
buttonText = eg.text.General.browse
)
xmlPathCtrl.GetTextCtrl().SetEditable(False)
logFileCtrl = MyFileBrowseButton(
panel,
toolTip = self.text.toolTipFile,
dialogTitle = self.text.browseFile,
buttonText = eg.text.General.browse
)
logFileCtrl.GetTextCtrl().SetEditable(False)
logCheckBox = wx.CheckBox(panel, -1, self.text.logLabel)
if not self.RadioSurePath or not exists(self.RadioSurePath):
RSpath = getPathFromReg() #Try get path from registry
if RSpath: #Regular installation
if exists(RSpath):
self.RadioSurePath = RSpath
else: #Portable installation
self.RadioSurePath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData)
xmlPath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData)
if exists(xmlPath):
self.xmlpath = xmlPath
if exists(join(self.RadioSurePath, "RadioSure.exe")):
rsPathCtrl.GetTextCtrl().ChangeValue(self.RadioSurePath)
rsPathCtrl.Enable(False)
label1Text.Enable(False)
if exists(join(self.xmlpath, "RadioSure.xml")):
xmlPathCtrl.GetTextCtrl().ChangeValue(self.xmlpath)
xmlPathCtrl.Enable(False)
label2Text.Enable(False)
def NotHidden():
try:
nssh = OpenKey(
HKEY_CURRENT_USER,
"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Advanced"
)
res = QueryValueEx(nssh, "Hidden")[0] != 2
CloseKey(nssh)
except:
res = False
return res
def NotHiddenAttr(path):
attr = GetFileAttributes(path)
if attr & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM):
return False
else:
p = split(path)[0]
if len(p) > 3:
return NotHiddenAttr(p)
return True
if self.logfile is None:
logCheckBox.SetValue(True)
if NotHiddenAttr(self.xmlpath) or NotHidden():
self.logfile = u'%s\\RadioSureSchedulerLog.txt' % self.xmlpath
else:
self.logfile = u'%s\\RadioSureSchedulerLog.txt' % unicode(eg.folderPath.Documents)
else:
val = self.logfile != ""
logCheckBox.SetValue(val)
logFileCtrl.Enable(val)
logFileCtrl.GetTextCtrl().ChangeValue(self.logfile)
rsPathCtrl.startDirectory = self.RadioSurePath
xmlPathCtrl.startDirectory = self.xmlpath
logFileCtrl.startDirectory = self.logfile or u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData)
sizerAdd = panel.sizer.Add
sizerAdd(label1Text, 0)
sizerAdd(rsPathCtrl,0,wx.TOP|wx.EXPAND,2)
sizerAdd(label2Text, 0, wx.TOP,15)
sizerAdd(xmlPathCtrl,0,wx.TOP|wx.EXPAND,2)
sizerAdd(logCheckBox, 0, wx.TOP,15)
sizerAdd(logFileCtrl, 0, wx.TOP|wx.EXPAND,2)
firstDayLabel = wx.StaticText(panel, -1, self.text.first_day)
firstDayCtrl = wx.Choice(
panel,
-1,
choices = (
day_name[0],
day_name[6]
),
size = (firstDayLabel.GetSize()[0], -1)
)
firstDayCtrl.SetSelection(first_day)
panel.holidButton = wx.Button(panel, -1, self.text.holidButton)
def OnApplyBtn(evt):
managerButton.Enable(True)
evt.Skip()
panel.dialog.buttonRow.applyButton.Bind(wx.EVT_BUTTON, OnApplyBtn)
def onManagerButton(evt):
if not self.dialog:
wx.CallAfter(SchedulerDialog, self.text.OpenScheduler, self)
else:
if self.dialog.GetPosition() == (-32000, -32000):
ShowWindow(self.dialog.GetHandle(), SW_RESTORE)
wx.CallAfter(self.dialog.Raise)
evt.Skip()
managerButton.Bind(wx.EVT_BUTTON, onManagerButton)
def onHolidButton(evt):
dlg = HolidaysFrame(
parent = panel,
plugin = self,
)
dlg.Centre()
wx.CallAfter(dlg.ShowHolidaysFrame)
evt.Skip()
panel.holidButton.Bind(wx.EVT_BUTTON, onHolidButton)
bottomSizer = wx.GridBagSizer(1, 1)
bottomSizer.AddGrowableCol(1,1)
bottomSizer.AddGrowableCol(3,1)
bottomSizer.Add(firstDayLabel, (0, 0), flag = wx.LEFT)
bottomSizer.Add(firstDayCtrl, (1, 0), flag = wx.LEFT)
bottomSizer.Add((1, -1), (1, 1), flag = wx.EXPAND)
bottomSizer.Add((1, -1), (1, 3), flag = wx.EXPAND)
bottomSizer.Add(panel.holidButton, (1, 2))
bottomSizer.Add(managerButton, (1, 4), flag = wx.RIGHT)
sizerAdd(bottomSizer, 1, wx.TOP | wx.EXPAND, 15)
def Validation():
flag1 = "%s\\RadioSure.exe" % exists(rsPathCtrl.GetValue())
flag2 = "%s\\RadioSure.xml" % exists(xmlPathCtrl.GetValue())
flag3 = logCheckBox.IsChecked() and logFileCtrl.GetValue() != "" or not logCheckBox.IsChecked()
flag = flag1 and flag2 and flag3
panel.dialog.buttonRow.okButton.Enable(flag)
panel.isDirty = True
panel.dialog.buttonRow.applyButton.Enable(flag)
def OnPathChange(event):
path = rsPathCtrl.GetValue()
if not exists("%s\\RadioSure.exe" % path):
MessageBox(
panel,
self.text.boxMessage1 % 'RadioSure.exe',
self.text.boxTitle % path,
wx.ICON_EXCLAMATION,
plugin = self
)
if path != "":
rsPathCtrl.startDirectory = path
self.RadioSurePath = path
Validation()
event.Skip()
rsPathCtrl.Bind(wx.EVT_TEXT, OnPathChange)
def OnPath2Change(event):
path2 = xmlPathCtrl.GetValue()
if not exists("%s\\RadioSure.xml" % path2):
MessageBox(
panel,
self.text.boxMessage1 % 'RadioSure.xml',
self.text.boxTitle % path2,
wx.ICON_EXCLAMATION,
plugin = self
)
if path2 != "":
self.xmlpath = path2
xmlPathCtrl.startDirectory = path2
Validation()
event.Skip()
xmlPathCtrl.Bind(wx.EVT_TEXT, OnPath2Change)
def logFileChange(event):
self.logfile = logFileCtrl.GetValue()
tmpVal = self.logfile
if not tmpVal:
tmpPath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData)
tmpVal = tmpPath if exists(tmpPath) else self.RadioSurePath
logFileCtrl.startDirectory = tmpVal
Validation()
event.Skip()
logFileCtrl.Bind(wx.EVT_TEXT, logFileChange)
def onLogCheckBox(evt):
val = evt.IsChecked()
logFileCtrl.Enable(val)
if not val:
logFileCtrl.SetValue("")
else:
Validation()
evt.Skip()
logCheckBox.Bind(wx.EVT_CHECKBOX, onLogCheckBox)
while panel.Affirmed():
panel.SetResult(
rsPathCtrl.GetValue(),
xmlPathCtrl.GetValue(),
logFileCtrl.GetValue(),
panel.holidays,
firstDayCtrl.GetSelection(),
)
#===============================================================================
#cls types for Actions list:
#===============================================================================
class Run(eg.ActionBase):
class text:
play = "Automatically play selected favorite after start"
default = "Use start settings RadioSure"
label = "Select favorite:"
over = "Too large number (%s > %s) !"
alr_run = "RadioSure is already running !"
def __call__(self, play = False, fav = 1):
def Play(hwnds):
self.plugin.RefreshVariables()
if fav <= len(self.plugin.Favorites):
if play:
SendMessage(hwnds[0], WM_COMMAND, 4101+fav, 0)
return str(fav)+": "+self.plugin.Favorites[self.plugin.FavIx][1]
else:
return self.text.over % (str(fav),\
str(len(self.plugin.Favorites)))
hwnds = HandleRS()
if not hwnds:
hwnds = self.plugin.GetNewHwnd()
if hwnds:
return Play(hwnds)
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
elif play:
for hwnd in hwnds:
x, rec = self.plugin.GetStatusRS([hwnd])
if rec != 1:
SendMessage(hwnd, WM_COMMAND, 4101+fav, 0)
break
if rec or rec is None:
hwnds = self.plugin.GetNewHwnd(hwnds)
if hwnds:
return Play(hwnds)
else:
self.PrintError(self.text.alr_run)
return self.text.alr_run
def GetLabel(self, play, fav):
num = str(fav) if play else ''
return "%s: %s" % (self.name, num)
def Configure(self, play = False, fav = 1):
panel=eg.ConfigPanel(self)
sizerAdd=panel.sizer.Add
rb1 = panel.RadioButton(play, self.text.play, style=wx.RB_GROUP)
rb2 = panel.RadioButton(not play, self.text.default)
sizerAdd(rb1,0,wx.TOP,15)
sizerAdd(rb2,0,wx.TOP,6)
favLbl=wx.StaticText(panel, -1, self.text.label)
sizerAdd(favLbl,0,wx.TOP,25)
favCtrl = eg.SpinNumCtrl(
panel,
-1,
fav,
fractionWidth=0,
min=1,
max=30,
)
favCtrl.SetValue(fav)
sizerAdd(favCtrl,0,wx.TOP,5)
def onChangeMode(evt=None):
enbl=rb1.GetValue()
favLbl.Enable(enbl)
favCtrl.Enable(enbl)
if evt is not None:
evt.Skip()
rb1.Bind(wx.EVT_RADIOBUTTON, onChangeMode)
rb2.Bind(wx.EVT_RADIOBUTTON, onChangeMode)
onChangeMode()
while panel.Affirmed():
panel.SetResult(
rb1.GetValue(),
favCtrl.GetValue()
)
#===============================================================================
class WindowControl(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
SendMessage(hwnd[0], WM_SYSCOMMAND, self.value, 0)
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class SendMessageActions(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
SendMessage(hwnd[0], WM_COMMAND, self.value, 0)
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class CheckAndChange(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
status = self.plugin.GetRS_Status(hwnd[0])
if status[self.value[0]] == self.value[1]:
SendMessage(hwnd[0], WM_COMMAND, self.value[2], 0)
#===============================================================================
class GetStatus(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
status = self.plugin.GetRS_Status(hwnd[0])
return status[self.value]
#===============================================================================
class GetMenuItem(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
return self.plugin.GetMenuItem(hwnd[0], self.value)
#===============================================================================
class SetVolume(eg.ActionBase):
class text:
label=["Set volume (0 - 100%):",
"Set step (1 - 25%):",
"Set step (1 - 25%):"]
def __call__(self, step = None):
if step is None:
if self.value == 0:
step = 50
else:
step = 5
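# Move the volume trackbar towards the target value: TBM_SETPOS jumps to one
# step short of the target, then a final {Left}/{Right} keypress is sent so
# that RadioSure itself processes the last step (apparent intent of the code
# below).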
hwnd = GetCtrlByID(1006) #1006 = ID of ctrl "msctls_trackbar32"
if hwnd:
vol = SendMessage(hwnd, TBM_GETPOS, 0, 0)
key = None
value = None
if self.value == 0:
volume = step
elif self.value == 1:
volume = vol+step if (vol+step)<100 else 100
else:
volume = vol-step if (vol-step)>0 else 0
if vol>volume:
key='{Left}'
if vol>volume+1:
value = volume+1
elif vol<volume:
key='{Right}'
if vol<volume-1:
value = volume-1
if value:
SendMessage(hwnd, TBM_SETPOS,1,value)
if key:
eg.SendKeys(hwnd, key, False)
return SendMessage(hwnd, TBM_GETPOS, 0, 0)
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
def Configure(self, step = None):
if step is None:
if self.value == 0:
step = 50
else:
step = 5
panel=eg.ConfigPanel(self)
panel.sizer.Add(wx.StaticText(panel, -1, self.text.label[self.value]))
if self.value == 0:
Min = 0
Max = 100
else:
Min = 1
Max = 25
volumeCtrl = eg.SpinNumCtrl(
panel,
-1,
step,
fractionWidth=0,
increment=1,
min=Min,
max=Max,
)
volumeCtrl.SetValue(step)
panel.sizer.Add(volumeCtrl,0,wx.TOP,10)
while panel.Affirmed():
panel.SetResult(volumeCtrl.GetValue())
#===============================================================================
class GetVolume(eg.ActionBase):
def __call__(self):
hwnd = GetCtrlByID(1006) #1006 = ID for ctrl "msctls_trackbar32"
if hwnd:
return SendMessage(hwnd, TBM_GETPOS, 0, 0)
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class SelectFav(eg.ActionBase):
class text:
label = "Select preset number (1-30):"
txtLabel = 'Preset number:'
over = "Too large number (%s > %s) !"
modeLabel = 'Preset number to get as:'
modeChoices = [
'Event payload',
'Python expression',
'Number'
]
def __call__(self,fav = 1, mode = 0, number = '{eg.event.payload}'):
hwnd = HandleRS()
if hwnd:
if mode == 2:
indx = fav
else:
indx = int(eg.ParseString(number))
self.plugin.RefreshVariables()
if indx <= len(self.plugin.Favorites):
SendMessage(hwnd[0], WM_COMMAND, 4101+indx, 0)
return str(indx)+": "+self.plugin.Favorites[indx-1][1]
else:
self.PrintError(
self.text.over % (str(indx),str(len(self.plugin.Favorites))))
return self.text.over % (str(indx),str(len(self.plugin.Favorites)))
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
def GetLabel(self, fav, mode, number):
return "%s %s" % (self.text.txtLabel, str(fav) if mode == 2 else number)
def Configure(self, fav = 1, mode = 0, number = '{eg.event.payload}'):
self.number = number
panel = eg.ConfigPanel(self)
radioBoxMode = wx.RadioBox(
panel,
-1,
self.text.modeLabel,
choices = self.text.modeChoices,
style=wx.RA_SPECIFY_ROWS
)
radioBoxMode.SetSelection(mode)
txtBoxLabel = wx.StaticText(panel, -1, self.text.txtLabel)
numberCtrl = wx.TextCtrl(panel,-1,self.number)
spinLabel = wx.StaticText(panel, -1, self.text.label)
favCtrl = eg.SpinNumCtrl(
panel,
-1,
fav,
fractionWidth=0,
min=1,
max=30,
)
favCtrl.SetValue(fav)
panel.sizer.Add(radioBoxMode, 0, wx.TOP,0)
panel.sizer.Add(txtBoxLabel,0,wx.TOP,10)
panel.sizer.Add(numberCtrl,0,wx.TOP,5)
panel.sizer.Add(spinLabel,0,wx.TOP,10)
panel.sizer.Add(favCtrl,0,wx.TOP,5)
def onRadioBox(event = None):
sel = radioBoxMode.GetSelection()
txtBoxLabel.Enable(False)
numberCtrl.Enable(False)
spinLabel.Enable(False)
favCtrl.Enable(False)
if sel == 0:
self.number = '{eg.event.payload}'
elif sel == 1:
txtBoxLabel.Enable(True)
numberCtrl.Enable(True)
else:
self.number = favCtrl.GetValue()
spinLabel.Enable(True)
favCtrl.Enable(True)
numberCtrl.ChangeValue(str(self.number))
if event:
event.Skip()
radioBoxMode.Bind(wx.EVT_RADIOBOX, onRadioBox)
onRadioBox()
def onSpin(event):
numberCtrl.ChangeValue(str(favCtrl.GetValue()))
event.Skip()
favCtrl.Bind(wx.EVT_TEXT, onSpin)
while panel.Affirmed():
panel.SetResult(
favCtrl.GetValue(),
radioBoxMode.GetSelection(),
numberCtrl.GetValue())
#===============================================================================
class NextPrevFav(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
self.plugin.RefreshVariables()
#ix = self.plugin.FavIx
ix = self.plugin.GetMenuItem(hwnd[0], 7)[0]
if self.value == 1 and ix == len(self.plugin.Favorites) - 1 :
ix = -1
elif self.value == -1 and ix == 0:
ix = len(self.plugin.Favorites)
SendMessage(hwnd[0], WM_COMMAND, 4102+ix+self.value, 0)
return (str(ix+self.value+1)+": "+self.plugin.Favorites[ix+self.value][1])
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class RandomFav(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
self.plugin.RefreshVariables()
ix = self.plugin.GetMenuItem(hwnd[0], 7)[0]
lng = len(self.plugin.Favorites)
if lng > 1:
newIx = randrange(lng)
while newIx == ix:
newIx = randrange(lng)
SendMessage(hwnd[0], WM_COMMAND, 4102+newIx, 0)
return (str(newIx+1)+": "+self.plugin.Favorites[newIx][1])
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class GetPlayingTitle(eg.ActionBase):
def __call__(self):
hwnd = HandleRS()
if hwnd:
return GetWindowText(hwnd[0])
else:
self.PrintError(self.plugin.text.text1)
return self.plugin.text.text1
#===============================================================================
class StartTitlebarObservation(eg.ActionBase):
class text:
intervalLabel = "Refresh interval (s):"
label = "Event suffix:"
timeStamp = "Insert timestamp"
def __call__(
self,
period = 1.0,
evtName ="titlebar",
):
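# Aborts any previously running observation thread and starts a new
# ObservationThread with the given polling period and event-name suffix.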
if self.plugin.observThread:
ot = self.plugin.observThread
if ot.isAlive():
ot.AbortObservation()
del self.plugin.observThread
ot = ObservationThread(
period,
evtName,
)
ot.start()
self.plugin.observThread = ot
def Configure(
self,
period = 1.0,
evtName = "titlebar",
):
panel = eg.ConfigPanel()
periodNumCtrl = eg.SpinNumCtrl(
panel,
-1,
period,
integerWidth = 5,
fractionWidth = 1,
allowNegative = False,
min = 0.1,
increment = 0.1,
)
intervalLbl = wx.StaticText(panel, -1, self.text.intervalLabel)
textLabel = wx.StaticText(panel, -1, self.text.label)
textControl = wx.TextCtrl(panel, -1, evtName, size = (200,-1))
AddCtrl = panel.sizer.Add
AddCtrl(intervalLbl, 0, wx.TOP, 20)
AddCtrl(periodNumCtrl, 0, wx.TOP, 3)
AddCtrl(textLabel, 0, wx.TOP, 20)
AddCtrl(textControl, 0, wx.TOP, 3)
textLabel.SetFocus()
while panel.Affirmed():
panel.SetResult(
periodNumCtrl.GetValue(),
textControl.GetValue(),
)
#===============================================================================
class StopTitlebarObservation(eg.ActionBase):
def __call__(self):
if self.plugin.observThread:
ot = self.plugin.observThread
if ot.isAlive():
ot.AbortObservation()
del self.plugin.observThread
self.plugin.observThread = None
#===============================================================================
class OpenManager(eg.ActionBase):
def __call__(self):
if not self.plugin.manager:
wx.CallAfter(ManagerDialog, self.text, self.plugin)
else:
if self.plugin.manager.GetPosition() == (-32000, -32000):
ShowWindow(self.plugin.manager.GetHandle(), SW_RESTORE)
wx.CallAfter(self.plugin.manager.Raise)
#===============================================================================
class HideManager(eg.ActionBase):
def __call__(self):
if self.plugin.manager:
wx.CallAfter(self.plugin.manager.Close)
#===============================================================================
class OpenScheduler(eg.ActionBase):
def __call__(self):
if not self.plugin.dialog:
wx.CallAfter(SchedulerDialog, self.text, self.plugin)
else:
if self.plugin.dialog.GetPosition() == (-32000, -32000):
ShowWindow(self.plugin.dialog.GetHandle(), SW_RESTORE)
wx.CallAfter(self.plugin.dialog.Raise)
#===============================================================================
class HideScheduler(eg.ActionBase):
def __call__(self):
if self.plugin.dialog:
wx.CallAfter(self.plugin.dialog.Close)
#===============================================================================
class EnableSchedule(eg.ActionBase):
class text:
scheduleTitle = "Schedule title:"
notFound = 'Cannot find schedule "%s"!'
def __call__(self, schedule=""):
schedule = eg.ParseString(schedule)
xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
if not exists(xmlfile):
return
data = self.plugin.data
tmpLst = [item[1] for item in data]
if schedule in tmpLst:
ix = tmpLst.index(schedule)
if self.value > -1:
data[ix][0] = self.value
self.plugin.dataToXml()
self.plugin.UpdateEGscheduler()
if self.plugin.dialog:
wx.CallAfter(self.plugin.dialog.EnableSchedule, schedule, self.value)
return data[tmpLst.index(schedule)]
else:
return self.text.notFound % schedule
def Configure(self, schedule=""):
panel = eg.ConfigPanel()
xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
if not exists(xmlfile):
return
data = self.plugin.xmlToData()
choices = [item[1] for item in data]
textControl = wx.ComboBox(panel, -1, schedule, size = (300,-1), choices = choices)
panel.sizer.Add(wx.StaticText(panel,-1,self.text.scheduleTitle), 0,wx.LEFT|wx.TOP, 10)
panel.sizer.Add(textControl, 0, wx.LEFT, 10)
while panel.Affirmed():
panel.SetResult(textControl.GetValue())
#===============================================================================
class EnableAll(eg.ActionBase):
def __call__(self):
xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
if not exists(xmlfile):
return
data = self.plugin.data
for schedule in data:
schedule[0] = self.value
self.plugin.dataToXml()
self.plugin.UpdateEGscheduler()
if self.plugin.dialog:
wx.CallAfter(self.plugin.dialog.EnableAll, self.value)
#===============================================================================
class DeleteSchedule(eg.ActionBase):
class text:
scheduleTitle = "Schedule title:"
def __call__(self, schedule=""):
schedule = eg.ParseString(schedule)
xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
if not exists(xmlfile):
return
data = self.plugin.data
tmpLst = [item[1] for item in data]
if schedule in tmpLst:
ix = tmpLst.index(schedule)
data.pop(ix)
self.plugin.dataToXml()
self.plugin.UpdateEGscheduler()
if self.plugin.dialog:
wx.CallAfter(self.plugin.dialog.DeleteSchedule, schedule)
def Configure(self, schedule=""):
panel = eg.ConfigPanel()
xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
if not exists(xmlfile):
return
data = self.plugin.xmlToData()
choices = [item[1] for item in data]
textControl = wx.ComboBox(panel, -1, schedule, size = (300,-1), choices = choices)
panel.sizer.Add(wx.StaticText(panel,-1,self.text.scheduleTitle), 0,wx.LEFT|wx.TOP, 10)
panel.sizer.Add(textControl, 0, wx.LEFT, 10)
while panel.Affirmed():
panel.SetResult(textControl.GetValue())
#===============================================================================
class RunScheduleImmediately(eg.ActionBase):
class text:
scheduleTitle = "Schedule title:"
notFound = 'Cannot find schedule "%s"!'
immedRun = 'Schedule "%s" - immediate execution. Possible next run time: %s'
def __call__(self, schedule=""):
schedule = eg.ParseString(schedule)
data = self.plugin.data
tmpLst = [item[1] for item in data]
if schedule in tmpLst:
ix = tmpLst.index(schedule)
sched = self.plugin.data[ix]
if sched[0]:
for sch in eg.scheduler.__dict__['heap']:
if sch[1] == self.plugin.RadioSureScheduleRun:
if sch[2][0] == sched[1]:
eg.scheduler.CancelTask(sch)
self.plugin.updateLogFile(self.plugin.text.canc % sch[2][0])
break
next, cmdline = self.plugin.Execute(sched, True)
next = next[:19] if next else self.plugin.text.none
self.plugin.updateLogFile(self.text.immedRun % (sched[1], next))
self.plugin.updateLogFile(self.plugin.text.cmdLine % cmdline)
else:
self.PrintError(self.text.notFound % schedule)
return self.text.notFound % schedule
def Configure(self, schedule = ""):
panel = eg.ConfigPanel()
data = self.plugin.data
choices = [item[1] for item in data]
textControl = wx.ComboBox(panel, -1, schedule, size = (300, -1), choices = choices)
panel.sizer.Add(wx.StaticText(panel, -1, self.text.scheduleTitle), 0, wx.LEFT | wx.TOP, 10)
panel.sizer.Add(textControl, 0, wx.LEFT, 10)
while panel.Affirmed():
panel.SetResult(textControl.GetValue())
#===============================================================================
class AddSchedule(eg.ActionBase):
class text:
python_expr = "Python expression:"
descr = u'''<rst>**Add schedule**.
In the edit box, enter a python expression with the parameters of the schedule.
This may be, for example, *eg.result*, *eg.event.payload*, or an entire list
(in the same format as you get as a result of the **"GetSchedule"** action).
This action works in two ways (depending on the title of the schedule):
1. If a schedule with the same title already exists, its parameters are overwritten by the new ones.
2. If the title does not exist yet, the schedule is added to the list as new.'''
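# Illustrative only - the field layout is deduced from xmlToData(); the
# title, source URL and modes value below are hypothetical:
# [1, u"Morning news", 1, ["07:00:00", "01:00"], "", u"http://example.org/stream", u"", 3]
# i.e. [enable, title, type, [start_time, duration, ...], last_run, source, filename, modes]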
def __call__(self, expr = ""):
schedule = eg.ParseString(expr)
schedule = eval(schedule)
if len(schedule) == 8 and isinstance(schedule[1], unicode):
data = self.plugin.data
tmpLst = [item[1] for item in data]
if schedule[1] in tmpLst:
data[tmpLst.index(schedule[1])] = schedule
else:
data.append(schedule)
self.plugin.UpdateEGscheduler()
if self.plugin.dialog:
wx.CallAfter(self.plugin.dialog.AddSchedule, schedule)
def Configure(self, expr=""):
panel = eg.ConfigPanel(resizable=True)
textControl = wx.TextCtrl(panel, -1, expr, size = (300,-1), style = wx.TE_MULTILINE )
panel.sizer.Add(wx.StaticText(panel,-1,self.text.python_expr), 0,wx.LEFT|wx.TOP, 10)
panel.sizer.Add(textControl, 1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
while panel.Affirmed():
panel.SetResult(textControl.GetValue())
#===============================================================================
class ShowMenu(eg.ActionClass):
name = "Show Radio Sure menu"
description = "Show Radio Sure menu."
panel = None
class text:
OSELabel = 'Menu show on:'
menuPreview = 'RS On Screen Menu preview:'
menuFont = 'Font:'
txtColour = 'Text colour'
background = 'Background colour'
txtColourSel = 'Selected text colour'
backgroundSel = 'Selected background colour'
dialog = "Events ..."
btnToolTip = """Press this button to assign events to control the menu !!!"""
evtAssignTitle = "Menu control - events assignement"
events = (
"Cursor up:",
"Cursor down:",
"Back from the (sub)menu:",
"Submenu, or select an item:",
"Cancel (Escape):",
)
inverted = "Use inverted colours"
submenuLbl = "Show main menu or submenu:"
def __call__(
self,
fore,
back,
fontInfo = TAHOMA_INFO,
monitor = 0,
foreSel = (180, 180, 180),
backSel = (75, 75, 75),
evtList = [],
inverted = True,
submenu = 0
):
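# Shows the on-screen menu for the selected RadioSure (sub)menu; the
# 'submenu' index is mapped to the real context-menu position via the tuple
# (0, 7, 8, 9, 10, 11, 12, 14). The action thread is blocked until the menu
# is closed.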
hwnd = HandleRS()
if hwnd:
if not self.plugin.menuDlg:
self.plugin.menuDlg = Menu()
self.event = CreateEvent(None, 0, 0, None)
wx.CallAfter(self.plugin.menuDlg.ShowMenu,
fore,
back,
foreSel,
backSel,
fontInfo,
False,
self.plugin,
self.event,
monitor,
hwnd[0],
evtList,
(0, 7, 8, 9, 10, 11, 12, 14)[submenu],
)
eg.actionThread.WaitOnEvent(self.event)
def GetLabel(
self,
fore,
back,
fontInfo,
monitor,
foreSel,
backSel,
evtList,
inverted,
submenu = 0
):
return "%s: %s" % (self.name, self.plugin.submenus[submenu])
def Configure(
self,
fore = (75, 75, 75),
back = (180, 180, 180),
fontInfo = TAHOMA_INFO,
monitor = 0,
foreSel = (180, 180, 180),
backSel = (75, 75, 75),
evtList = [[],[],[],[],[]],
inverted = True,
submenu = 0
):
self.fontInfo = fontInfo
self.fore = fore
self.back = back
self.foreSel = foreSel
self.backSel = backSel
self.oldSel=0
self.inverted = inverted
global panel
panel = eg.ConfigPanel(self)
panel.evtList = cpy(evtList)
previewLbl=wx.StaticText(panel, -1, self.text.menuPreview)
listBoxCtrl = MenuGrid(panel, 3)
items = (("Blabla_1",0,True,804),
("Blabla_2",1,False,804),
("Blabla_3",2,False,-1),)
listBoxCtrl.Set(items)
listBoxCtrl.SetBackgroundColour(self.back)
listBoxCtrl.SetForegroundColour(self.fore)
listBoxCtrl.SetSelectionBackground(self.backSel)
listBoxCtrl.SetSelectionForeground(self.foreSel)
#Font button
fontLbl=wx.StaticText(panel, -1, self.text.menuFont)
fontButton = ExtFontSelectButton(panel, value = fontInfo)
font = wx.FontFromNativeInfoString(fontInfo)
for n in range(10,20):
font.SetPointSize(n)
fontButton.SetFont(font)
hght = fontButton.GetTextExtent('X')[1]
if hght > 20:
break
listBoxCtrl.SetDefaultCellFont(font)
arial = wx.FontFromNativeInfoString(ARIAL_INFO)
fontButton.SetFont(font)
for n in range(1,1000):
arial.SetPointSize(n)
fontButton.SetFont(arial)
h = fontButton.GetTextExtent(u"\u25a0")[1]
if h > hght:
break
arial.SetPointSize(2*n/3)
fontButton.SetFont(arial)
w0 = 2 * fontButton.GetTextExtent(u"\u25a0")[0]
attr = gridlib.GridCellAttr()
attr.SetFont(arial)
attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
listBoxCtrl.SetColAttr(0,attr)
for n in range(1,1000):
arial.SetPointSize(n)
fontButton.SetFont(arial)
h = fontButton.GetTextExtent(u"\u25ba")[1]
if h > hght:
break
arial.SetPointSize(n/2)
fontButton.SetFont(arial)
w2 = 2 * fontButton.GetTextExtent(u"\u25ba")[0]
attr = gridlib.GridCellAttr()
attr.SetFont(arial)
attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
listBoxCtrl.SetColAttr(2,attr)
listBoxCtrl.SetDefaultRowSize(hght+4, True)
displayChoice = eg.DisplayChoice(panel, monitor)
w = displayChoice.GetSize()[0]
OSElbl = wx.StaticText(panel, -1, self.text.OSELabel)
useInvertedCtrl = wx.CheckBox(panel, -1, self.text.inverted)
useInvertedCtrl.SetValue(inverted)
subMenuLbl = wx.StaticText(panel, -1, self.text.submenuLbl)
if self.plugin.submenus:
choices = self.plugin.submenus
else:
choices = self.plugin.GetSubmenuStrings()
subMenuCtrl = wx.Choice(panel, -1, choices = choices)
subMenuCtrl.SetSelection(submenu)
#Button Text Colour
foreLbl=wx.StaticText(panel, -1, self.text.txtColour+':')
foreColourButton = ExtColourSelectButton(panel,fore,title = self.text.txtColour)
#Button Background Colour
backLbl=wx.StaticText(panel, -1, self.text.background+':')
backColourButton = ExtColourSelectButton(panel,back,title = self.text.background)
#Button Selected Text Colour
foreSelLbl=wx.StaticText(panel, -1, self.text.txtColourSel+':')
foreSelColourButton = ExtColourSelectButton(panel,foreSel,title = self.text.txtColourSel)
#Button Selected Background Colour
backSelLbl=wx.StaticText(panel, -1, self.text.backgroundSel+':')
backSelColourButton = ExtColourSelectButton(panel,backSel,title = self.text.backgroundSel)
#Button Dialog "Menu control - assignement of events"
dialogButton = wx.Button(panel,-1,self.text.dialog)
dialogButton.SetToolTipString(self.text.btnToolTip)
foreSelLbl.Enable(not inverted)
foreSelColourButton.Enable(not inverted)
backSelLbl.Enable(not inverted)
backSelColourButton.Enable(not inverted)
#Sizers
mainSizer = panel.sizer
topSizer=wx.GridBagSizer(2, 30)
mainSizer.Add(topSizer)
topSizer.Add(previewLbl,(0, 0),flag = wx.TOP,border = 0)
topSizer.Add(listBoxCtrl,(1, 0),(4, 1))
topSizer.Add(useInvertedCtrl,(6, 0),flag = wx.TOP, border = 8)
topSizer.Add(subMenuLbl,(8, 0), flag = wx.TOP,border = 8)
topSizer.Add(subMenuCtrl,(9, 0), flag = wx.TOP)
topSizer.Add(fontLbl,(0, 1),flag = wx.TOP)
topSizer.Add(fontButton,(1, 1),flag = wx.TOP)
topSizer.Add(foreLbl,(2, 1),flag = wx.TOP,border = 8)
topSizer.Add(foreColourButton,(3, 1),flag = wx.TOP)
topSizer.Add(backLbl,(4, 1),flag = wx.TOP,border = 8)
topSizer.Add(backColourButton,(5, 1),flag = wx.TOP)
topSizer.Add(OSElbl,(0, 2), flag = wx.TOP)
topSizer.Add(displayChoice,(1, 2),flag = wx.TOP)
topSizer.Add(foreSelLbl,(6, 1), (1, 2), flag = wx.TOP,border = 8)
topSizer.Add(foreSelColourButton, (7, 1), flag = wx.TOP)
topSizer.Add(backSelLbl,(8, 1), (1, 2), flag = wx.TOP,border = 8)
topSizer.Add(backSelColourButton, (9, 1), flag = wx.TOP)
topSizer.Add(dialogButton, (3, 2), flag = wx.TOP|wx.EXPAND)
panel.sizer.Layout()
wdth = 160
if (hght+4)*listBoxCtrl.GetNumberRows() > listBoxCtrl.GetSize()[1]: #after Layout() !!!
wdth -= SYS_VSCROLL_X
listBoxCtrl.SetColSize(0, w0)
listBoxCtrl.SetColSize(1, wdth - w0 - w2)
listBoxCtrl.SetColSize(2, w2)
listBoxCtrl.SetGridCursor(-1, 1)
listBoxCtrl.SelectRow(0)
def OnMonitor(evt):
listBoxCtrl.SetFocus()
evt.Skip
displayChoice.Bind(wx.EVT_CHOICE, OnMonitor)
def OnInverted(evt):
flag = evt.IsChecked()
foreSelLbl.Enable(not flag)
foreSelColourButton.Enable(not flag)
backSelLbl.Enable(not flag)
backSelColourButton.Enable(not flag)
self.inverted = flag
if flag:
self.foreSel = self.back
self.backSel = self.fore
backSelColourButton.SetValue(self.backSel)
foreSelColourButton.SetValue(self.foreSel)
listBoxCtrl.SetSelectionForeground(self.foreSel)
listBoxCtrl.SetSelectionBackground(self.backSel)
listBoxCtrl.SetFocus()
evt.Skip
useInvertedCtrl.Bind(wx.EVT_CHECKBOX, OnInverted)
def OnDialogBtn(evt):
dlg = MenuEventsDialog(
parent = panel,
plugin = self.plugin,
)
dlg.Centre()
wx.CallAfter(dlg.ShowMenuEventsDialog, self.text.evtAssignTitle, self.text.events)
evt.Skip()
dialogButton.Bind(wx.EVT_BUTTON, OnDialogBtn)
def OnFontBtn(evt):
value = evt.GetValue()
self.fontInfo = value
font = wx.FontFromNativeInfoString(value)
for n in range(10,20):
font.SetPointSize(n)
fontButton.SetFont(font)
hght = fontButton.GetTextExtent('X')[1]
if hght > 20:
break
listBoxCtrl.SetDefaultCellFont(font)
listBoxCtrl.SetDefaultRowSize(hght+4, True)
for i in range(listBoxCtrl.GetNumberRows()):
listBoxCtrl.SetCellFont(i,1,font)
listBoxCtrl.SetFocus()
if evt:
evt.Skip()
fontButton.Bind(EVT_BUTTON_AFTER, OnFontBtn)
def OnColourBtn(evt):
id = evt.GetId()
value = evt.GetValue()
if id == foreColourButton.GetId():
listBoxCtrl.SetForegroundColour(value)
if self.inverted:
self.backSel = self.fore
listBoxCtrl.SetSelectionBackground(value)
backSelColourButton.SetValue(value)
elif id == backColourButton.GetId():
listBoxCtrl.SetBackgroundColour(value)
if self.inverted:
self.foreSel = self.back
listBoxCtrl.SetSelectionForeground(value)
foreSelColourButton.SetValue(value)
elif id == foreSelColourButton.GetId():
listBoxCtrl.SetSelectionForeground(value)
elif id == backSelColourButton.GetId():
listBoxCtrl.SetSelectionBackground(value)
listBoxCtrl.Refresh()
listBoxCtrl.SetFocus()
evt.Skip()
foreColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
backColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
foreSelColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
backSelColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
def setFocus():
listBoxCtrl.SetFocus()
panel.setFocus = setFocus
# re-assign the test button
def OnButton(event):
hwnds = HandleRS()
if hwnds:
if not self.plugin.menuDlg:
self.plugin.menuDlg = Menu()
self.event = CreateEvent(None, 0, 0, None)
wx.CallAfter(self.plugin.menuDlg.ShowMenu,
foreColourButton.GetValue(),
backColourButton.GetValue(),
foreSelColourButton.GetValue(),
backSelColourButton.GetValue(),
self.fontInfo,
True,
self.plugin,
self.event,
displayChoice.GetSelection(),
hwnds[0],
panel.evtList,
(0, 7, 8, 9, 10, 11, 12, 14)[subMenuCtrl.GetSelection()]
)
eg.actionThread.WaitOnEvent(self.event)
panel.dialog.buttonRow.testButton.Bind(wx.EVT_BUTTON, OnButton)
while panel.Affirmed():
fontInfo = fontButton.GetValue()
if not fontInfo:
font = listBoxCtrl.GetFont()
font.SetPointSize(36)
fontInfo = font.GetNativeFontInfoDesc()
panel.SetResult(
foreColourButton.GetValue(),
backColourButton.GetValue(),
fontInfo,
displayChoice.GetSelection(),
foreSelColourButton.GetValue(),
backSelColourButton.GetValue(),
panel.evtList,
useInvertedCtrl.GetValue(),
subMenuCtrl.GetSelection()
)
#===============================================================================
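# Added remark (inferred from the entries below, not from separate docs): each
# ACTIONS entry appears to follow the pattern
#   (action class, internal name, label, description, value),
# where `value` is the data handed to the action class - typically the
# message/control id sent to the RadioSure window for SendMessageActions, a
# tuple or index for CheckAndChange/GetStatus, or None when no extra value is
# needed.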
ACTIONS = (
(Run,"Run","Run RadioSure","Run RadioSure with its default settings.",None),
(SendMessageActions,"Close","Close window (exit RadioSure)","Close window (exit RadioSure).",1),
(GetPlayingTitle,"GetPlayingTitle","Get currently playing station/title","Gets the name of currently playing station/title.", None),
(StartTitlebarObservation,"StartTitlebarObservation","Start observation of titlebar","Starts observation of titlebar.", None),
(StopTitlebarObservation,"StopTitlebarObservation","Stop observation of titlebar","Stops observation of titlebar.", None),
(ShowMenu,"ShowMenu","Show menu","Show the on-screen menu.",None),
(eg.ActionGroup, 'Window', 'Window', 'Window',(
(SendMessageActions,"Minimize","Minimize window","Minimize window.",2),
(WindowControl,"Restore","Restore window","Restore window.",SC_RESTORE),
(SendMessageActions,"MinimRest","Minimize/Restore","Minimize/Restore window.",1075),
(SendMessageActions,"Expand","Collapse/Expand window","Collapse/Expand window.",1076),
(SendMessageActions,"OnTop","Stay on top On/Off","Stay on top On/Off.",1077),
)),
(eg.ActionGroup, 'MainControl', 'Main control', 'Main control',(
(SendMessageActions,"PlayStop","Play/Stop","Play/Stop.",1000),
(CheckAndChange,"Play","Play","Play.",(0, False, 1000)),
(SendMessageActions,"Stop","Stop","Stop.",1008),
(GetStatus,"GetPlaying","Get status of playing","Get status of playing.",0),
(SendMessageActions,"MuteOnOff","Mute On/Off","Mute On/Off.",1027),
(CheckAndChange,"MuteOn","Mute on","Mute on.",(1, False, 1027)),
(CheckAndChange,"MuteOff","Mute off","Mute off.",(1, True, 1027)),
(GetStatus,"GetMuted","Get muted","Get muted.",1),
(SendMessageActions,"RecOnOff","Recording On/Off","Recording On/Off.",1051),
(CheckAndChange,"RecOn","Recording on","Recording on.",(2, False, 1051)),
(CheckAndChange,"RecOff","Recording off","Recording off.",(2, True, 1051)),
(GetStatus,"GetRecording","Get recording","Get recording.",2),
(SendMessageActions,"RecOnlyCurr",'Toggle "Record only current track"','Toggle "Record only current track".', 4036),
(CheckAndChange,"RecOnlyOn",'Set "Record only current track"','Set "Record only current track".',(3, False, 4036)),
(CheckAndChange,"RecOnlyOff",'Clear "Record only current track"','Clear "Record only current track".',(3, True, 4036)),
(GetStatus,"GetRecOnlyCurr",'Get "Record only current track"','Get "Record only current track".',3),
)),
(eg.ActionGroup, 'Volume', 'Volume', 'Volume',(
(GetVolume,"GetVolume","Get volume","Get volume.", None),
(SetVolume,"SetVolume","Set volume","Set volume.", 0),
(SetVolume,"VolumeUp","Volume up","Volume up.", 1),
(SetVolume,"VolumeDown","Volume down","Volume down.", 2),
)),
(eg.ActionGroup, 'Clipboard', 'Clipboard', 'Clipboard',(
(SendMessageActions,"CopyURLtoClipboard","Copy URL to Clipboard","Copy URL to Clipboard.", 4037),
(SendMessageActions,"CopyTitleToClipboard","Copy Title to Clipboard","Copy Title to Clipboard.", 4038),
(SendMessageActions,"PlayURLfromClipboard","Play URL from Clipboard","Play URL from Clipboard.", 4039),
)),
(eg.ActionGroup, 'Equalizer', 'Equalizer', 'Equalizer',(
(SendMessageActions,"EqualizerOff","Equalizer Off","Equalizer Off.", 4040),
(SendMessageActions,"EqualizerJazz","Equalizer Jazz","Equalizer Jazz.", 4041),
(SendMessageActions,"EqualizerPop","Equalizer Pop","Equalizer Pop.", 4042),
(SendMessageActions,"EqualizerRock","Equalizer Rock","Equalizer Rock.", 4043),
(SendMessageActions,"EqualizerClassic","Equalizer Classic","Equalizer Classic.", 4044),
(GetMenuItem, "GetEqualizerIndex", "Get Equalizer", "Get Equalizer.", 9),
)),
(eg.ActionGroup, 'SleepTimer', 'Sleep timer', 'Sleep timer',(
(SendMessageActions,"SleepTimerOff","Sleep timer Off","Sleep timer Off.", 4034),
(SendMessageActions,"SleepIn5Min","Sleep in 5 min","Sleep in 5 min.", 4026),
(SendMessageActions,"SleepIn10Min","Sleep in 10 min","Sleep in 10 min.", 4027),
(SendMessageActions,"SleepIn15Min","Sleep in 15 min","Sleep in 15 min.", 4028),
(SendMessageActions,"SleepIn20Min","Sleep in 20 min","Sleep in 20 min.", 4029),
(SendMessageActions,"SleepIn30Min","Sleep in 30 min","Sleep in 30 min.", 4030),
(SendMessageActions,"SleepIn60Min","Sleep in 60 min","Sleep in 60 min.", 4031),
(SendMessageActions,"SleepIn90Min","Sleep in 90 min","Sleep in 90 min.", 4032),
(SendMessageActions,"SleepIn120Min","Sleep in 120 min","Sleep in 120 min.", 4033),
)),
(eg.ActionGroup, 'Fav_and_Hist', 'Favorites and History', 'Favorites and History',(
(SendMessageActions,"AddFav","Add to favorites","Add current station to favorites.",1324),
(SendMessageActions,"RemFav","Remove from favorites","Remove current station from favorites.",1325),
(SelectFav,"SelectFav","Select favorite (preset number)","Select favorite by preset number (order).", None),
(NextPrevFav,"NextFav","Next favorite","Next favorite.", 1),
(NextPrevFav,"PreviousFav","Previous favorite","Previous favorite.", -1),
(RandomFav,"RandomFav","Random favorite","Random favorite.", None),
(SendMessageActions,"PreviousHist","Back in history","Back in history.",1038),
(SendMessageActions,"ForwardHist","Forward in history","Forward in history.",1039),
(OpenManager,"OpenManager","Open manager","Open manager.", None),
(HideManager,"HideManager","Hide manager","Hide manager.", None),
)),
(eg.ActionGroup, 'Scheduler', 'Scheduler', 'Scheduler',(
(OpenScheduler,"OpenScheduler","Open scheduler","Open scheduler.", None),
(HideScheduler,"HideScheduler","Hide scheduler","Hide scheduler.", None),
(EnableSchedule,"EnableSchedule","Enable schedule","Enable schedule.", 1),
(EnableSchedule,"DisableSchedule","Disable schedule","Disable schedule.", 0),
(EnableAll,"EnableAll","Enable all schedules","Enable all schedules.", 1),
(EnableAll,"DisableAll","Disable all schedules","Disable all schedules.", 0),
(EnableSchedule,"GetSchedule","Get schedule","Get schedule.", -1),
(AddSchedule,"AddSchedule","Add schedule",AddSchedule.text.descr, None),
(DeleteSchedule,"DeleteSchedule","Delete schedule","Delete schedule.", None),
(RunScheduleImmediately, "RunScheduleImmediately", "Run schedule immediately", "Runs schedule immediately.", None),
)),
)
#=============================================================================== | gpl-2.0 | -4,150,490,300,528,661,500 | 38.438699 | 136 | 0.51269 | false |
scikit-learn-contrib/py-earth | examples/plot_feature_importance.py | 3 | 2142 | """
===========================
Plotting feature importance
===========================
A simple example showing how to compute and display
feature importances; the result is also compared with the
feature importances obtained using random forests.
Feature importance is a measure of the effect of the features
on the outputs. For each feature, the values go from
0 to 1, where a higher value means that the feature has
a greater effect on the outputs.
Currently three criteria are supported: 'gcv', 'rss' and 'nb_subsets'.
See [1], section 12.3 for more information about the criteria.
.. [1] http://www.milbo.org/doc/earth-notes.pdf
"""
import numpy
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = numpy.random.uniform(size=(m, n))
y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) +
20 * (X[:, 2] - 0.5) ** 2 +
10 * X[:, 3] +
5 * X[:, 4] + numpy.random.uniform(size=m))
# Fit an Earth model
criteria = ('rss', 'gcv', 'nb_subsets')
model = Earth(max_degree=3,
max_terms=10,
minspan_alpha=.5,
feature_importance_type=criteria,
verbose=True)
model.fit(X, y)
rf = RandomForestRegressor()
rf.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
print(model.summary_feature_importances(sort_by='gcv'))
# Plot the feature importances
importances = model.feature_importances_
importances['random_forest'] = rf.feature_importances_
criteria = criteria + ('random_forest',)
idx = 1
fig = plt.figure(figsize=(20, 10))
labels = ['$x_{}$'.format(i) for i in range(n)]
for crit in criteria:
plt.subplot(2, 2, idx)
plt.bar(numpy.arange(len(labels)),
importances[crit],
align='center',
color='red')
plt.xticks(numpy.arange(len(labels)), labels)
plt.title(crit)
plt.ylabel('importances')
idx += 1
title = '$x_0,...x_9 \sim Unif(0, 1)$\n$y= 10sin(\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + Unif(0, 1)$'
fig.suptitle(title, fontsize="x-large")
plt.show()
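# Optional follow-up, not part of the original example: with several criteria
# requested, `model.feature_importances_` behaves like a dict of arrays (one
# entry per criterion), so the same numbers can be tabulated for a quick
# side-by-side comparison. pandas is assumed to be available here.
import pandas as pd
importance_table = pd.DataFrame({crit: importances[crit] for crit in criteria},
                                index=labels)
print(importance_table.round(3))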
| bsd-3-clause | -7,920,168,771,436,378,000 | 28.75 | 119 | 0.641457 | false |
makerbot/ReplicatorG | skein_engines/skeinforge-47/fabmetheus_utilities/vector3index.py | 12 | 8587 | """
Vector3 is a three dimensional vector class.
Below are examples of Vector3 use.
>>> from vector3 import Vector3
>>> origin = Vector3()
>>> origin
0.0, 0.0, 0.0
>>> pythagoras = Vector3( 3, 4, 0 )
>>> pythagoras
3.0, 4.0, 0.0
>>> pythagoras.magnitude()
5.0
>>> pythagoras.magnitudeSquared()
25
>>> triplePythagoras = pythagoras * 3.0
>>> triplePythagoras
9.0, 12.0, 0.0
>>> plane = pythagoras.dropAxis()
>>> plane
(3+4j)
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import xml_simple_writer
import math
import operator
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Nophead <http://forums.reprap.org/profile.php?12,28>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
class Vector3Index:
'A three dimensional vector index class.'
__slots__ = ['index', 'x', 'y', 'z']
def __init__( self, index, x = 0.0, y = 0.0, z = 0.0 ):
self.index = index
self.x = x
self.y = y
self.z = z
def __abs__(self):
'Get the magnitude of the Vector3.'
return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
magnitude = __abs__
def __add__(self, other):
'Get the sum of this Vector3 and other one.'
return Vector3Index( self.index, self.x + other.x, self.y + other.y, self.z + other.z )
def __copy__(self):
'Get the copy of this Vector3.'
return Vector3Index( self.index, self.x, self.y, self.z )
__pos__ = __copy__
copy = __copy__
def __div__(self, other):
'Get a new Vector3 by dividing each component of this one.'
return Vector3Index( self.index, self.x / other, self.y / other, self.z / other )
def __eq__(self, other):
'Determine whether this vector is identical to other one.'
if other == None:
return False
if other.__class__ != self.__class__:
return False
return self.x == other.x and self.y == other.y and self.z == other.z
def __floordiv__(self, other):
'Get a new Vector3 by floor dividing each component of this one.'
return Vector3Index( self.index, self.x // other, self.y // other, self.z // other )
def __hash__(self):
'Determine whether this vector is identical to other one.'
return self.__repr__().__hash__()
def __iadd__(self, other):
'Add other Vector3 to this one.'
self.x += other.x
self.y += other.y
self.z += other.z
return self
def __idiv__(self, other):
'Divide each component of this Vector3.'
self.x /= other
self.y /= other
self.z /= other
return self
def __ifloordiv__(self, other):
'Floor divide each component of this Vector3.'
self.x //= other
self.y //= other
self.z //= other
return self
def __imul__(self, other):
'Multiply each component of this Vector3.'
self.x *= other
self.y *= other
self.z *= other
return self
def __isub__(self, other):
'Subtract other Vector3 from this one.'
self.x -= other.x
self.y -= other.y
self.z -= other.z
return self
def __itruediv__(self, other):
'True divide each component of this Vector3.'
self.x = operator.truediv( self.x, other )
self.y = operator.truediv( self.y, other )
self.z = operator.truediv( self.z, other )
return self
def __mul__(self, other):
'Get a new Vector3 by multiplying each component of this one.'
return Vector3Index( self.index, self.x * other, self.y * other, self.z * other )
def __ne__(self, other):
'Determine whether this vector is not identical to other one.'
return not self.__eq__(other)
def __neg__(self):
return Vector3Index( self.index, - self.x, - self.y, - self.z )
def __nonzero__(self):
return self.x != 0 or self.y != 0 or self.z != 0
def __rdiv__(self, other):
'Get a new Vector3 by dividing each component of this one.'
return Vector3Index( self.index, other / self.x, other / self.y, other / self.z )
def __repr__(self):
'Get the string representation of this Vector3 index.'
return '(%s, %s, %s, %s)' % (self.index, self.x, self.y, self.z)
def __rfloordiv__(self, other):
'Get a new Vector3 by floor dividing each component of this one.'
return Vector3Index( self.index, other // self.x, other // self.y, other // self.z )
def __rmul__(self, other):
'Get a new Vector3 by multiplying each component of this one.'
return Vector3Index( self.index, self.x * other, self.y * other, self.z * other )
def __rtruediv__(self, other):
'Get a new Vector3 by true dividing each component of this one.'
return Vector3Index( self.index, operator.truediv( other , self.x ), operator.truediv( other, self.y ), operator.truediv( other, self.z ) )
def __sub__(self, other):
'Get the difference between the Vector3 and other one.'
return Vector3Index( self.index, self.x - other.x, self.y - other.y, self.z - other.z )
def __truediv__(self, other):
'Get a new Vector3 by true dividing each component of this one.'
return Vector3Index( self.index, operator.truediv( self.x, other ), operator.truediv( self.y, other ), operator.truediv( self.z, other ) )
def _getAccessibleAttribute(self, attributeName):
'Get the accessible attribute.'
global globalGetAccessibleAttributeSet
if attributeName in globalGetAccessibleAttributeSet:
return getattr(self, attributeName, None)
return None
def _setAccessibleAttribute(self, attributeName, value):
'Set the accessible attribute.'
if attributeName in globalSetAccessibleAttributeSet:
setattr(self, attributeName, value)
def cross(self, other):
'Calculate the cross product of this vector with other one.'
return Vector3Index( self.index, self.y * other.z - self.z * other.y, - self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x )
def distance(self, other):
'Get the Euclidean distance between this vector and other one.'
return math.sqrt( self.distanceSquared(other) )
def distanceSquared(self, other):
'Get the square of the Euclidean distance between this vector and other one.'
separationX = self.x - other.x
separationY = self.y - other.y
separationZ = self.z - other.z
return separationX * separationX + separationY * separationY + separationZ * separationZ
def dot(self, other):
'Calculate the dot product of this vector with other one.'
return self.x * other.x + self.y * other.y + self.z * other.z
def dropAxis( self, which = 2 ):
'Get a complex by removing one axis of the vector3.'
if which == 0:
return complex( self.y, self.z )
if which == 1:
return complex( self.x, self.z )
if which == 2:
return complex( self.x, self.y )
def getFloatList(self):
'Get the vector as a list of floats.'
return [ float( self.x ), float( self.y ), float( self.z ) ]
def getIsDefault(self):
'Determine if this is the zero vector.'
if self.x != 0.0:
return False
if self.y != 0.0:
return False
return self.z == 0.0
def getNormalized(self):
'Get the normalized Vector3.'
magnitude = abs(self)
if magnitude == 0.0:
return self.copy()
return self / magnitude
def magnitudeSquared(self):
'Get the square of the magnitude of the Vector3.'
return self.x * self.x + self.y * self.y + self.z * self.z
def maximize(self, other):
'Maximize the Vector3.'
self.x = max(other.x, self.x)
self.y = max(other.y, self.y)
self.z = max(other.z, self.z)
def minimize(self, other):
'Minimize the Vector3.'
self.x = min(other.x, self.x)
self.y = min(other.y, self.y)
self.z = min(other.z, self.z)
def normalize(self):
'Scale each component of this Vector3 so that it has a magnitude of 1. If this Vector3 has a magnitude of 0, this method has no effect.'
magnitude = abs(self)
if magnitude != 0.0:
self /= magnitude
def reflect( self, normal ):
'Reflect the Vector3 across the normal, which is assumed to be normalized.'
distance = 2 * ( self.x * normal.x + self.y * normal.y + self.z * normal.z )
return Vector3Index( self.index, self.x - distance * normal.x, self.y - distance * normal.y, self.z - distance * normal.z )
def setToVector3(self, other):
'Set this Vector3 to be identical to other one.'
self.x = other.x
self.y = other.y
self.z = other.z
def setToXYZ( self, x, y, z ):
'Set the x, y, and z components of this Vector3.'
self.x = x
self.y = y
self.z = z
globalGetAccessibleAttributeSet = 'x y z'.split()
globalSetAccessibleAttributeSet = globalGetAccessibleAttributeSet
| gpl-2.0 | -4,169,895,940,327,702,500 | 30 | 157 | 0.673111 | false |
Pasithea/Flask-RESTFul-Sample | sample/exception.py | 1 | 1163 | from flask import jsonify
class NotUniqueException(Exception):
pass
class ExistedException(Exception):
pass
class DoesNotExistsException(Exception):
pass
class HttpException(Exception):
pass
except_dict = {
'LoginFailed': {
'code': 403,
'message': "Login Failed"
},
'NeedAuth': {
'code': 403,
'message': "Need Auth"
},
'NotPermission': {
'code': 403,
'message': "Not Permission"
},
'GrandTypeError': {
'code': 400,
'message': "Grand Type Error"
},
'ParamsError': {
'code': 400,
'message': "Parameter Error"
}
}
def __init__(self, **kwargs):
self.message = self.message.format(**kwargs)
def __str__(self):
return self.message
def __repr__(self):
return self.message
exceptions_list = []
bases = (HttpException,)
attrs = {
'__init__': __init__,
'__str__': __str__,
'__repr__': __repr__
}
for (eklass_name, attr) in except_dict.items():
attrs.update(attr)
eklass = type(str(eklass_name), bases, attrs)
exceptions_list.append(eklass)
globals().update({eklass_name: eklass})
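# Illustrative usage, an assumption rather than code from this project: the
# loop above injects one HttpException subclass per except_dict entry into the
# module namespace, so a view could do something like
#
#   try:
#       raise LoginFailed()
#   except HttpException as e:
#       return jsonify(message=e.message), e.code
#
# which is presumably why flask.jsonify is imported at the top of this file.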
| mit | 345,196,142,304,916,500 | 16.621212 | 49 | 0.569218 | false |
akosel/servo | python/servo/devenv_commands.py | 31 | 4939 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
from os import path, getcwd, listdir
import subprocess
import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd
@CommandProvider
class MachCommands(CommandBase):
@Command('cargo',
description='Run Cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to Cargo")
def cargo(self, params):
if not params:
params = []
if self.context.topdir == getcwd():
with cd(path.join('components', 'servo')):
return subprocess.call(
["cargo"] + params, env=self.build_env())
return subprocess.call(['cargo'] + params,
env=self.build_env())
@Command('cargo-update',
description='Same as update-cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
def cargo_update(self, params=None, package=None, all_packages=None):
self.update_cargo(params, package, all_packages)
@Command('update-cargo',
description='Update Cargo dependencies',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
def update_cargo(self, params=None, package=None, all_packages=None):
if not params:
params = []
if package:
params += ["-p", package]
elif all_packages:
params = []
else:
print("Please choose package to update with the --package (-p) ")
print("flag or update all packages with --all-packages (-a) flag")
sys.exit(1)
cargo_paths = [path.join('components', 'servo'),
path.join('ports', 'cef'),
path.join('ports', 'gonk')]
for cargo_path in cargo_paths:
with cd(cargo_path):
print(cargo_path)
subprocess.call(["cargo", "update"] + params,
env=self.build_env())
@Command('rustc',
description='Run the Rust compiler',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to rustc")
def rustc(self, params):
if params is None:
params = []
return subprocess.call(["rustc"] + params, env=self.build_env())
@Command('rust-root',
description='Print the path to the root of the Rust compiler',
category='devenv')
def rust_root(self):
print(self.config["tools"]["rust-root"])
@Command('grep',
description='`git grep` for selected directories.',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to `git grep`")
def grep(self, params):
if not params:
params = []
# get all directories under tests/
tests_dirs = listdir('tests')
# Remove 'wpt' from obtained dir list
tests_dirs = filter(lambda dir: dir != 'wpt', tests_dirs)
# Set of directories in project root
root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
# Generate absolute paths for directories in tests/ and project-root/
tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
# Absolute paths for all directories to be considered
grep_paths = root_dirs_abs + tests_dirs_abs
return subprocess.call(["git"] + ["grep"] + params + ['--'] + grep_paths, env=self.build_env())
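# Example invocations, sketched from the @Command declarations above rather
# than from separate documentation:
#
#   ./mach cargo build --release      # forwarded to cargo in components/servo
#   ./mach update-cargo -p openssl    # update a single dependency
#   ./mach rust-root                  # print the configured Rust root
#   ./mach grep FIXME                 # git grep across the selected dirs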
| mpl-2.0 | -7,794,453,175,304,286,000 | 36.70229 | 103 | 0.591618 | false |
willthames/ansible | test/units/parsing/vault/test_vault.py | 13 | 18751 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import binascii
import io
import os
from binascii import hexlify
import pytest
from ansible.compat.tests import unittest
from ansible import errors
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.vault import VaultLib
from ansible.parsing import vault
class TestVaultIsEncrypted(unittest.TestCase):
def test_bytes_not_encrypted(self):
b_data = b"foobar"
self.assertFalse(vault.is_encrypted(b_data))
def test_bytes_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(vault.is_encrypted(b_data))
def test_text_not_encrypted(self):
b_data = to_text(b"foobar")
self.assertFalse(vault.is_encrypted(b_data))
def test_text_encrypted(self):
b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible"))
self.assertTrue(vault.is_encrypted(b_data))
def test_invalid_text_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
self.assertFalse(vault.is_encrypted(data))
def test_invalid_bytes_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data, encoding='utf-8')
self.assertFalse(vault.is_encrypted(b_data))
class TestVaultIsEncryptedFile(unittest.TestCase):
def test_binary_file_handle_not_encrypted(self):
b_data = b"foobar"
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_not_encrypted(self):
data = u"foobar"
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_encrypted(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible"))
data_fo = io.StringIO(data)
self.assertTrue(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data)
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_file_already_read_from_finds_header(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(42) # Arbitrary number
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_file_already_read_from_saves_file_pos(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(69) # Arbitrary number
vault.is_encrypted_file(b_data_fo)
self.assertEqual(b_data_fo.tell(), 69)
def test_file_with_offset(self):
b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4))
def test_file_with_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length))
def test_file_with_offset_and_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length))
@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
reason="Skipping cryptography tests because cryptography is not installed")
class TestVaultCipherAes256(unittest.TestCase):
def setUp(self):
self.vault_cipher = vault.VaultAES256()
def test(self):
self.assertIsInstance(self.vault_cipher, vault.VaultAES256)
# TODO: tag these as slow tests
def test_create_key_cryptography(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_cryptography, six.binary_type)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not testing pycrypto key as pycrypto is not installed')
def test_create_key_pycrypto(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_pycrypto, six.binary_type)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason='Not comparing cryptography key to pycrypto key as pycrypto is not installed')
def test_compare_new_keys(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertEqual(b_key_cryptography, b_key_pycrypto)
def test_create_key_known_cryptography(self):
b_password = b'hunter42'
# A fixed salt
b_salt = b'q' * 32 # q is the most random letter.
b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_1, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_2, six.binary_type)
self.assertEqual(b_key_1, b_key_2)
# And again with pycrypto
b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_3, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_4, six.binary_type)
self.assertEqual(b_key_3, b_key_4)
self.assertEqual(b_key_1, b_key_4)
def test_create_key_known_pycrypto(self):
b_password = b'hunter42'
# A fixed salt
b_salt = b'q' * 32 # q is the most random letter.
b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_3, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_4, six.binary_type)
self.assertEqual(b_key_3, b_key_4)
def test_is_equal_is_equal(self):
self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz'))
def test_is_equal_unequal_length(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y'))
def test_is_equal_not_equal(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ'))
def test_is_equal_empty(self):
self.assertTrue(self.vault_cipher._is_equal(b'', b''))
def test_is_equal_non_ascii_equal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data))
def test_is_equal_non_ascii_unequal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.')
# Test for the len optimization path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2))
# Test for the slower, char by char comparison path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P'))
def test_is_equal_non_bytes(self):
""" Anything not a byte string should raise a TypeError """
self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason="Skipping Pycrypto tests because pycrypto is not installed")
class TestVaultCipherAes256PyCrypto(TestVaultCipherAes256):
def setUp(self):
self.has_cryptography = vault.HAS_CRYPTOGRAPHY
vault.HAS_CRYPTOGRAPHY = False
super(TestVaultCipherAes256PyCrypto, self).setUp()
def tearDown(self):
vault.HAS_CRYPTOGRAPHY = self.has_cryptography
super(TestVaultCipherAes256PyCrypto, self).tearDown()
@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
reason="Skipping cryptography tests because cryptography is not installed")
class TestVaultLib(unittest.TestCase):
def setUp(self):
self.v = VaultLib('test-vault-password')
def test_encrypt(self):
plaintext = u'Some text to encrypt in a café'
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_encrypt_bytes(self):
plaintext = to_bytes(u'Some text to encrypt in a café')
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_is_encrypted(self):
self.assertFalse(self.v.is_encrypted(b"foobar"), msg="encryption check on plaintext yielded false positive")
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(self.v.is_encrypted(b_data), msg="encryption check on headered text failed")
def test_format_output(self):
self.v.cipher_name = "TEST"
b_ciphertext = b"ansible"
b_vaulttext = self.v._format_output(b_ciphertext)
b_lines = b_vaulttext.split(b'\n')
self.assertGreater(len(b_lines), 1, msg="failed to properly add header")
b_header = b_lines[0]
self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name")
b_header_parts = b_header.split(b';')
self.assertEqual(len(b_header_parts), 3, msg="header has the wrong number of parts")
self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT")
self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect")
self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name")
def test_split_header(self):
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
b_ciphertext = self.v._split_header(b_vaulttext)
b_lines = b_ciphertext.split(b'\n')
self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header")
self.assertEqual(self.v.cipher_name, u'TEST', msg="cipher name was not properly set")
self.assertEqual(self.v.b_version, b"9.9", msg="version was not properly set")
def test_encrypt_decrypt_aes(self):
self.v.cipher_name = u'AES'
self.v.b_password = b'ansible'
# AES encryption code has been removed, so this is old output for
# AES-encrypted 'foobar' with password 'ansible'.
b_vaulttext = b'''$ANSIBLE_VAULT;1.1;AES
53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3
fe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e
786a5a15efeb787e1958cbdd480d076c
'''
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256(self):
self.v.cipher_name = u'AES256'
plaintext = u"foobar"
b_vaulttext = self.v.encrypt(plaintext)
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed")
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256_existing_vault(self):
self.v.cipher_name = u'AES256'
b_orig_plaintext = b"Setec Astronomy"
vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_plaintext = self.v.decrypt(vaulttext)
self.assertEqual(b_plaintext, b_plaintext, msg="decryption failed")
b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
# FIXME This test isn't working quite yet.
@pytest.mark.skip(reason='This test is not ready yet')
def test_encrypt_decrypt_aes256_bad_hmac(self):
self.v.cipher_name = 'AES256'
# plaintext = "Setec Astronomy"
enc_data = '''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_data = to_bytes(enc_data, errors='strict', encoding='utf-8')
b_data = self.v._split_header(b_data)
foo = binascii.unhexlify(b_data)
lines = foo.splitlines()
# line 0 is salt, line 1 is hmac, line 2+ is ciphertext
b_salt = lines[0]
b_hmac = lines[1]
b_ciphertext_data = b'\n'.join(lines[2:])
b_ciphertext = binascii.unhexlify(b_ciphertext_data)
# b_orig_ciphertext = b_ciphertext[:]
# now muck with the text
# b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:]
# b_munged_ciphertext = b_ciphertext
# assert b_orig_ciphertext != b_munged_ciphertext
b_ciphertext_data = binascii.hexlify(b_ciphertext)
b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data])
# reformat
b_invalid_ciphertext = self.v._format_output(b_payload)
# assert we throw an error
self.v.decrypt(b_invalid_ciphertext)
def test_encrypt_encrypted(self):
self.v.cipher_name = u'AES'
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
vaulttext = to_text(b_vaulttext, errors='strict')
self.assertRaises(errors.AnsibleError, self.v.encrypt, b_vaulttext)
self.assertRaises(errors.AnsibleError, self.v.encrypt, vaulttext)
def test_decrypt_decrypted(self):
plaintext = u"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext)
b_plaintext = b"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext)
def test_cipher_not_set(self):
plaintext = u"ansible"
self.v.encrypt(plaintext)
self.assertEquals(self.v.cipher_name, "AES256")
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason="Skipping Pycrypto tests because pycrypto is not installed")
class TestVaultLibPyCrypto(TestVaultLib):
def setUp(self):
self.has_cryptography = vault.HAS_CRYPTOGRAPHY
vault.HAS_CRYPTOGRAPHY = False
super(TestVaultLibPyCrypto, self).setUp()
def tearDown(self):
vault.HAS_CRYPTOGRAPHY = self.has_cryptography
super(TestVaultLibPyCrypto, self).tearDown()
| gpl-3.0 | 3,975,710,180,156,028,400 | 43.786925 | 129 | 0.678272 | false |
alphatwirl/alphatwirl | alphatwirl/collector/ToDataFrameWithDatasetColumn.py | 1 | 1326 | # Tai Sakuma <[email protected]>
import pandas as pd
from .ToTupleListWithDatasetColumn import ToTupleListWithDatasetColumn
##__________________________________________________________________||
class ToDataFrameWithDatasetColumn:
def __init__(self, summaryColumnNames,
datasetColumnName = 'component'
):
self.summaryColumnNames = summaryColumnNames
self.datasetColumnName = datasetColumnName
self.to_tuple_list = ToTupleListWithDatasetColumn(
summaryColumnNames = summaryColumnNames,
datasetColumnName = datasetColumnName)
def __repr__(self):
name_value_pairs = (
('summaryColumnNames', self.summaryColumnNames),
('datasetColumnName', self.datasetColumnName),
)
return '{}({})'.format(
self.__class__.__name__,
', '.join(['{} = {!r}'.format(n, v) for n, v in name_value_pairs]),
)
def combine(self, dataset_readers_list):
tuple_list = self.to_tuple_list.combine(dataset_readers_list)
if tuple_list is None:
return None
header = tuple_list[0]
contents = tuple_list[1:]
return pd.DataFrame(contents, columns = header)
##__________________________________________________________________||
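## Illustrative usage, an assumption rather than code from this module:
##
##   to_df = ToDataFrameWithDatasetColumn(
##       summaryColumnNames=('htbin', 'n', 'nvar'),
##       datasetColumnName='component')
##   df = to_df.combine(dataset_readers_list)
##
## where dataset_readers_list is the list of (dataset, readers) pairs produced
## elsewhere by alphatwirl; the result is a pandas DataFrame with an extra
## 'component' column naming the dataset each row came from.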
| bsd-3-clause | 3,453,909,577,626,807,300 | 33.894737 | 79 | 0.549774 | false |
FilipDominec/python-meep-utils | scripts_postpro/plot_TY.py | 1 | 4307 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
simtime = 80e-12
size_y = 1400e-6
c = 3e8
maxfreq = 2e12
## Import common moduli
import numpy as np
from scipy.constants import c, hbar, pi
import matplotlib, sys, os, time
import matplotlib.pyplot as plt
## Start figure + subplot (interactive)
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111, axisbg='w')
fig.subplots_adjust(left=.05, bottom=.05, right=.99, top=.99, wspace=.05, hspace=.05)
## Decide the filename to load data
import sys
filename = sys.argv[1] if len(sys.argv)>1 else 'input.dat'
if not os.path.isfile(filename): raise IOError, 'File %s can not be opened!' % filename
## Load n-dimensional arrays from a HDF5 file
import h5py
h5file = h5py.File(filename, "r")
print "Found datasets:", h5file.keys()
time1 = time.time()
data = np.array(h5file['ex.r']) * (1+0j)
data += np.array(h5file['ex.i']) * 1j
print "Loaded dataset with shape:", data.shape, 'in %04d s.' % (time.time()-time1)
try:
Et = data[:,-1,:] ## take the farthest slice by the z-axis
except IndexError:
Et = data ## if data already 2D
t = np.linspace(0, simtime, Et.shape[1]) ## define the dimension of data axes
y = np.linspace(0, size_y, Et.shape[0])
## Export n-dimensional arrays to a HDF5 file
## Fourier transform
freq = np.fft.fftfreq(len(t), d=(t[1]-t[0])) # calculate the frequency axis with proper spacing
Efy = np.fft.fft(Et, axis=1) / len(t) * 2*np.pi # calculate the FFT values
#def ffts(arr):
#return np.hstack([arr[len(arr)/2+1:], arr[:len(arr)/2]])
def ffts2(arr):
return np.vstack([arr[len(arr)/2:,:], arr[:len(arr)/2,:]])
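# Added remark: for even-length input this manual half-swap along axis 0 is
# equivalent to np.fft.fftshift(arr, axes=0).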
#freq = ffts(freq)
#Efy = ffts2(Efy)
freq = np.fft.fftshift(freq) #+ freq[len(freq)/2]
Efy = np.fft.fftshift(Efy)
kT = np.fft.fftfreq(len(y), d=(y[1]-y[0])) # calculate the frequency axis with proper spacing
Ef = np.fft.fft(Efy, axis=0) / len(y) * 2*np.pi # calculate the FFT values
kT = np.fft.fftshift(kT)
#Ef = np.fft.fftshift(Ef)
print Ef.shape
Ef = ffts2(Ef)
print Ef.shape
truncated = np.logical_and(freq>0, freq<maxfreq) # (optional) get the frequency range
freq = freq[truncated]
Ef = Ef[:,truncated]
print 'freq', freq.shape, freq[::10]
print 'kT', kT.shape, kT[::10]
## plot contours for gridded data
#contours = plt.contourf(t, y, np.log10(np.abs(et)+1e-6), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#contours = plt.contourf(t, y, et, cmap=matplotlib.cm.rdbu, extend='both') # levels=np.arange(0.,1,.01),
toplot = (np.abs(Et))
contours = plt.contourf(t, y, toplot, cmap=matplotlib.cm.gist_earth, levels=np.linspace(np.min(toplot)*0+np.max(toplot)*0,np.max(toplot),200) ,extend='both') #
#contours = plt.contourf(freq, kT, np.abs(Ef), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#plt.plot([0, maxfreq], [0, 0], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, maxfreq/c], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, -maxfreq/c], c='w',lw=.5)
#plt.annotate('+45$^\\circ$', xy = (maxfreq/2, maxfreq/2/c), xytext = (-10, 10), textcoords='offset points',color='w')
#plt.annotate('-45$^\\circ$', xy = (maxfreq/2, -maxfreq/2/c), xytext = (10, 10), textcoords='offset points',color='w')
#
try:
## Load 1D curve
filename = "effparam.dat"
(x, y) = np.loadtxt(filename, usecols=(0,5), unpack=True)
truncated = np.logical_and(x>0, x<maxfreq) # (optional) get the frequency range
x = x[truncated]
y = y[truncated]
## Plot line
plt.plot(x, np.real(y)*1000, color="#FF8800", label=u"$y'$", ls='-', c='w',lw=1)
except:
print "refractive index could not be loaded"
for contour in contours.collections: contour.set_antialiased(False) ## optional: avoid white aliasing (for matplotlib 1.0.1 and older)
plt.colorbar() ## optional: colorbar
## Finish the plot + save
#plt.ylim((-2e4,2e4))
plt.xlabel(u"time");
plt.ylabel(u"y");
plt.grid()
plt.legend(prop={'size':10}, loc='upper right')
plt.savefig("output_T-Y.png", bbox_inches='tight')
| gpl-2.0 | 3,451,224,569,395,763,000 | 35.193277 | 162 | 0.636174 | false |
danielharbor/openerp | addons/stock_picking_wave/__init__.py | 374 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,129,930,119,616,902,100 | 41.5 | 78 | 0.624434 | false |
xzturn/tensorflow | tensorflow/python/distribute/input_lib.py | 2 | 49393 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
def get_distributed_dataset(dataset,
input_workers,
strategy,
split_batch_by=None,
input_context=None):
"""Returns a wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance.
This is a common function that is used by all strategies to return the right
tf.data.Dataset wrapped instance depending on the `dataset` argument type.
Args:
dataset: a tf.data.DatasetV1 or tf.data.DatasetV2 instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
split_batch_by: Optional integer. If present, we "split" each batch of the
dataset by `split_batch_by` value.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
Returns:
A wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance.
"""
if isinstance(dataset, dataset_ops.DatasetV1):
return DistributedDatasetV1(
dataset,
input_workers,
strategy,
split_batch_by=split_batch_by,
input_context=input_context)
else:
return DistributedDataset(
dataset,
input_workers,
strategy,
split_batch_by=split_batch_by,
input_context=input_context)
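# Example call (illustrative only; the dataset, input_workers and strategy
# objects are assumptions): a strategy's `experimental_distribute_dataset`
# typically ends up here with something like
#
#   dist_dataset = get_distributed_dataset(
#       dataset, input_workers, strategy,
#       split_batch_by=strategy.num_replicas_in_sync)
#
# so that each global batch is split into `num_replicas_in_sync` per-replica
# batches before being dispatched to the devices described by `input_workers`.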
def get_distributed_datasets_from_function(dataset_fn,
input_workers,
input_contexts,
strategy):
"""Returns a wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance.
This is a common function that is used by all strategies to return the right
tf.data.Dataset wrapped instance depending on if we are in graph or eager
mode.
Args:
dataset_fn: a function that returns a tf.data.DatasetV1 or tf.data.DatasetV2
instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
Returns:
A wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance.
"""
if ops.executing_eagerly_outside_functions():
return DistributedDatasetsFromFunction(
dataset_fn,
input_workers,
input_contexts,
strategy)
else:
return DistributedDatasetsFromFunctionV1(
dataset_fn,
input_workers,
input_contexts,
strategy)
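# Example `dataset_fn` (illustrative only): each call receives one
# `tf.distribute.InputContext` and is expected to shard and batch per input
# pipeline, e.g.
#
#   def dataset_fn(ctx):
#     d = tf.data.Dataset.range(1000)
#     d = d.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
#     return d.batch(ctx.get_per_replica_batch_size(64))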
class InputWorkers(object):
"""A 1-to-many mapping from input worker devices to compute devices."""
def __init__(self, worker_device_pairs):
"""Initialize an `InputWorkers` object.
Args:
worker_device_pairs: A sequence of pairs:
`(input device, a tuple of compute devices fed by that input device)`.
"""
self._input_worker_devices = tuple(d for d, _ in worker_device_pairs)
self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f)
for _, f in worker_device_pairs)
@property
def num_workers(self):
return len(self._input_worker_devices)
@property
def worker_devices(self):
return self._input_worker_devices
def compute_devices_for_worker(self, worker_index):
return self._fed_devices[worker_index]
def __repr__(self):
devices = self.worker_devices
debug_repr = ",\n".join(" %d %s: %s" %
(i, devices[i], self._fed_devices[i])
for i in range(len(devices)))
return "%s:{\n%s}" % (self.__class__.__name__, debug_repr)
def _get_next_as_optional(iterator, strategy, name=None):
"""Returns an empty dataset indicator and the next input from the iterator."""
replicas = []
worker_has_values = []
worker_devices = []
for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
worker_has_value, next_element = (
iterator._iterators[i].get_next_as_list(new_name)) # pylint: disable=protected-access
# Collective all-reduce requires explicit devices for inputs.
with ops.device("/cpu:0"):
# Converting to integers for all-reduce.
worker_has_value = math_ops.cast(worker_has_value, dtypes.int32)
worker_devices.append(worker_has_value.device)
worker_has_values.append(worker_has_value)
# Make `replicas` a flat list of values across all replicas.
replicas.append(next_element)
# Run an all-reduce to see whether any worker has values.
# TODO(b/131423105): we should be able to short-cut the all-reduce in some
# cases.
if getattr(strategy.extended, "_support_per_replica_values", True):
# Slight hack: `reduce` expects a `PerReplica`, so we pass it one, even
# though it doesn't actually have a value per replica.
worker_has_values = values.PerReplica(worker_has_values)
global_has_value = strategy.reduce(
reduce_util.ReduceOp.SUM, worker_has_values, axis=None)
else:
assert len(worker_has_values) == 1
global_has_value = worker_has_values[0]
global_has_value = array_ops.reshape(
math_ops.cast(global_has_value, dtypes.bool), [])
return global_has_value, replicas
def _is_statically_shaped(tensor_class, shape):
"""Test if an iteratort output is statically shaped.
For sparse and ragged tensors this only tests the batch dimension.
Args:
tensor_class: a class from an iterator.output_classes list.
shape: a TensorShape from an iterator.output_shapes list.
Returns:
True if the shape is static, false otherwise.
"""
if (tensor_class == sparse_tensor.SparseTensor or
isinstance(tensor_class, ragged_tensor.RaggedTensorSpec)):
# For sparse or ragged tensor, we should only check the first
# dimension in order to get_next_as_optional. This is because
# when these tensors get batched by dataset only the batch dimension
# is set.
if shape.rank > 0 and shape.as_list()[0] is None:
return False
return True
return shape.is_fully_defined()
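# Illustrative behaviour (not an exhaustive spec): a dense output with shape
# [32, 100] counts as static, while a sparse or ragged output whose batch
# dimension is unknown does not, e.g.
#
#   _is_statically_shaped(ops.Tensor, tensor_shape.TensorShape([32, 100]))   # True
#   _is_statically_shaped(sparse_tensor.SparseTensor,
#                         tensor_shape.TensorShape([None, 3]))               # False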
class DistributedIterator(object):
"""Common implementation for all input iterators."""
def __init__(self, input_workers, iterators, strategy):
static_shape = True
for iterator in iterators:
if not isinstance(iterator, _SingleWorkerDatasetIterator):
continue
flattened = zip(nest.flatten(iterator.output_shapes),
nest.flatten(iterator.output_classes))
for output_shape, output_class in flattened:
if not _is_statically_shaped(output_class, output_shape):
static_shape = False
break
# TODO(b/133073708): we currently need a flag to control the usage because
# there is a performance difference between get_next() and
# get_next_as_optional(). And we only enable get_next_as_optional when the
# output shapes are not static.
#
# TODO(yuefengz): Currently `experimental_enable_get_next_as_optional` is
# always set to False in CollectiveAllReduceStrategy. We want to have a way
# to distinguish multi workers/single worker between graph, so we can enable
# the behavior in single worker case.
#
# TODO(rxsang): We want to always enable the get_next_as_optional behavior
# when user passed input_fn instead of dataset.
if getattr(
strategy.extended, "experimental_enable_get_next_as_optional", False):
self._enable_get_next_as_optional = not static_shape
else:
self._enable_get_next_as_optional = False
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
self._strategy = strategy
def next(self):
return self.__next__()
def __next__(self):
try:
return self.get_next()
except errors.OutOfRangeError:
raise StopIteration
def __iter__(self):
return self
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
if not self._enable_get_next_as_optional:
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(
self._iterators[i].get_next_as_list_static_shapes(new_name))
return values.regroup(replicas)
out_of_range_replicas = []
def out_of_range_fn(worker_index, device):
"""This function will throw an OutOfRange error."""
      # This is only called when there is no data left, so calling
      # get_next() will trigger an OutOfRange error.
data = self._iterators[worker_index].get_next(device)
out_of_range_replicas.append(data)
return data
global_has_value, replicas = _get_next_as_optional(self, self._strategy)
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
# pylint: disable=undefined-loop-variable
# pylint: disable=cell-var-from-loop
# It is fine for the lambda to capture variables from the loop as
# the lambda is executed in the loop as well.
result = control_flow_ops.cond(
global_has_value,
lambda: replicas[i][j],
lambda: out_of_range_fn(i, device),
strict=True,
)
# pylint: enable=cell-var-from-loop
# pylint: enable=undefined-loop-variable
results.append(result)
replicas = results
# Some dimensions in `replicas` will become unknown after we conditionally
# return the real tensors or the dummy tensors. We fix the input shapes by
# using the shapes from `out_of_range_replicas` because it is calling
# get_next() inside.
flattened_replicas = nest.flatten(replicas)
for i, replica_data in enumerate(nest.flatten(out_of_range_replicas)):
for target, source in zip(
nest.flatten(flattened_replicas[i], expand_composites=True),
nest.flatten(replica_data, expand_composites=True)):
target.set_shape(source.get_shape())
# `SparseTensor` shape is not determined by the shape of its component
# tensors. Rather, its shape depends on a tensor's values.
if sparse_tensor.is_sparse(replica_data) and replica_data.get_shape():
dense_shape = replica_data.get_shape()
with ops.device(flattened_replicas[i].op.device):
# For partially defined shapes, fill in missing values from tensor.
if not dense_shape.is_fully_defined():
dense_shape = array_ops.stack([
flattened_replicas[i].dense_shape[j] if dim is None else dim
for j, dim in enumerate(dense_shape.as_list())
])
flattened_replicas[i] = sparse_tensor.SparseTensor(
indices=flattened_replicas[i].indices,
values=flattened_replicas[i].values,
dense_shape=dense_shape)
replicas = nest.pack_sequence_as(replicas, flattened_replicas)
return values.regroup(replicas)
# We need a private initializer method for re-initializing multidevice
# iterators when used with Keras training loops. If we don't reinitialize the
# iterator we run into memory leak issues (b/123315763).
@property
def _initializer(self):
init_ops = []
for it in self._iterators:
init_ops.extend(it.initialize())
return control_flow_ops.group(init_ops)
@property
def element_spec(self):
"""The type specification of an element of this iterator."""
return self._element_spec
class DistributedIteratorV1(DistributedIterator):
"""Input Iterator for tf.data.DatasetV1."""
@deprecated(None, "Use the iterator's `initializer` property instead.")
def initialize(self):
"""Initialze underlying iterators.
Returns:
A list of any initializer ops that should be run.
"""
return super(DistributedIteratorV1, self)._initializer
@property
def initializer(self):
"""Returns a list of ops that initialize the iterator."""
return self.initialize()
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_classes(self):
return self._iterators[0].output_classes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_shapes(self):
return self._iterators[0].output_shapes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_types(self):
return self._iterators[0].output_types
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
def get_iterator(self, worker):
for i, w in enumerate(self._input_workers.worker_devices):
if worker == w:
return self._iterators[i]
return None
class _IterableInput(object):
"""Base class for iterable inputs for distribution strategies."""
def __init__(self, input_workers):
assert isinstance(input_workers, InputWorkers)
self._input_workers = input_workers
def __iter__(self):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, initial_state, reduce_fn):
"""Execute a `reduce_fn` over all the elements of the input."""
iterator = iter(self)
has_data, data = _get_next_as_optional(iterator, self._strategy)
def cond(has_data, data, state):
del data, state # Unused.
return has_data
def loop_body(has_data, data, state):
"""Executes `reduce_fn` in a loop till the dataset is empty."""
del has_data # Unused.
# data is list of lists here. where each list corresponds to one worker.
# TODO(b/130570614): Add support for the multiworker and TPU pods use
# case.
if self._input_workers.num_workers == 1:
data = data[0]
else:
raise ValueError("Dataset iteration within a tf.function is"
" not supported for multiple workers.")
state = reduce_fn(state, values.regroup(data))
has_data, data = _get_next_as_optional(iterator, self._strategy)
return has_data, data, state
has_data, data, final_state = control_flow_ops.while_loop(
cond, loop_body, [has_data, data, initial_state], parallel_iterations=1)
return final_state
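    # Usage sketch (illustrative only; `dist_dataset` and the lambda below are
    # hypothetical): summing a scalar feature over the whole input in the
    # single-replica case would look roughly like
    #   total = dist_dataset.reduce(
    #       constant_op.constant(0.0),
    #       lambda state, batch: state + math_ops.reduce_sum(batch))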
class DistributedDataset(_IterableInput):
"""Wrapped tf.data.DatasetV2 that supports prefetching to multiple devices."""
def __init__(self,
dataset,
input_workers,
strategy,
split_batch_by=None,
input_context=None):
"""Distribute the dataset on all workers.
If `split_batch_by` is not None, we "split" each batch of the dataset by
`split_batch_by` value.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
split_batch_by: Optional integer. If present, we "split" each batch of the
dataset by `split_batch_by` value.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
super(DistributedDataset, self).__init__(input_workers=input_workers)
# We clone and shard the dataset on each worker. The current setup tries to
# shard the dataset by files if possible so that each worker sees a
# different subset of files. If that is not possible, will attempt to shard
# the final input such that each worker will run the entire preprocessing
# pipeline and only receive its own shard of the dataset.
if split_batch_by:
try:
# pylint: disable=protected-access
with ops.colocate_with(dataset._variant_tensor):
dataset = distribute._RebatchDataset(dataset, split_batch_by)
# Add a prefetch to pipeline rebatching for performance.
# TODO(rachelim): Instead of inserting an extra prefetch stage here,
# leverage static graph rewrites to insert _RebatchDataset before
# the final `prefetch` if it exists.
dataset = dataset.prefetch(split_batch_by)
except errors.InvalidArgumentError as e:
if "without encountering a batch" in str(e):
six.reraise(
ValueError,
ValueError(
"Call the `batch` method on the input Dataset in order to be "
"able to split your input across {} replicas.\n Please "
"the tf.distribute.Strategy guide. {}".format(
split_batch_by, e)),
sys.exc_info()[2])
else:
raise
# TODO(b/138745411): Remove once stateful transformations are supported.
options = dataset_ops.Options()
options.experimental_distribute._make_stateless = True # pylint: disable=protected-access
dataset = dataset.with_options(options)
self._cloned_datasets = []
if input_context:
# Between-graph where we rely on the input_context for sharding
assert input_workers.num_workers == 1
dataset = input_ops.auto_shard_dataset(dataset,
input_context.num_input_pipelines,
input_context.input_pipeline_id)
self._cloned_datasets.append(dataset)
else:
replicated_ds = distribute.replicate(dataset,
input_workers.worker_devices)
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
cloned_dataset = replicated_ds[worker]
cloned_dataset = cloned_dataset.with_options(dataset.options())
cloned_dataset = input_ops.auto_shard_dataset(
cloned_dataset, len(input_workers.worker_devices), i)
self._cloned_datasets.append(cloned_dataset)
self._input_workers = input_workers
self._strategy = strategy
self._element_spec = _create_distributed_tensor_spec(self._strategy,
dataset.element_spec) # pylint: disable=protected-access
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers)
iterator = DistributedIterator(self._input_workers, worker_iterators,
self._strategy)
iterator._element_spec = self.element_spec # pylint: disable=protected-access
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
return self._element_spec
class DistributedDatasetV1(DistributedDataset):
"""Wrapped tf.data.DatasetV1 that supports prefetching to multiple devices."""
def __init__(self,
dataset,
input_workers,
strategy,
split_batch_by=None,
input_context=None):
self._input_workers = input_workers
super(DistributedDatasetV1, self).__init__(
dataset,
input_workers,
strategy,
split_batch_by=split_batch_by,
input_context=input_context)
def make_one_shot_iterator(self):
"""Get a one time use iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use `for ... in dataset:` to iterate
over the dataset or `iter` to create an iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for DistributedDatasetV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def make_initializable_iterator(self):
"""Get an initializable iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use
`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an
initializable iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_initializable_iterator()
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=unused-argument
"""Get an initializable iterator for DistributedDatasetV1."""
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _get_iterator(self):
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers)
iterator = DistributedIteratorV1(self._input_workers, worker_iterators,
self._strategy)
iterator._element_spec = self.element_spec # pylint: disable=protected-access
return iterator
# TODO(priyag): Add other replication modes.
class DistributedDatasetsFromFunction(_IterableInput):
"""Inputs created from dataset function."""
def __init__(self, dataset_fn, input_workers, input_contexts, strategy):
"""Makes an iterable from datasets created by the given function.
Args:
dataset_fn: A function that returns a `Dataset` given an `InputContext`.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
"""
super(DistributedDatasetsFromFunction, self).__init__(
input_workers=input_workers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
self._dataset_fn = dataset_fn
self._input_workers = input_workers
self._input_contexts = input_contexts
self._strategy = strategy
self._element_spec = None
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
iterators, element_spec = _create_iterators_per_worker_with_input_context(
self._input_contexts, self._input_workers, self._dataset_fn)
iterator = DistributedIterator(self._input_workers, iterators,
self._strategy)
self._element_spec = _create_distributed_tensor_spec(self._strategy,
element_spec)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
if self._element_spec is None:
raise ValueError("You must create an iterator before calling "
"`element_spec` on the distributed dataset or iterator. "
"This is because the dataset function is not called "
"before an iterator is created.")
return self._element_spec
class DistributedDatasetsFromFunctionV1(DistributedDatasetsFromFunction):
"""Inputs created from dataset function."""
def _make_initializable_iterator(self, shared_name=None):
"""Get an initializable iterator for DistributedDatasetsFromFunctionV1."""
del shared_name # Unused
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for iterating over DistributedDatasetsFromFunctionV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def _get_iterator(self):
iterators, element_spec = _create_iterators_per_worker_with_input_context(
self._input_contexts, self._input_workers, self._dataset_fn)
iterator = DistributedIteratorV1(self._input_workers, iterators,
self._strategy)
self._element_spec = _create_distributed_tensor_spec(self._strategy,
element_spec)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
return iterator
# TODO(anjalisridhar): This class will be soon be removed in favor of newer
# APIs.
class InputFunctionIterator(DistributedIteratorV1):
"""Iterator created from input function."""
def __init__(self, input_fn, input_workers, input_contexts, strategy):
"""Make an iterator for input provided via an input function.
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
"""
assert isinstance(input_workers, InputWorkers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
result = input_fn(ctx)
devices = input_workers.compute_devices_for_worker(i)
if isinstance(result, dataset_ops.DatasetV2):
iterator = _SingleWorkerDatasetIterator(result, worker, devices)
elif callable(result):
iterator = _SingleWorkerCallableIterator(result, worker, devices)
else:
raise ValueError(
"input_fn must return a tf.data.Dataset or a callable.")
iterators.append(iterator)
super(InputFunctionIterator, self).__init__(input_workers, iterators,
strategy)
# TODO(anjalisridhar): This class will soon be removed and users should move
# to using DistributedIterator.
class DatasetIterator(DistributedIteratorV1):
"""Iterator created from input dataset."""
def __init__(self,
dataset,
input_workers,
strategy,
split_batch_by=None,
input_context=None):
"""Make an iterator for the dataset on given devices.
If `split_batch_by` is not None, we "split" each batch of the
dataset by `split_batch_by` value.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
split_batch_by: Optional integer. If present, we "split" each batch of the
dataset by `split_batch_by` value.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
dist_dataset = DistributedDatasetV1(
dataset,
input_workers,
strategy,
split_batch_by=split_batch_by,
input_context=input_context)
worker_iterators = _create_iterators_per_worker(
dist_dataset._cloned_datasets, input_workers) # pylint: disable=protected-access
super(DatasetIterator, self).__init__(
input_workers,
worker_iterators, # pylint: disable=protected-access
strategy)
self._element_spec = dist_dataset.element_spec
def _dummy_tensor_fn(value_structure):
"""A function to create dummy tensors from `value_structure`."""
def create_dummy_tensor(type_spec):
"""Create a dummy tensor with possible batch dimensions set to 0."""
if isinstance(type_spec, ragged_tensor.RaggedTensorSpec):
# Splice out the ragged dimensions.
# pylint: disable=protected-access
feature_shape = type_spec._shape[:1].concatenate(
type_spec._shape[(1 + type_spec._ragged_rank):])
feature_type = type_spec._dtype
# pylint: enable=protected-access
else:
feature_shape = type_spec.shape
feature_type = type_spec.dtype
    # Ideally we should set the batch dimension to 0; however, since in
    # DistributionStrategy we don't know the batch dimension, we guess it as
    # well as we can. If the feature has unknown dimensions, we set them to 0.
    # If the feature shape is already static, we treat the first dimension as
    # the batch dimension and set it to 0.
dims = ([dim if dim is not None else 0 for dim in feature_shape.as_list()]
if feature_shape else [])
if dims and (isinstance(type_spec, ragged_tensor.RaggedTensorSpec) or
feature_shape.is_fully_defined()):
dims[0] = tensor_shape.Dimension(0)
if isinstance(type_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensor(
values=array_ops.zeros(0, feature_type),
indices=array_ops.zeros((0, len(dims)), dtypes.int64),
dense_shape=dims)
# Create the dummy tensor.
dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)
if isinstance(type_spec, ragged_tensor.RaggedTensorSpec):
# Reinsert the ragged dimensions with size 0.
# pylint: disable=protected-access
row_splits = array_ops.zeros(1, type_spec._row_splits_dtype)
dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(
dummy_tensor, (row_splits,) * type_spec._ragged_rank, validate=False)
# pylint: enable=protected-access
return dummy_tensor
return nest.map_structure(create_dummy_tensor, value_structure)
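  # For example, for a value_structure of tf.TensorSpec(shape=(None, 32),
  # dtype=tf.float32) the dummy produced above is a float32 tensor of shape
  # (0, 32): the unknown batch dimension is replaced by 0, so downstream ops
  # receive an empty batch instead of failing once the iterator is exhausted.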
class _SingleWorkerDatasetIterator(object):
"""Iterator for a single `tf.data.Dataset`."""
def __init__(self, dataset, worker, devices):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
`MultiDeviceIterator` is used to prefetch input to the devices on the
given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
"""
self._dataset = dataset
self._worker = worker
self._devices = devices
self._make_iterator()
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset, self._devices)
def get_next(self, device, name=None):
"""Get next element for the given device."""
del name
with ops.device(self._worker):
return self._iterator.get_next(device)
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the underlying iterator.
Runs the iterator get_next() within a device scope. Since this doesn't use
    get_next_as_optional(), it is considerably faster than get_next_as_list()
(but can only be used when the shapes are static).
Args:
name: not used.
Returns:
A list consisting of the next data from each device.
"""
del name
with ops.device(self._worker):
return self._iterator.get_next()
def get_next_as_list(self, name=None):
"""Get next element from underlying iterator.
If there is no data left, a list of dummy tensors with possible batch
dimensions set to 0 will be returned. Use of get_next_as_optional() and
extra logic adds overhead compared to get_next_as_list_static_shapes(), but
allows us to handle non-static shapes.
Args:
name: not used.
Returns:
      A boolean tensor indicating whether there is any data in the next
      element, and the real data as the next element or a list of dummy
      tensors if no data is left.
"""
del name
with ops.device(self._worker):
data_list = self._iterator.get_next_as_optional()
result = []
for i, data in enumerate(data_list):
# Place the condition op in the same device as the data so the data
# doesn't need to be sent back to the worker.
with ops.device(self._devices[i]):
          # Since MultiDeviceIterator fetches data in order, we only need to
          # check whether the first replica has a value to see whether there
          # is data left for this single worker.
if i == 0:
worker_has_value = data.has_value()
# pylint: disable=unnecessary-lambda
# pylint: disable=cell-var-from-loop
real_data = control_flow_ops.cond(
data.has_value(),
lambda: data.get_value(),
lambda: _dummy_tensor_fn(data.value_structure),
strict=True,
)
result.append(real_data)
# pylint: enable=cell-var-from-loop
# pylint: enable=unnecessary-lambda
return worker_has_value, result
def initialize(self):
"""Initialze underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
class _SingleWorkerCallableIterator(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next(self, device, name=None):
"""Get next element for the given device from the callable."""
del device, name
with ops.device(self._worker):
return self._fn()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return constant_op.constant(True), data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
def _create_iterators_per_worker(worker_datasets, input_workers):
"""Create a multidevice iterator on each of the workers."""
assert isinstance(input_workers, InputWorkers)
assert len(worker_datasets) == len(input_workers.worker_devices)
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker,
worker_devices)
iterators.append(iterator)
return iterators
def _create_iterators_per_worker_with_input_context(input_contexts,
input_workers,
dataset_fn):
"""Create a multidevice iterator per workers given a dataset function."""
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
dataset = dataset_fn(ctx)
# TODO(b/138745411): Remove once stateful transformations are supported.
options = dataset_ops.Options()
options.experimental_distribute._make_stateless = True # pylint: disable=protected-access
dataset = dataset.with_options(options)
devices = input_workers.compute_devices_for_worker(i)
iterator = _SingleWorkerDatasetIterator(dataset, worker, devices)
iterators.append(iterator)
return iterators, dataset.element_spec
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_batched_dataset(d):
"""Get the batched dataset from `d`."""
# pylint: disable=protected-access
if isinstance(d, dataset_ops.DatasetV1Adapter):
d = d._dataset
if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
return d
elif isinstance(d, (dataset_ops.PrefetchDataset,
dataset_ops._OptionsDataset)):
return _get_batched_dataset(d._input_dataset)
raise ValueError(
"Unable to get batched dataset from the input dataset. `batch` "
"`map_and_batch` need to be the last operations on the dataset. "
"The batch operations can be followed by a prefetch.")
def _get_batched_dataset_attributes(d):
"""Get `batch_size`, `drop_remainder` of dataset."""
# pylint: disable=protected-access
assert isinstance(d,
(dataset_ops.BatchDataset, batching._MapAndBatchDataset))
if isinstance(d, dataset_ops.BatchDataset):
batch_size = d._batch_size
drop_remainder = d._drop_remainder
elif isinstance(d, batching._MapAndBatchDataset):
batch_size = d._batch_size_t
drop_remainder = d._drop_remainder_t
# pylint: enable=protected-access
if tensor_util.is_tensor(batch_size):
batch_size = tensor_util.constant_value(batch_size)
if tensor_util.is_tensor(drop_remainder):
drop_remainder = tensor_util.constant_value(drop_remainder)
return batch_size, drop_remainder
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_dataset_attributes(dataset):
"""Get the underlying attributes from the dataset object."""
# pylint: disable=protected-access
# First, get batch_size and drop_remainder from the dataset. We need
# to walk back the dataset creation process and find the batched version in
# order to get the attributes.
batched_dataset = _get_batched_dataset(dataset)
batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)
  # Second, the prefetch buffer should be obtained from the original dataset.
prefetch_buffer = None
if isinstance(dataset, dataset_ops.PrefetchDataset):
prefetch_buffer = dataset._buffer_size
elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
prefetch_buffer = dataset._dataset._buffer_size
return batch_size, drop_remainder, prefetch_buffer
class MultiStepContext(object):
"""A context object that can be used to capture things when running steps.
This context object is useful when running multiple steps at a time using the
  `experimental_run_steps_on_iterator` API. For example, it allows the user's
  step function to specify which outputs to emit at what frequency. Currently
  it supports capturing output from the last step, as well as capturing
  non-tensor outputs. In the future it will be augmented to support other use
  cases such as emitting output every N steps.
"""
def __init__(self):
"""Initialize an output context.
Returns:
A context object.
"""
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {}
@property
def last_step_outputs(self):
"""A dictionary consisting of outputs to be captured on last step.
Keys in the dictionary are names of tensors to be captured, as specified
when `set_last_step_output` is called.
Values in the dictionary are the tensors themselves. If
`set_last_step_output` was called with a `reduce_op` for this output,
then the value is the reduced value.
Returns:
A dictionary with last step outputs.
"""
return self._last_step_outputs
def _set_last_step_outputs(self, outputs):
"""Replace the entire dictionary of last step outputs."""
if not isinstance(outputs, dict):
raise ValueError("Need a dictionary to set last_step_outputs.")
self._last_step_outputs = outputs
def set_last_step_output(self, name, output, reduce_op=None):
"""Set `output` with `name` to be outputted from the last step.
Args:
name: String, name to identify the output. Doesn't need to match tensor
name.
output: The tensors that should be outputted with `name`. See below for
actual types supported.
reduce_op: Reduction method to use to reduce outputs from multiple
replicas. Required if `set_last_step_output` is called in a replica
context. Optional in cross_replica_context.
When present, the outputs from all the replicas are reduced using the
current distribution strategy's `reduce` method. Hence, the type of
`output` must be what's supported by the corresponding `reduce` method.
        For example, if using MirroredStrategy and reduction is set, output
        must be a `PerReplica` value.
        The reduce method is also recorded in a dictionary
        `_last_step_outputs_reduce_ops` for later interpretation of whether
        the outputs were already reduced or not.
"""
if distribution_strategy_context.in_cross_replica_context():
self._last_step_outputs_reduce_ops[name] = reduce_op
if reduce_op is None:
self._last_step_outputs[name] = output
else:
distribution = distribution_strategy_context.get_strategy()
self._last_step_outputs[name] = distribution.reduce(reduce_op, output,
axis=None)
else:
assert reduce_op is not None
def merge_fn(distribution, value):
self._last_step_outputs[name] = distribution.reduce(reduce_op, value,
axis=None)
# Setting this inside the `merge_fn` because all replicas share the same
# context object, so it's more robust to set it only once (even if all
# the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
@property
def non_tensor_outputs(self):
"""A dictionary consisting of any non tensor outputs to be captured."""
return self._non_tensor_outputs
def set_non_tensor_output(self, name, output):
"""Set `output` with `name` to be captured as a non tensor output."""
if distribution_strategy_context.in_cross_replica_context():
self._non_tensor_outputs[name] = output
else:
def merge_fn(distribution, value):
# NOTE(priyag): For non tensor outputs, we simply return all the values
# in a list as reduction doesn't make sense on non tensors.
self._non_tensor_outputs[name] = (
distribution.experimental_local_results(value))
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
def _create_distributed_tensor_spec(strategy, tensor_spec):
"""Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`.
Args:
strategy: The given `tf.distribute` strategy.
tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the
shape should be None if you have partial batches.
Returns:
A `tf.TypeSpec` that matches the values produced by a given strategy. This
    can be a `tf.TensorSpec` or a `PerReplicaSpec`.
"""
num_replicas = len(strategy.extended.worker_devices)
# If the number of devices used in the strategy is just 1 then we return
# the tensor_spec as is.
if num_replicas == 1:
return tensor_spec
# If the number of devices is greater than 1 then we assume the input to
# tf.function is a per replica type.
def _get_value_per_replica(tensor_spec_per_input):
value_specs = [tensor_spec_per_input for _ in range(num_replicas)]
return values.PerReplicaSpec(*value_specs)
return nest.map_structure(_get_value_per_replica, tensor_spec)
| apache-2.0 | 3,307,326,775,798,725,000 | 39.091721 | 114 | 0.666957 | false |
bloomberg/phabricator-tools | py/aon/aoncmd_taskquery.py | 4 | 8598 | """display and filter the list of maniphest tasks.
you can use the 'task id' output from this command as input to the
'arcyon task-update' command.
usage examples:
list all tasks:
$ arcyon task-query
output formats:
--format-ids
3
2
1
--format-short
8 / Open / High / rethink the blob module
7 / Open / High / document the lifecycle of a request
3 / Open / Low / extract methods out of the doWork() function
--format-python
[{'description': u'',
'id': u'1',
'objectName': u'T1',
'priority': u'Needs Triage',
'status': u'0',
...
--format-json
[
{
"description": "",
"id": "1",
"objectName": "T1",
...
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_taskquery
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pprint
import textwrap
import phlcon_maniphest
import phlcon_project
import phlcon_user
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
# make a list of priority names in increasing order of importance
priority_name_list = phlcon_maniphest.PRIORITIES.keys()
priority_name_list.sort(
key=lambda x: phlcon_maniphest.PRIORITIES[x])
priorities = parser.add_argument_group(
'optional priority arguments',
'use any of ' + textwrap.fill(
str(priority_name_list)))
output_group = parser.add_argument_group(
'Output format arguments',
'Mutually exclusive, defaults to "--format-short"')
output = output_group.add_mutually_exclusive_group()
opt = parser.add_argument_group(
'Optional task arguments')
priorities.add_argument(
'--priorities',
'-p',
nargs="*",
choices=priority_name_list,
metavar="PRIORITY",
default=None,
type=str,
help="filter by priority of the task")
opt.add_argument(
'--order',
choices=phlcon_maniphest.ORDERS.keys(),
default=None,
type=str,
help="the ordering of the returned results")
opt.add_argument(
'--ids',
nargs="+",
metavar='INT',
default=[],
help='specific task ids to restrict the query to',
type=str)
opt.add_argument(
'--owners',
'-o',
nargs="+",
metavar='USER',
default=[],
help='specific owners usernames to restrict the query to',
type=str)
opt.add_argument(
'--authors',
nargs="+",
metavar='USER',
default=[],
help='specific author usernames to restrict the query to',
type=str)
opt.add_argument(
'--ccs',
'-c',
nargs="+",
metavar='USER',
default=[],
help='specific cc usernames to restrict the query to',
type=str)
opt.add_argument(
'--projects',
nargs="+",
metavar='PROJECT',
default=[],
help='a list of project names to restrict the query',
type=str)
opt.add_argument(
'--status',
type=str,
default=None,
choices=phlcon_maniphest.STATUS_FILTERS.keys(),
help='a single status type to restrict items to')
opt.add_argument(
'--text',
type=str,
metavar='STRING',
default=None,
help='string to search the full text of each task for.')
opt.add_argument(
'--max-results',
type=int,
metavar='INT',
default=None,
help='limit the number of results returned, if unspecified then the '
'server default limit is used (seems to be 1000).')
opt.add_argument(
'--offset-results',
type=int,
metavar='INT',
default=None,
help='where there is a limit on the number of results, you can supply '
'an offset to return the next batch of results. e.g. if the '
             'number of results is limited to 100, then to see the next "page" '
             'of results, supply an offset of 100. To see "page 3" of the '
             'results, supply an offset of 200 and so on. There is no way to '
'count the total number of results at present.')
output.add_argument(
'--format-short',
action='store_true',
help='will print a short human-readable summary of each task.')
output.add_argument(
'--format-ids',
action='store_true',
help='will print just the ids of the tasks, for scripting.')
output.add_argument(
'--format-string',
type=str,
default=None,
help='will print using the supplied format string, e.g. "{id}" '
'to print a list of ids. use --format-python to list all the '
'available attributes for printing.')
output.add_argument(
'--format-python',
action='store_true',
help='will pretty-print the response as a python object.')
output.add_argument(
'--format-json',
action='store_true',
help='will pretty-print the response in json.')
phlsys_makeconduit.add_argparse_arguments(parser)
def _combine_lists_if_not_none(*lists):
result = []
for l in lists:
if l is not None:
result += l
return result
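    # For example: _combine_lists_if_not_none(['alice'], None, ['bob'])
    # returns ['alice', 'bob']; None arguments are simply skipped.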
def process(args):
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
# conduit expects PHIDs not plain usernames
user_phids = phlcon_user.UserPhidCache(conduit)
user_phids.add_hint_list(
_combine_lists_if_not_none(args.owners, args.ccs))
authors = [user_phids.get_phid(user) for user in args.authors]
owners = [user_phids.get_phid(user) for user in args.owners]
ccs = [user_phids.get_phid(user) for user in args.ccs]
# conduit expects PHIDs not plain project names
projects = None
if args.projects:
project_to_phid = phlcon_project.make_project_to_phid_dict(conduit)
projects = [project_to_phid[p] for p in args.projects]
filters = phlcon_maniphest.STATUS_FILTERS
status = filters[args.status] if args.status is not None else None
orderings = phlcon_maniphest.ORDERS
order = orderings[args.order] if args.order is not None else None
results = phlcon_maniphest.query(
conduit,
ids=args.ids,
authors=authors,
owners=owners,
ccs=ccs,
projects=projects,
status=status,
limit=args.max_results,
offset=args.offset_results,
order=order,
text=args.text)
results = [dict(r.__dict__) for r in results]
for r in results:
if r['statusName'] is None:
r['statusName'] = phlcon_maniphest.STATUSES[int(r['status'])]
# initialise to format for 'args.format_short'
output_format = "{id} / {statusName} / {priority} / {title}"
if args.format_ids:
output_format = "{id}"
elif args.format_string is not None:
output_format = args.format_string
if args.format_python:
pprint.pprint(results)
elif args.format_json:
print(json.dumps(results, sort_keys=True, indent=2))
else:
for r in results:
print(output_format.format(**r))
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 | -5,671,145,255,759,464,000 | 29.489362 | 79 | 0.571528 | false |
fkie-cad/FACT_core | src/objects/firmware.py | 1 | 2206 | from objects.file import FileObject
from helperFunctions.hash import get_md5
from helperFunctions.tag import TagColor
from contextlib import suppress
class Firmware(FileObject):
'''
    This object represents a firmware
'''
def __init__(self, binary=None, file_name=None, file_path=None, scheduled_analysis=None):
super().__init__(binary=binary, file_name=file_name, file_path=file_path, scheduled_analysis=scheduled_analysis)
self.device_name = None
self.version = None
self.device_class = None
self.vendor = None
self.part = ''
self.release_date = None
self.tags = dict()
self._update_root_id_and_virtual_path()
def set_device_name(self, device_name):
self.device_name = device_name
def set_part_name(self, part):
if part == 'complete':
self.part = ''
else:
self.part = part
def set_firmware_version(self, version):
self.version = version
def set_device_class(self, device_class):
self.device_class = device_class
def set_binary(self, binary):
super().set_binary(binary)
self._update_root_id_and_virtual_path()
self.md5 = get_md5(binary)
def set_vendor(self, vendor):
self.vendor = vendor
def set_release_date(self, release_date):
self.release_date = release_date
def _update_root_id_and_virtual_path(self):
self.root_uid = self.uid
self.virtual_file_path = {self.uid: [self.uid]}
def set_tag(self, tag, tag_color=TagColor.GRAY):
self.tags[tag] = tag_color
def remove_tag(self, tag):
with suppress(KeyError):
self.tags.pop(tag)
def get_hid(self, root_uid=None):
'''
return a human readable identifier
'''
part = ' - {}'.format(self.part) if self.part else ''
return '{} {}{} v. {}'.format(self.vendor, self.device_name, part, self.version)
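        # For example, a firmware with vendor='AVM', device_name='FritzBox 7490',
        # part='kernel' and version='6.83' (hypothetical values) yields
        # 'AVM FritzBox 7490 - kernel v. 6.83'.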
def __str__(self):
return '{}\nProcessed Analysis: {}\nScheduled Analysis: {}'.format(self.get_hid(), list(self.processed_analysis.keys()), self.scheduled_analysis)
def __repr__(self):
return self.__str__()
| gpl-3.0 | 3,670,237,960,476,488,700 | 30.070423 | 153 | 0.612874 | false |
asendecka/djangae | djangae/tests/test_transactional.py | 7 | 6364 | from djangae.test import TestCase
from djangae.db import transaction
from djangae.contrib import sleuth
class TransactionTests(TestCase):
def test_repeated_usage_in_a_loop(self):
from .test_connector import TestUser
pk = TestUser.objects.create(username="foo").pk
for i in xrange(4):
with transaction.atomic(xg=True):
TestUser.objects.get(pk=pk)
continue
with transaction.atomic(xg=True):
TestUser.objects.get(pk=pk)
def test_atomic_decorator(self):
from .test_connector import TestUser
@transaction.atomic
def txn():
TestUser.objects.create(username="foo", field2="bar")
self.assertTrue(transaction.in_atomic_block())
raise ValueError()
with self.assertRaises(ValueError):
txn()
self.assertEqual(0, TestUser.objects.count())
def test_interaction_with_datastore_txn(self):
from google.appengine.ext import db
from google.appengine.datastore.datastore_rpc import TransactionOptions
from .test_connector import TestUser
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def some_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def some_non_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def double_nested_transactional():
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def do_stuff():
TestUser.objects.create(username="Double")
raise ValueError()
try:
return do_stuff
except:
return
with transaction.atomic():
double_nested_transactional()
@db.transactional()
def something_containing_atomic():
with transaction.atomic():
TestUser.objects.create(username="Inner")
something_containing_atomic()
with transaction.atomic():
with transaction.atomic():
some_non_indie_txn("Bob1")
some_indie_txn("Bob2")
some_indie_txn("Bob3")
with transaction.atomic(independent=True):
some_non_indie_txn("Fred1")
some_indie_txn("Fred2")
some_indie_txn("Fred3")
def test_atomic_context_manager(self):
from .test_connector import TestUser
with self.assertRaises(ValueError):
with transaction.atomic():
TestUser.objects.create(username="foo", field2="bar")
raise ValueError()
self.assertEqual(0, TestUser.objects.count())
def test_non_atomic_context_manager(self):
from .test_connector import TestUser
existing = TestUser.objects.create(username="existing", field2="exists")
with transaction.atomic():
self.assertTrue(transaction.in_atomic_block())
user = TestUser.objects.create(username="foo", field2="bar")
with transaction.non_atomic():
# We're outside the transaction, so the user should not exist
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user.pk)
self.assertFalse(transaction.in_atomic_block())
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
TestUser.objects.get(pk=existing.pk) #Should hit the cache, not the datastore
self.assertFalse(datastore_get.called)
with transaction.atomic(independent=True):
user2 = TestUser.objects.create(username="foo2", field2="bar2")
self.assertTrue(transaction.in_atomic_block())
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
TestUser.objects.get(pk=existing.pk) #Should hit the cache, not the datastore
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
self.assertTrue(TestUser.objects.filter(pk=user2.pk).exists())
self.assertTrue(transaction.in_atomic_block())
def test_xg_argument(self):
from .test_connector import TestUser, TestFruit
@transaction.atomic(xg=True)
def txn(_username):
TestUser.objects.create(username=_username, field2="bar")
TestFruit.objects.create(name="Apple", color="pink")
raise ValueError()
with self.assertRaises(ValueError):
txn("foo")
self.assertEqual(0, TestUser.objects.count())
self.assertEqual(0, TestFruit.objects.count())
def test_independent_argument(self):
"""
        We would get an XG error if the inner transaction was not independent
"""
from .test_connector import TestUser, TestFruit
@transaction.atomic
def txn1(_username, _fruit):
@transaction.atomic(independent=True)
def txn2(_fruit):
TestFruit.objects.create(name=_fruit, color="pink")
raise ValueError()
TestUser.objects.create(username=_username)
txn2(_fruit)
with self.assertRaises(ValueError):
txn1("test", "banana")
def test_nested_decorator(self):
# Nested decorator pattern we discovered can cause a connection_stack
# underflow.
@transaction.atomic
def inner_txn():
pass
@transaction.atomic
def outer_txn():
inner_txn()
# Calling inner_txn first puts it in a state which means it doesn't
# then behave properly in a nested transaction.
inner_txn()
outer_txn()
| bsd-3-clause | -1,173,051,211,443,170,300 | 33.586957 | 105 | 0.603708 | false |
ryuunosukeyoshi/PartnerPoi-Bot | lib/aiohttp/multipart.py | 20 | 32684 | import asyncio
import base64
import binascii
import io
import json
import mimetypes
import os
import re
import sys
import uuid
import warnings
import zlib
from collections import Mapping, Sequence, deque
from pathlib import Path
from urllib.parse import parse_qsl, quote, unquote, urlencode
from multidict import CIMultiDict
from .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)
from .helpers import parse_mimetype
from .protocol import HttpParser
__all__ = ('MultipartReader', 'MultipartWriter',
'BodyPartReader', 'BodyPartWriter',
'BadContentDispositionHeader', 'BadContentDispositionParam',
'parse_content_disposition', 'content_disposition_filename')
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
'?', '=', '{', '}', ' ', chr(9)}
TOKEN = CHAR ^ CTL ^ SEPARATORS
PY_35 = sys.version_info >= (3, 5)
PY_352 = sys.version_info >= (3, 5, 2)
class BadContentDispositionHeader(RuntimeWarning):
pass
class BadContentDispositionParam(RuntimeWarning):
pass
def parse_content_disposition(header):
def is_token(string):
return string and TOKEN >= set(string)
def is_quoted(string):
return string[0] == string[-1] == '"'
def is_rfc5987(string):
return is_token(string) and string.count("'") == 2
def is_extended_param(string):
return string.endswith('*')
def is_continuous_param(string):
pos = string.find('*') + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith('*') else string[pos:]
return substring.isdigit()
def unescape(text, *, chars=''.join(map(re.escape, CHAR))):
return re.sub('\\\\([{}])'.format(chars), '\\1', text)
if not header:
return None, {}
disptype, *parts = header.split(';')
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params = {}
for item in parts:
if '=' not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split('=', 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, 'strict')
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
if is_quoted(value):
value = unescape(value[1:-1].lstrip('\\/'))
elif not is_token(value):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
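    # For example, parse_content_disposition('attachment; filename="report.pdf"')
    # returns ('attachment', {'filename': 'report.pdf'}), while a malformed
    # header produces (None, {}) and emits a warning.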
def content_disposition_filename(params):
if not params:
return None
elif 'filename*' in params:
return params['filename*']
elif 'filename' in params:
return params['filename']
else:
parts = []
fnparams = sorted((key, value)
for key, value in params.items()
if key.startswith('filename*'))
for num, (key, value) in enumerate(fnparams):
_, tail = key.split('*', 1)
if tail.endswith('*'):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = ''.join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
return unquote(value, encoding, 'strict')
return value
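    # For example, {'filename': 'foo.html'} yields 'foo.html', and continuation
    # parameters such as {'filename*0': 'foo', 'filename*1': '.html'} are
    # reassembled into 'foo.html'.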
class MultipartResponseWrapper(object):
"""Wrapper around the :class:`MultipartBodyReader` to take care about
underlying connection and close it when it needs in."""
def __init__(self, resp, stream):
self.resp = resp
self.stream = stream
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
def at_eof(self):
"""Returns ``True`` when all response data had been read.
:rtype: bool
"""
return self.resp.content.at_eof()
@asyncio.coroutine
def next(self):
"""Emits next multipart reader object."""
item = yield from self.stream.next()
if self.stream.at_eof():
yield from self.release()
return item
@asyncio.coroutine
def release(self):
"""Releases the connection gracefully, reading all the content
to the void."""
yield from self.resp.release()
class BodyPartReader(object):
"""Multipart reader for single body part."""
chunk_size = 8192
def __init__(self, boundary, headers, content):
self.headers = headers
self._boundary = boundary
self._content = content
self._at_eof = False
length = self.headers.get(CONTENT_LENGTH, None)
self._length = int(length) if length is not None else None
self._read_bytes = 0
self._unread = deque()
self._prev_chunk = None
self._content_eof = 0
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
@asyncio.coroutine
def next(self):
item = yield from self.read()
if not item:
return None
return item
@asyncio.coroutine
def read(self, *, decode=False):
"""Reads body part data.
        :param bool decode: Decodes data following the encoding
            method from the `Content-Encoding` header. If the header is
            missing, the data remains untouched
:rtype: bytearray
"""
if self._at_eof:
return b''
data = bytearray()
if self._length is None:
while not self._at_eof:
data.extend((yield from self.readline()))
else:
while not self._at_eof:
data.extend((yield from self.read_chunk(self.chunk_size)))
if decode:
return self.decode(data)
return data
@asyncio.coroutine
def read_chunk(self, size=chunk_size):
"""Reads body part content chunk of the specified size.
:param int size: chunk size
:rtype: bytearray
"""
if self._at_eof:
return b''
if self._length:
chunk = yield from self._read_chunk_from_length(size)
else:
chunk = yield from self._read_chunk_from_stream(size)
self._read_bytes += len(chunk)
if self._read_bytes == self._length:
self._at_eof = True
if self._at_eof:
assert b'\r\n' == (yield from self._content.readline()), \
'reader did not read all the data or it is malformed'
return chunk
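        # Typical streaming pattern (sketch; `part` and `destination` are
        # hypothetical): call `chunk = yield from part.read_chunk()` in a loop
        # and write each chunk to `destination` until an empty bytes object is
        # returned, which signals the end of this body part.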
@asyncio.coroutine
def _read_chunk_from_length(self, size):
"""Reads body part content chunk of the specified size.
        The body part must have a `Content-Length` header with a proper value.
:param int size: chunk size
:rtype: bytearray
"""
assert self._length is not None, \
'Content-Length required for chunked read'
chunk_size = min(size, self._length - self._read_bytes)
chunk = yield from self._content.read(chunk_size)
return chunk
@asyncio.coroutine
def _read_chunk_from_stream(self, size):
"""Reads content chunk of body part with unknown length.
The `Content-Length` header for body part is not necessary.
:param int size: chunk size
:rtype: bytearray
"""
assert size >= len(self._boundary) + 2, \
            'Chunk size must be greater than or equal to boundary length + 2'
first_chunk = self._prev_chunk is None
if first_chunk:
self._prev_chunk = yield from self._content.read(size)
chunk = yield from self._content.read(size)
self._content_eof += int(self._content.at_eof())
assert self._content_eof < 3, "Reading after EOF"
window = self._prev_chunk + chunk
sub = b'\r\n' + self._boundary
if first_chunk:
idx = window.find(sub)
else:
idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
if idx >= 0:
# pushing boundary back to content
self._content.unread_data(window[idx:])
if size > idx:
self._prev_chunk = self._prev_chunk[:idx]
chunk = window[len(self._prev_chunk):idx]
if not chunk:
self._at_eof = True
if 0 < len(chunk) < len(sub) and not self._content_eof:
self._prev_chunk += chunk
self._at_eof = False
return b''
result = self._prev_chunk
self._prev_chunk = chunk
return result
@asyncio.coroutine
def readline(self):
"""Reads body part by line by line.
:rtype: bytearray
"""
if self._at_eof:
return b''
if self._unread:
line = self._unread.popleft()
else:
line = yield from self._content.readline()
if line.startswith(self._boundary):
            # the very last boundary may not come with \r\n,
            # so apply the same handling in both cases
sline = line.rstrip(b'\r\n')
boundary = self._boundary
last_boundary = self._boundary + b'--'
            # ensure that we read exactly the boundary, not something similar to it
if sline == boundary or sline == last_boundary:
self._at_eof = True
self._unread.append(line)
return b''
else:
next_line = yield from self._content.readline()
if next_line.startswith(self._boundary):
line = line[:-2] # strip CRLF but only once
self._unread.append(next_line)
return line
@asyncio.coroutine
def release(self):
"""Like :meth:`read`, but reads all the data to the void.
:rtype: None
"""
if self._at_eof:
return
if self._length is None:
while not self._at_eof:
yield from self.readline()
else:
while not self._at_eof:
yield from self.read_chunk(self.chunk_size)
@asyncio.coroutine
def text(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body part contains text data.
:param str encoding: Custom text encoding. Overrides specified
in charset param of `Content-Type` header
:rtype: str
"""
data = yield from self.read(decode=True)
encoding = encoding or self.get_charset(default='latin1')
return data.decode(encoding)
@asyncio.coroutine
def json(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body parts contains JSON data.
:param str encoding: Custom JSON encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return json.loads(data.decode(encoding))
@asyncio.coroutine
def form(self, *, encoding=None):
"""Like :meth:`read`, but assumes that body parts contains form
urlencoded data.
:param str encoding: Custom form encoding. Overrides specified
in charset param of `Content-Type` header
"""
data = yield from self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)
def at_eof(self):
"""Returns ``True`` if the boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
def decode(self, data):
"""Decodes data according the specified `Content-Encoding`
or `Content-Transfer-Encoding` headers value.
Supports ``gzip``, ``deflate`` and ``identity`` encodings for
`Content-Encoding` header.
Supports ``base64``, ``quoted-printable``, ``binary`` encodings for
`Content-Transfer-Encoding` header.
:param bytearray data: Data to decode.
:raises: :exc:`RuntimeError` - if encoding is unknown.
:rtype: bytes
"""
if CONTENT_TRANSFER_ENCODING in self.headers:
data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
return self._decode_content(data)
return data
def _decode_content(self, data):
encoding = self.headers[CONTENT_ENCODING].lower()
if encoding == 'deflate':
return zlib.decompress(data, -zlib.MAX_WBITS)
elif encoding == 'gzip':
return zlib.decompress(data, 16 + zlib.MAX_WBITS)
elif encoding == 'identity':
return data
else:
raise RuntimeError('unknown content encoding: {}'.format(encoding))
def _decode_content_transfer(self, data):
encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
if encoding == 'base64':
return base64.b64decode(data)
elif encoding == 'quoted-printable':
return binascii.a2b_qp(data)
elif encoding == 'binary':
return data
else:
raise RuntimeError('unknown content transfer encoding: {}'
''.format(encoding))
def get_charset(self, default=None):
"""Returns charset parameter from ``Content-Type`` header or default.
"""
ctype = self.headers.get(CONTENT_TYPE, '')
*_, params = parse_mimetype(ctype)
return params.get('charset', default)
@property
def filename(self):
"""Returns filename specified in Content-Disposition header or ``None``
if missed or header is malformed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params)
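# --- Illustrative sketch (not part of the original module) ---
# A minimal example of consuming a single BodyPartReader, assuming `part`
# was obtained from a multipart reader elsewhere; it only demonstrates the
# read()/text()/filename API defined above.
@asyncio.coroutine
def _example_consume_part(part):
    if part.filename is None:
        # textual part: decode using the charset from its Content-Type header
        return (yield from part.text())
    # file upload: return the raw (decoded) payload as a bytearray
    return (yield from part.read(decode=True))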
class MultipartReader(object):
"""Multipart body reader."""
    #: Response wrapper, used when a multipart reader is constructed from a response.
response_wrapper_cls = MultipartResponseWrapper
#: Multipart reader class, used to handle multipart/* body parts.
#: None points to type(self)
multipart_reader_cls = None
#: Body part reader class for non multipart/* content types.
part_reader_cls = BodyPartReader
def __init__(self, headers, content):
self.headers = headers
self._boundary = ('--' + self._get_boundary()).encode()
self._content = content
self._last_part = None
self._at_eof = False
self._at_bof = True
self._unread = []
if PY_35:
def __aiter__(self):
return self
if not PY_352: # pragma: no cover
__aiter__ = asyncio.coroutine(__aiter__)
@asyncio.coroutine
def __anext__(self):
part = yield from self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
@classmethod
def from_response(cls, response):
"""Constructs reader instance from HTTP response.
:param response: :class:`~aiohttp.client.ClientResponse` instance
"""
obj = cls.response_wrapper_cls(response, cls(response.headers,
response.content))
return obj
def at_eof(self):
"""Returns ``True`` if the final boundary was reached or
``False`` otherwise.
:rtype: bool
"""
return self._at_eof
@asyncio.coroutine
def next(self):
"""Emits the next multipart body part."""
# So, if we're at BOF, we need to skip till the boundary.
if self._at_eof:
return
yield from self._maybe_release_last_part()
if self._at_bof:
yield from self._read_until_first_boundary()
self._at_bof = False
else:
yield from self._read_boundary()
if self._at_eof: # we just read the last boundary, nothing to do there
return
self._last_part = yield from self.fetch_next_part()
return self._last_part
@asyncio.coroutine
def release(self):
"""Reads all the body parts to the void till the final boundary."""
while not self._at_eof:
item = yield from self.next()
if item is None:
break
yield from item.release()
@asyncio.coroutine
def fetch_next_part(self):
"""Returns the next body part reader."""
headers = yield from self._read_headers()
return self._get_part_reader(headers)
def _get_part_reader(self, headers):
"""Dispatches the response by the `Content-Type` header, returning
suitable reader instance.
:param dict headers: Response headers
"""
ctype = headers.get(CONTENT_TYPE, '')
mtype, *_ = parse_mimetype(ctype)
if mtype == 'multipart':
if self.multipart_reader_cls is None:
return type(self)(headers, self._content)
return self.multipart_reader_cls(headers, self._content)
else:
return self.part_reader_cls(self._boundary, headers, self._content)
def _get_boundary(self):
mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])
assert mtype == 'multipart', 'multipart/* content type expected'
if 'boundary' not in params:
            raise ValueError('boundary missing for Content-Type: %s'
% self.headers[CONTENT_TYPE])
boundary = params['boundary']
if len(boundary) > 70:
raise ValueError('boundary %r is too long (70 chars max)'
% boundary)
return boundary
@asyncio.coroutine
def _readline(self):
if self._unread:
return self._unread.pop()
return (yield from self._content.readline())
@asyncio.coroutine
def _read_until_first_boundary(self):
while True:
chunk = yield from self._readline()
if chunk == b'':
raise ValueError("Could not find starting boundary %r"
% (self._boundary))
chunk = chunk.rstrip()
if chunk == self._boundary:
return
elif chunk == self._boundary + b'--':
self._at_eof = True
return
@asyncio.coroutine
def _read_boundary(self):
chunk = (yield from self._readline()).rstrip()
if chunk == self._boundary:
pass
elif chunk == self._boundary + b'--':
self._at_eof = True
else:
raise ValueError('Invalid boundary %r, expected %r'
% (chunk, self._boundary))
@asyncio.coroutine
def _read_headers(self):
lines = [b'']
while True:
chunk = yield from self._content.readline()
chunk = chunk.strip()
lines.append(chunk)
if not chunk:
break
parser = HttpParser()
headers, *_ = parser.parse_headers(lines)
return headers
@asyncio.coroutine
def _maybe_release_last_part(self):
"""Ensures that the last read body part is read completely."""
if self._last_part is not None:
if not self._last_part.at_eof():
yield from self._last_part.release()
self._unread.extend(self._last_part._unread)
self._last_part = None
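# --- Illustrative sketch (not part of the original module) ---
# Shows the next()/at_eof() flow implemented by MultipartReader above.
# `headers` and `content` are assumed to come from an HTTP response object
# (e.g. resp.headers and resp.content).
@asyncio.coroutine
def _example_iterate_parts(headers, content):
    reader = MultipartReader(headers, content)
    bodies = []
    while not reader.at_eof():
        part = yield from reader.next()
        if part is None:  # final boundary reached
            break
        bodies.append((yield from part.read(decode=True)))
    return bodies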
class BodyPartWriter(object):
"""Multipart writer for single body part."""
def __init__(self, obj, headers=None, *, chunk_size=8192):
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, CIMultiDict):
headers = CIMultiDict(headers)
self.obj = obj
self.headers = headers
self._chunk_size = chunk_size
self._fill_headers_with_defaults()
self._serialize_map = {
bytes: self._serialize_bytes,
str: self._serialize_str,
io.IOBase: self._serialize_io,
MultipartWriter: self._serialize_multipart,
('application', 'json'): self._serialize_json,
('application', 'x-www-form-urlencoded'): self._serialize_form
}
def _fill_headers_with_defaults(self):
if CONTENT_TYPE not in self.headers:
content_type = self._guess_content_type(self.obj)
if content_type is not None:
self.headers[CONTENT_TYPE] = content_type
if CONTENT_LENGTH not in self.headers:
content_length = self._guess_content_length(self.obj)
if content_length is not None:
self.headers[CONTENT_LENGTH] = str(content_length)
if CONTENT_DISPOSITION not in self.headers:
filename = self._guess_filename(self.obj)
if filename is not None:
self.set_content_disposition('attachment', filename=filename)
def _guess_content_length(self, obj):
if isinstance(obj, bytes):
return len(obj)
elif isinstance(obj, str):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
charset = params.get('charset', 'us-ascii')
return len(obj.encode(charset))
elif isinstance(obj, io.StringIO):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
charset = params.get('charset', 'us-ascii')
return len(obj.getvalue().encode(charset)) - obj.tell()
elif isinstance(obj, io.BytesIO):
return len(obj.getvalue()) - obj.tell()
elif isinstance(obj, io.IOBase):
try:
return os.fstat(obj.fileno()).st_size - obj.tell()
except (AttributeError, OSError):
return None
else:
return None
def _guess_content_type(self, obj, default='application/octet-stream'):
if hasattr(obj, 'name'):
name = getattr(obj, 'name')
return mimetypes.guess_type(name)[0]
elif isinstance(obj, (str, io.StringIO)):
return 'text/plain; charset=utf-8'
else:
return default
def _guess_filename(self, obj):
if isinstance(obj, io.IOBase):
name = getattr(obj, 'name', None)
if name is not None:
return Path(name).name
def serialize(self):
"""Yields byte chunks for body part."""
has_encoding = (
CONTENT_ENCODING in self.headers and
self.headers[CONTENT_ENCODING] != 'identity' or
CONTENT_TRANSFER_ENCODING in self.headers
)
if has_encoding:
            # since we're following a streaming approach which doesn't assume
            # any intermediate buffers, we cannot calculate the real content
            # length with the specified content encoding scheme. So, instead
            # of lying about the content length and causing reading issues,
            # we have to strip this information.
self.headers.pop(CONTENT_LENGTH, None)
if self.headers:
yield b'\r\n'.join(
b': '.join(map(lambda i: i.encode('latin1'), item))
for item in self.headers.items()
)
yield b'\r\n\r\n'
yield from self._maybe_encode_stream(self._serialize_obj())
yield b'\r\n'
def _serialize_obj(self):
obj = self.obj
mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))
serializer = self._serialize_map.get((mtype, stype))
if serializer is not None:
return serializer(obj)
for key in self._serialize_map:
if not isinstance(key, tuple) and isinstance(obj, key):
return self._serialize_map[key](obj)
return self._serialize_default(obj)
def _serialize_bytes(self, obj):
yield obj
def _serialize_str(self, obj):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
yield obj.encode(params.get('charset', 'us-ascii'))
def _serialize_io(self, obj):
while True:
chunk = obj.read(self._chunk_size)
if not chunk:
break
if isinstance(chunk, str):
yield from self._serialize_str(chunk)
else:
yield from self._serialize_bytes(chunk)
def _serialize_multipart(self, obj):
yield from obj.serialize()
def _serialize_json(self, obj):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
yield json.dumps(obj).encode(params.get('charset', 'utf-8'))
def _serialize_form(self, obj):
if isinstance(obj, Mapping):
obj = list(obj.items())
return self._serialize_str(urlencode(obj, doseq=True))
def _serialize_default(self, obj):
raise TypeError('unknown body part type %r' % type(obj))
def _maybe_encode_stream(self, stream):
if CONTENT_ENCODING in self.headers:
stream = self._apply_content_encoding(stream)
if CONTENT_TRANSFER_ENCODING in self.headers:
stream = self._apply_content_transfer_encoding(stream)
yield from stream
def _apply_content_encoding(self, stream):
encoding = self.headers[CONTENT_ENCODING].lower()
if encoding == 'identity':
yield from stream
elif encoding in ('deflate', 'gzip'):
if encoding == 'gzip':
zlib_mode = 16 + zlib.MAX_WBITS
else:
zlib_mode = -zlib.MAX_WBITS
zcomp = zlib.compressobj(wbits=zlib_mode)
for chunk in stream:
yield zcomp.compress(chunk)
else:
yield zcomp.flush()
else:
raise RuntimeError('unknown content encoding: {}'
''.format(encoding))
def _apply_content_transfer_encoding(self, stream):
encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
if encoding == 'base64':
buffer = bytearray()
while True:
if buffer:
div, mod = divmod(len(buffer), 3)
chunk, buffer = buffer[:div * 3], buffer[div * 3:]
if chunk:
yield base64.b64encode(chunk)
chunk = next(stream, None)
if not chunk:
if buffer:
yield base64.b64encode(buffer[:])
return
buffer.extend(chunk)
elif encoding == 'quoted-printable':
for chunk in stream:
yield binascii.b2a_qp(chunk)
elif encoding == 'binary':
yield from stream
else:
raise RuntimeError('unknown content transfer encoding: {}'
''.format(encoding))
def set_content_disposition(self, disptype, **params):
"""Sets ``Content-Disposition`` header.
:param str disptype: Disposition type: inline, attachment, form-data.
Should be valid extension token (see RFC 2183)
:param dict params: Disposition params
"""
if not disptype or not (TOKEN > set(disptype)):
raise ValueError('bad content disposition type {!r}'
''.format(disptype))
value = disptype
if params:
lparams = []
for key, val in params.items():
if not key or not (TOKEN > set(key)):
raise ValueError('bad content disposition parameter'
' {!r}={!r}'.format(key, val))
qval = quote(val, '')
lparams.append((key, '"%s"' % qval))
if key == 'filename':
lparams.append(('filename*', "utf-8''" + qval))
sparams = '; '.join('='.join(pair) for pair in lparams)
value = '; '.join((value, sparams))
self.headers[CONTENT_DISPOSITION] = value
@property
def filename(self):
"""Returns filename specified in Content-Disposition header or ``None``
if missed."""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params)
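# --- Illustrative sketch (not part of the original module) ---
# A BodyPartWriter picks default Content-Type/Content-Length/Content-Disposition
# headers from the payload; joining its serialize() generator yields the raw
# headers-plus-body bytes of a single part.
def _example_serialize_part(obj, headers=None):
    part = BodyPartWriter(obj, headers=headers)
    return b''.join(part.serialize())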
class MultipartWriter(object):
"""Multipart body writer."""
#: Body part reader class for non multipart/* content types.
part_writer_cls = BodyPartWriter
def __init__(self, subtype='mixed', boundary=None):
boundary = boundary if boundary is not None else uuid.uuid4().hex
try:
boundary.encode('us-ascii')
except UnicodeEncodeError:
            raise ValueError('boundary should contain only ASCII chars')
self.headers = CIMultiDict()
self.headers[CONTENT_TYPE] = 'multipart/{}; boundary="{}"'.format(
subtype, boundary
)
self.parts = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return iter(self.parts)
def __len__(self):
return len(self.parts)
@property
def boundary(self):
*_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
return params['boundary'].encode('us-ascii')
def append(self, obj, headers=None):
"""Adds a new body part to multipart writer."""
if isinstance(obj, self.part_writer_cls):
if headers:
obj.headers.update(headers)
self.parts.append(obj)
else:
if not headers:
headers = CIMultiDict()
self.parts.append(self.part_writer_cls(obj, headers))
return self.parts[-1]
def append_json(self, obj, headers=None):
"""Helper to append JSON part."""
if not headers:
headers = CIMultiDict()
headers[CONTENT_TYPE] = 'application/json'
return self.append(obj, headers)
def append_form(self, obj, headers=None):
"""Helper to append form urlencoded part."""
if not headers:
headers = CIMultiDict()
headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'
assert isinstance(obj, (Sequence, Mapping))
return self.append(obj, headers)
def serialize(self):
"""Yields multipart byte chunks."""
if not self.parts:
yield b''
return
for part in self.parts:
yield b'--' + self.boundary + b'\r\n'
yield from part.serialize()
else:
yield b'--' + self.boundary + b'--\r\n'
yield b''
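# --- Illustrative sketch (not part of the original module) ---
# Builds a small multipart payload with the writer classes above. The fixed
# boundary is used here only to make the output deterministic.
def _example_build_multipart():
    writer = MultipartWriter('mixed', boundary='example-boundary')
    writer.append('hello world')            # text/plain part
    writer.append_json({'answer': 42})      # application/json part
    writer.append_form({'q': 'aiohttp'})    # x-www-form-urlencoded part
    return b''.join(writer.serialize())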
| gpl-3.0 | -6,627,738,167,722,210,000 | 32.590956 | 79 | 0.559876 | false |
TacticalGoat/reddit | ErroneousQuotes/erroneousquotes.py | 3 | 3063 | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
import random
'''USER CONFIGURATION'''
USERNAME = ""
#This is the bot's Username. In order to send mail, he must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
NAMES = ["Abraham Lincoln", "George Washington", "Bill Gates", "Rosa Parks", "GoldenSights", "Unidan", "Napoleon Bonaparte"]
#Famous People
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
MAXLENGTH = 150
#To avoid bot abuse, do not generate any quotes longer than this many characters.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.getuG()
PASSWORD = bot.getpG()
USERAGENT = bot.getaG()
except ImportError:
pass
cutoff = len(USERNAME) + 4
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def scanSub():
print('Searching '+ SUBREDDIT + '.')
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_comments(limit=MAXPOSTS)
for post in posts:
pid = post.id
pbody = post.body
try:
pauthor = post.author.name
except AttributeError:
pauthor = '[DELETED]'
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if not cur.fetchone():
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
if pbody.lower()[:cutoff] == '/u/' + USERNAME.lower() + ' ':
quote = pbody.split('\n\n')[0][cutoff:]
if len(quote) <= MAXLENGTH and pauthor != USERNAME:
if ('/u/' + USERNAME) in quote:
print(pid + ': Meatbag detected')
response = 'Nice try, meatbag'
else:
name = NAMES[random.randint(0,len(NAMES)-1)]
print(pid + ': ' + quote + '- ' + name)
response = '>' + quote + '\n\n- ' + name
post.reply(response)
else:
print(pid + ': Comment too long')
sql.commit()
while True:
try:
scanSub()
except Exception as e:
        print('An error has occurred:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
sql.commit()
time.sleep(WAIT)
| mit | -1,687,144,039,775,525,400 | 32.67033 | 149 | 0.612471 | false |
jkorell/PTVS | Python/Product/PythonTools/visualstudio_py_repl.py | 19 | 51102 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
from __future__ import with_statement
# This module MUST NOT import threading in global scope. This is because in a direct (non-ptvsd)
# attach scenario, it is loaded on the injected debugger attach thread, and if threading module
# hasn't been loaded already, it will assume that the thread on which it is being loaded is the
# main thread. This will cause issues when the thread goes away after attach completes.
try:
import thread
except ImportError:
# Renamed in Python3k
import _thread as thread
try:
from ssl import SSLError
except:
SSLError = None
import sys
import socket
import select
import time
import struct
import imp
import traceback
import random
import os
import inspect
import types
from collections import deque
try:
# In the local attach scenario, visualstudio_py_util is injected into globals()
# by PyDebugAttach before loading this module, and cannot be imported.
_vspu = visualstudio_py_util
except:
try:
import visualstudio_py_util as _vspu
except ImportError:
import ptvsd.visualstudio_py_util as _vspu
to_bytes = _vspu.to_bytes
read_bytes = _vspu.read_bytes
read_int = _vspu.read_int
read_string = _vspu.read_string
write_bytes = _vspu.write_bytes
write_int = _vspu.write_int
write_string = _vspu.write_string
try:
unicode
except NameError:
unicode = str
try:
BaseException
except NameError:
# BaseException not defined until Python 2.5
BaseException = Exception
DEBUG = os.environ.get('DEBUG_REPL') is not None
__all__ = ['ReplBackend', 'BasicReplBackend', 'BACKEND']
def _debug_write(out):
if DEBUG:
sys.__stdout__.write(out)
sys.__stdout__.flush()
class SafeSendLock(object):
"""a lock which ensures we're released if we take a KeyboardInterrupt exception acquiring it"""
def __init__(self):
self.lock = thread.allocate_lock()
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, tb):
self.release()
def acquire(self):
try:
self.lock.acquire()
except KeyboardInterrupt:
try:
self.lock.release()
except:
pass
raise
def release(self):
self.lock.release()
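# --- Illustrative sketch (not part of the original file) ---
# SafeSendLock supports the context-manager protocol, so callers can guard a
# socket write without leaking the lock on KeyboardInterrupt:
def _example_guarded_send(lock, send):
    with lock:          # released via __exit__ even if interrupted
        send()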
def _command_line_to_args_list(cmdline):
"""splits a string into a list using Windows command line syntax."""
args_list = []
if cmdline and cmdline.strip():
from ctypes import c_int, c_voidp, c_wchar_p
from ctypes import byref, POINTER, WinDLL
clta = WinDLL('shell32').CommandLineToArgvW
clta.argtypes = [c_wchar_p, POINTER(c_int)]
clta.restype = POINTER(c_wchar_p)
lf = WinDLL('kernel32').LocalFree
lf.argtypes = [c_voidp]
pNumArgs = c_int()
r = clta(cmdline, byref(pNumArgs))
if r:
for index in range(0, pNumArgs.value):
if sys.hexversion >= 0x030000F0:
argval = r[index]
else:
argval = r[index].encode('ascii', 'replace')
args_list.append(argval)
lf(r)
else:
sys.stderr.write('Error parsing script arguments:\n')
sys.stderr.write(cmdline + '\n')
return args_list
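# --- Illustrative sketch (not part of the original file) ---
# _command_line_to_args_list follows Windows CommandLineToArgvW splitting, so
# a quoted path stays a single argument. Calling it requires Windows, since
# shell32 is loaded via ctypes.WinDLL.
def _example_split_command_line():
    # expected result on Windows: ['C:\\My Scripts\\run.py', '--verbose']
    return _command_line_to_args_list('"C:\\My Scripts\\run.py" --verbose')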
class UnsupportedReplException(Exception):
def __init__(self, reason):
self.reason = reason
# save the start_new_thread so we won't debug/break into the REPL comm thread.
start_new_thread = thread.start_new_thread
class ReplBackend(object):
"""back end for executing REPL code. This base class handles all of the
communication with the remote process while derived classes implement the
actual inspection and introspection."""
_MRES = to_bytes('MRES')
_SRES = to_bytes('SRES')
_MODS = to_bytes('MODS')
_IMGD = to_bytes('IMGD')
_PRPC = to_bytes('PRPC')
_RDLN = to_bytes('RDLN')
_STDO = to_bytes('STDO')
_STDE = to_bytes('STDE')
_DBGA = to_bytes('DBGA')
_DETC = to_bytes('DETC')
_DPNG = to_bytes('DPNG')
_DXAM = to_bytes('DXAM')
_MERR = to_bytes('MERR')
_SERR = to_bytes('SERR')
_ERRE = to_bytes('ERRE')
_EXIT = to_bytes('EXIT')
_DONE = to_bytes('DONE')
_MODC = to_bytes('MODC')
def __init__(self):
import threading
self.conn = None
self.send_lock = SafeSendLock()
self.input_event = threading.Lock()
self.input_event.acquire() # lock starts acquired (we use it like a manual reset event)
self.input_string = None
self.exit_requested = False
def connect(self, port):
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect(('127.0.0.1', port))
# start a new thread for communicating w/ the remote process
start_new_thread(self._repl_loop, ())
def connect_using_socket(self, socket):
self.conn = socket
start_new_thread(self._repl_loop, ())
def _repl_loop(self):
"""loop on created thread which processes communicates with the REPL window"""
try:
while True:
if self.check_for_exit_repl_loop():
break
# we receive a series of 4 byte commands. Each command then
                # has its own format which we must parse before continuing to
# the next command.
self.flush()
self.conn.settimeout(10)
# 2.x raises SSLError in case of timeout (http://bugs.python.org/issue10272)
if SSLError:
timeout_exc_types = (socket.timeout, SSLError)
else:
timeout_exc_types = socket.timeout
try:
inp = read_bytes(self.conn, 4)
except timeout_exc_types:
r, w, x = select.select([], [], [self.conn], 0)
if x:
                        # an exception event has occurred on the socket...
raise
continue
self.conn.settimeout(None)
if inp == '':
break
self.flush()
cmd = ReplBackend._COMMANDS.get(inp)
if cmd is not None:
cmd(self)
except:
_debug_write('error in repl loop')
_debug_write(traceback.format_exc())
self.exit_process()
time.sleep(2) # try and exit gracefully, then interrupt main if necessary
if sys.platform == 'cli':
# just kill us as fast as possible
import System
System.Environment.Exit(1)
self.interrupt_main()
def check_for_exit_repl_loop(self):
return False
def _cmd_run(self):
"""runs the received snippet of code"""
self.run_command(read_string(self.conn))
def _cmd_abrt(self):
"""aborts the current running command"""
# abort command, interrupts execution of the main thread.
self.interrupt_main()
def _cmd_exit(self):
"""exits the interactive process"""
self.exit_requested = True
self.exit_process()
def _cmd_mems(self):
"""gets the list of members available for the given expression"""
expression = read_string(self.conn)
try:
name, inst_members, type_members = self.get_members(expression)
except:
with self.send_lock:
write_bytes(self.conn, ReplBackend._MERR)
_debug_write('error in eval')
_debug_write(traceback.format_exc())
else:
with self.send_lock:
write_bytes(self.conn, ReplBackend._MRES)
write_string(self.conn, name)
self._write_member_dict(inst_members)
self._write_member_dict(type_members)
def _cmd_sigs(self):
"""gets the signatures for the given expression"""
expression = read_string(self.conn)
try:
sigs = self.get_signatures(expression)
except:
with self.send_lock:
write_bytes(self.conn, ReplBackend._SERR)
_debug_write('error in eval')
_debug_write(traceback.format_exc())
else:
with self.send_lock:
write_bytes(self.conn, ReplBackend._SRES)
# single overload
write_int(self.conn, len(sigs))
for doc, args, vargs, varkw, defaults in sigs:
# write overload
write_string(self.conn, (doc or '')[:4096])
arg_count = len(args) + (vargs is not None) + (varkw is not None)
write_int(self.conn, arg_count)
def_values = [''] * (len(args) - len(defaults)) + ['=' + d for d in defaults]
for arg, def_value in zip(args, def_values):
write_string(self.conn, (arg or '') + def_value)
if vargs is not None:
write_string(self.conn, '*' + vargs)
if varkw is not None:
write_string(self.conn, '**' + varkw)
def _cmd_setm(self):
global exec_mod
"""sets the current module which code will execute against"""
mod_name = read_string(self.conn)
self.set_current_module(mod_name)
def _cmd_sett(self):
"""sets the current thread and frame which code will execute against"""
thread_id = read_int(self.conn)
frame_id = read_int(self.conn)
frame_kind = read_int(self.conn)
self.set_current_thread_and_frame(thread_id, frame_id, frame_kind)
def _cmd_mods(self):
"""gets the list of available modules"""
try:
res = self.get_module_names()
res.sort()
except:
res = []
with self.send_lock:
write_bytes(self.conn, ReplBackend._MODS)
write_int(self.conn, len(res))
for name, filename in res:
write_string(self.conn, name)
write_string(self.conn, filename)
def _cmd_inpl(self):
"""handles the input command which returns a string of input"""
self.input_string = read_string(self.conn)
self.input_event.release()
def _cmd_excf(self):
"""handles executing a single file"""
filename = read_string(self.conn)
args = read_string(self.conn)
self.execute_file(filename, args)
def _cmd_excx(self):
"""handles executing a single file, module or process"""
filetype = read_string(self.conn)
filename = read_string(self.conn)
args = read_string(self.conn)
self.execute_file_ex(filetype, filename, args)
def _cmd_debug_attach(self):
import visualstudio_py_debugger
port = read_int(self.conn)
id = read_string(self.conn)
debug_options = visualstudio_py_debugger.parse_debug_options(read_string(self.conn))
self.attach_process(port, id, debug_options)
_COMMANDS = {
to_bytes('run '): _cmd_run,
to_bytes('abrt'): _cmd_abrt,
to_bytes('exit'): _cmd_exit,
to_bytes('mems'): _cmd_mems,
to_bytes('sigs'): _cmd_sigs,
to_bytes('mods'): _cmd_mods,
to_bytes('setm'): _cmd_setm,
to_bytes('sett'): _cmd_sett,
to_bytes('inpl'): _cmd_inpl,
to_bytes('excf'): _cmd_excf,
to_bytes('excx'): _cmd_excx,
to_bytes('dbga'): _cmd_debug_attach,
}
def _write_member_dict(self, mem_dict):
write_int(self.conn, len(mem_dict))
for name, type_name in mem_dict.items():
write_string(self.conn, name)
write_string(self.conn, type_name)
def on_debugger_detach(self):
with self.send_lock:
write_bytes(self.conn, ReplBackend._DETC)
def init_debugger(self):
from os import path
sys.path.append(path.dirname(__file__))
import visualstudio_py_debugger
visualstudio_py_debugger.DONT_DEBUG.append(path.normcase(__file__))
new_thread = visualstudio_py_debugger.new_thread()
sys.settrace(new_thread.trace_func)
visualstudio_py_debugger.intercept_threads(True)
def send_image(self, filename):
with self.send_lock:
write_bytes(self.conn, ReplBackend._IMGD)
write_string(self.conn, filename)
def write_png(self, image_bytes):
with self.send_lock:
write_bytes(self.conn, ReplBackend._DPNG)
write_int(self.conn, len(image_bytes))
write_bytes(self.conn, image_bytes)
def write_xaml(self, xaml_bytes):
with self.send_lock:
write_bytes(self.conn, ReplBackend._DXAM)
write_int(self.conn, len(xaml_bytes))
write_bytes(self.conn, xaml_bytes)
def send_prompt(self, ps1, ps2, update_all = True):
"""sends the current prompt to the interactive window"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._PRPC)
write_string(self.conn, ps1)
write_string(self.conn, ps2)
write_int(self.conn, update_all)
def send_error(self):
"""reports that an error occured to the interactive window"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._ERRE)
def send_exit(self):
"""reports the that the REPL process has exited to the interactive window"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._EXIT)
def send_command_executed(self):
with self.send_lock:
write_bytes(self.conn, ReplBackend._DONE)
def send_modules_changed(self):
with self.send_lock:
write_bytes(self.conn, ReplBackend._MODC)
def read_line(self):
"""reads a line of input from standard input"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._RDLN)
self.input_event.acquire()
return self.input_string
def write_stdout(self, value):
"""writes a string to standard output in the remote console"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._STDO)
write_string(self.conn, value)
def write_stderr(self, value):
"""writes a string to standard input in the remote console"""
with self.send_lock:
write_bytes(self.conn, ReplBackend._STDE)
write_string(self.conn, value)
################################################################
# Implementation of execution, etc...
def execution_loop(self):
"""starts processing execution requests"""
raise NotImplementedError
def run_command(self, command):
"""runs the specified command which is a string containing code"""
raise NotImplementedError
def execute_file(self, filename, args):
"""executes the given filename as the main module"""
return self.execute_file_ex('script', filename, args)
def execute_file_ex(self, filetype, filename, args):
"""executes the given filename as a 'script', 'module' or 'process'."""
raise NotImplementedError
def interrupt_main(self):
"""aborts the current running command"""
raise NotImplementedError
def exit_process(self):
"""exits the REPL process"""
raise NotImplementedError
def get_members(self, expression):
"""returns a tuple of the type name, instance members, and type members"""
raise NotImplementedError
def get_signatures(self, expression):
"""returns doc, args, vargs, varkw, defaults."""
raise NotImplementedError
def set_current_module(self, module):
"""sets the module which code executes against"""
raise NotImplementedError
def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind):
"""sets the current thread and frame which code will execute against"""
raise NotImplementedError
def get_module_names(self):
"""returns a list of module names"""
raise NotImplementedError
def flush(self):
"""flushes the stdout/stderr buffers"""
raise NotImplementedError
def attach_process(self, port, debugger_id, debug_options):
"""starts processing execution requests"""
raise NotImplementedError
def exit_work_item():
sys.exit(0)
if sys.platform == 'cli':
# We need special handling to reset the abort for keyboard interrupt exceptions
class ReplAbortException(Exception): pass
import clr
clr.AddReference('Microsoft.Dynamic')
clr.AddReference('Microsoft.Scripting')
clr.AddReference('IronPython')
from Microsoft.Scripting import KeyboardInterruptException
from Microsoft.Scripting import ParamDictionaryAttribute
from IronPython.Runtime.Operations import PythonOps
from IronPython.Runtime import PythonContext
from Microsoft.Scripting import SourceUnit, SourceCodeKind
from Microsoft.Scripting.Runtime import Scope
python_context = clr.GetCurrentRuntime().GetLanguage(PythonContext)
from System import DBNull, ParamArrayAttribute
builtin_method_descriptor_type = type(list.append)
import System
NamespaceType = type(System)
class _OldClass:
pass
_OldClassType = type(_OldClass)
_OldInstanceType = type(_OldClass())
class BasicReplBackend(ReplBackend):
future_bits = 0x3e010 # code flags used to mark future bits
"""Basic back end which executes all Python code in-proc"""
def __init__(self, mod_name = '__main__', launch_file = None):
import threading
ReplBackend.__init__(self)
if mod_name is not None:
if sys.platform == 'cli':
self.exec_mod = Scope()
self.exec_mod.__name__ = '__main__'
else:
sys.modules[mod_name] = self.exec_mod = imp.new_module(mod_name)
else:
self.exec_mod = sys.modules['__main__']
self.launch_file = launch_file
self.code_flags = 0
self.execute_item = None
self.execute_item_lock = threading.Lock()
        self.execute_item_lock.acquire() # lock starts acquired (we use it like a manual reset event)
def init_connection(self):
sys.stdout = _ReplOutput(self, is_stdout = True)
sys.stderr = _ReplOutput(self, is_stdout = False)
sys.stdin = _ReplInput(self)
if sys.platform == 'cli':
import System
System.Console.SetOut(DotNetOutput(self, True))
System.Console.SetError(DotNetOutput(self, False))
def connect(self, port):
ReplBackend.connect(self, port)
self.init_connection()
def connect_using_socket(self, socket):
ReplBackend.connect_using_socket(self, socket)
self.init_connection()
def run_file_as_main(self, filename, args):
f = open(filename, 'rb')
try:
contents = f.read().replace(to_bytes('\r\n'), to_bytes('\n'))
finally:
f.close()
sys.argv = [filename]
sys.argv.extend(_command_line_to_args_list(args))
self.exec_mod.__file__ = filename
if sys.platform == 'cli':
code = python_context.CreateSnippet(contents, None, SourceCodeKind.File)
code.Execute(self.exec_mod)
else:
self.code_flags = 0
real_file = filename
if isinstance(filename, unicode) and unicode is not str:
# http://pytools.codeplex.com/workitem/696
# We need to encode the unicode filename here, Python 2.x will throw trying
# to convert it to ASCII instead of the filesystem encoding.
real_file = filename.encode(sys.getfilesystemencoding())
code = compile(contents, real_file, 'exec')
self.code_flags |= (code.co_flags & BasicReplBackend.future_bits)
exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__)
def python_executor(self, code):
"""we can't close over unbound variables in execute_code_work_item
due to the exec, so we do it here"""
def func():
code.Execute(self.exec_mod)
return func
def execute_code_work_item(self):
_debug_write('Executing: ' + repr(self.current_code))
stripped_code = self.current_code.strip()
if sys.platform == 'cli':
code_to_send = ''
for line in stripped_code.split('\n'):
stripped = line.strip()
if (stripped.startswith('#') or not stripped) and not code_to_send:
continue
code_to_send += line + '\n'
code = python_context.CreateSnippet(code_to_send, None, SourceCodeKind.InteractiveCode)
dispatcher = clr.GetCurrentRuntime().GetLanguage(PythonContext).GetCommandDispatcher()
if dispatcher is not None:
dispatcher(self.python_executor(code))
else:
code.Execute(self.exec_mod)
else:
code = compile(self.current_code, '<stdin>', 'single', self.code_flags)
self.code_flags |= (code.co_flags & BasicReplBackend.future_bits)
exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__)
self.current_code = None
def run_one_command(self, cur_modules, cur_ps1, cur_ps2):
# runs a single iteration of an input, execute file, etc...
        # This is extracted into its own method so we play nice w/ IronPython thread abort.
# Otherwise we have a nested exception hanging around and the 2nd abort doesn't
# work (that's probably an IronPython bug)
try:
new_modules = self._get_cur_module_set()
try:
if new_modules != cur_modules:
self.send_modules_changed()
except:
pass
cur_modules = new_modules
self.execute_item_lock.acquire()
if self.check_for_exit_execution_loop():
return True, None, None, None
if self.execute_item is not None:
try:
self.execute_item()
finally:
self.execute_item = None
try:
self.send_command_executed()
except SocketError:
return True, None, None, None
try:
if cur_ps1 != sys.ps1 or cur_ps2 != sys.ps2:
new_ps1 = str(sys.ps1)
new_ps2 = str(sys.ps2)
self.send_prompt(new_ps1, new_ps2)
cur_ps1 = new_ps1
cur_ps2 = new_ps2
except:
pass
except SystemExit:
self.send_error()
self.send_exit()
# wait for ReplEvaluator to send back exit requested which will indicate
# that all the output has been processed.
while not self.exit_requested:
time.sleep(.25)
return True, None, None, None
except BaseException:
_debug_write('Exception')
exc_type, exc_value, exc_tb = sys.exc_info()
if sys.platform == 'cli':
if isinstance(exc_value.clsException, System.Threading.ThreadAbortException):
try:
System.Threading.Thread.ResetAbort()
except SystemError:
pass
sys.stderr.write('KeyboardInterrupt')
else:
# let IronPython format the exception so users can do -X:ExceptionDetail or -X:ShowClrExceptions
exc_next = self.skip_internal_frames(exc_tb)
sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next)))
else:
exc_next = self.skip_internal_frames(exc_tb)
sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next)))
try:
self.send_error()
except SocketError:
_debug_write('err sending DONE')
return True, None, None, None
return False, cur_modules, cur_ps1, cur_ps2
def skip_internal_frames(self, tb):
"""return the first frame outside of the repl/debugger code"""
while tb is not None and self.is_internal_frame(tb):
tb = tb.tb_next
return tb
def is_internal_frame(self, tb):
"""return true if the frame is from internal code (repl or debugger)"""
f = tb.tb_frame
co = f.f_code
filename = co.co_filename
return filename.endswith('visualstudio_py_repl.py') or filename.endswith('visualstudio_py_debugger.py')
def execution_loop(self):
"""loop on the main thread which is responsible for executing code"""
if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1):
# IronPython doesn't support thread.interrupt_main until 2.7.1
import System
self.main_thread = System.Threading.Thread.CurrentThread
        # save ourselves so global lookups continue to work (required pre-2.6)...
cur_modules = set()
try:
cur_ps1 = sys.ps1
cur_ps2 = sys.ps2
except:
# CPython/IronPython don't set sys.ps1 for non-interactive sessions, Jython and PyPy do
sys.ps1 = cur_ps1 = '>>> '
sys.ps2 = cur_ps2 = '... '
self.send_prompt(cur_ps1, cur_ps2)
# launch the startup script if one has been specified
if self.launch_file:
try:
self.run_file_as_main(self.launch_file, '')
except:
print('error in launching startup script:')
traceback.print_exc()
while True:
exit, cur_modules, cur_ps1, cur_ps2 = self.run_one_command(cur_modules, cur_ps1, cur_ps2)
if exit:
return
def check_for_exit_execution_loop(self):
return False
def execute_script_work_item(self):
self.run_file_as_main(self.current_code, self.current_args)
def execute_module_work_item(self):
new_argv = [''] + _command_line_to_args_list(self.current_args)
old_argv = sys.argv
import runpy
try:
sys.argv = new_argv
runpy.run_module(self.current_code, alter_sys=True)
except Exception:
traceback.print_exc()
finally:
sys.argv = old_argv
def execute_process_work_item(self):
try:
from subprocess import Popen, PIPE, STDOUT
import codecs
out_codec = codecs.lookup(sys.stdout.encoding)
proc = Popen(
'"%s" %s' % (self.current_code, self.current_args),
stdout=PIPE,
stderr=STDOUT,
bufsize=0,
)
for line in proc.stdout:
print(out_codec.decode(line, 'replace')[0].rstrip('\r\n'))
except Exception:
traceback.print_exc()
@staticmethod
def _get_cur_module_set():
"""gets the set of modules avoiding exceptions if someone puts something
weird in there"""
try:
return set(sys.modules)
except:
res = set()
for name in sys.modules:
try:
res.add(name)
except:
pass
return res
def run_command(self, command):
self.current_code = command
self.execute_item = self.execute_code_work_item
self.execute_item_lock.release()
def execute_file_ex(self, filetype, filename, args):
self.current_code = filename
self.current_args = args
self.execute_item = getattr(self, 'execute_%s_work_item' % filetype, None)
self.execute_item_lock.release()
def interrupt_main(self):
        # acquire the send lock so we don't interrupt while we're communicating w/ the debugger
with self.send_lock:
if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1):
# IronPython doesn't get thread.interrupt_main until 2.7.1
self.main_thread.Abort(ReplAbortException())
else:
thread.interrupt_main()
def exit_process(self):
self.execute_item = exit_work_item
try:
self.execute_item_lock.release()
except:
pass
sys.exit(0)
def get_members(self, expression):
"""returns a tuple of the type name, instance members, and type members"""
getattr_func = getattr
if not expression:
all_members = {}
if sys.platform == 'cli':
code = python_context.CreateSnippet('vars()', None, SourceCodeKind.AutoDetect)
items = code.Execute(self.exec_mod)
else:
items = self.exec_mod.__dict__
for key, value in items.items():
all_members[key] = self.get_type_name(value)
return '', all_members, {}
else:
if sys.platform == 'cli':
code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect)
val = code.Execute(self.exec_mod)
code = python_context.CreateSnippet('dir(' + expression + ')', None, SourceCodeKind.AutoDetect)
members = code.Execute(self.exec_mod)
code = python_context.CreateSnippet('lambda value, name: getattr(value, name)', None, SourceCodeKind.AutoDetect)
getattr_func = code.Execute(self.exec_mod)
else:
val = eval(expression, self.exec_mod.__dict__, self.exec_mod.__dict__)
members = dir(val)
return self.collect_members(val, members, getattr_func)
def collect_members(self, val, members, getattr_func):
t = type(val)
inst_members = {}
if hasattr(val, '__dict__'):
# collect the instance members
try:
for mem_name in val.__dict__:
mem_t = self._get_member_type(val, mem_name, True, getattr_func)
if mem_t is not None:
inst_members[mem_name] = mem_t
except:
pass
# collect the type members
type_members = {}
for mem_name in members:
if mem_name not in inst_members:
mem_t = self._get_member_type(val, mem_name, False, getattr_func)
if mem_t is not None:
type_members[mem_name] = mem_t
return t.__module__ + '.' + t.__name__, inst_members, type_members
def get_ipy_sig(self, obj, ctor):
args = []
vargs = None
varkw = None
defaults = []
for param in ctor.GetParameters():
if param.IsDefined(ParamArrayAttribute, False):
vargs = param.Name
elif param.IsDefined(ParamDictionaryAttribute, False):
varkw = param.Name
else:
args.append(param.Name)
if param.DefaultValue is not DBNull.Value:
defaults.append(repr(param.DefaultValue))
return obj.__doc__, args, vargs, varkw, tuple(defaults)
def get_signatures(self, expression):
if sys.platform == 'cli':
code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect)
val = code.Execute(self.exec_mod)
else:
val = eval(expression, self.exec_mod.__dict__, self.exec_mod.__dict__)
return self.collect_signatures(val)
def collect_signatures(self, val):
doc = val.__doc__
type_obj = None
if isinstance(val, type) or isinstance(val, _OldClassType):
type_obj = val
val = val.__init__
try:
args, vargs, varkw, defaults = inspect.getargspec(val)
except TypeError:
# we're not doing inspect on a Python function...
if sys.platform == 'cli':
if type_obj is not None:
clr_type = clr.GetClrType(type_obj)
ctors = clr_type.GetConstructors()
return [self.get_ipy_sig(type_obj, ctor) for ctor in ctors]
elif type(val) is types.BuiltinFunctionType:
return [self.get_ipy_sig(target, target.Targets[0]) for target in val.Overloads.Functions]
elif type(val) is builtin_method_descriptor_type:
val = PythonOps.GetBuiltinMethodDescriptorTemplate(val)
return [self.get_ipy_sig(target, target.Targets[0]) for target in val.Overloads.Functions]
raise
remove_self = type_obj is not None or (type(val) is types.MethodType and
((sys.version_info >= (3,) and val.__self__ is not None) or
(sys.version_info < (3,) and val.im_self is not None)))
if remove_self:
# remove self for instance methods and types
args = args[1:]
if defaults is not None:
defaults = [repr(default) for default in defaults]
else:
defaults = []
return [(doc, args, vargs, varkw, defaults)]
def set_current_module(self, module):
mod = sys.modules.get(module)
if mod is not None:
_debug_write('Setting module to ' + module)
if sys.platform == 'cli':
self.exec_mod = clr.GetClrType(type(sys)).GetProperty('Scope', System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance).GetValue(sys, ())
else:
self.exec_mod = mod
else:
_debug_write('Unknown module ' + module)
def get_module_names(self):
res = []
for name, module in sys.modules.items():
try:
if name != 'visualstudio_py_repl' and name != '$visualstudio_py_debugger':
if sys.platform == 'cli' and type(module) is NamespaceType:
self.get_namespaces(name, module, res)
else:
filename = getattr(module, '__file__', '') or ''
res.append((name, filename))
except:
pass
return res
def get_namespaces(self, basename, namespace, names):
names.append((basename, ''))
try:
for name in dir(namespace):
new_name = basename + '.' + name
new_namespace = getattr(namespace, name)
if type(new_namespace) is NamespaceType:
self.get_namespaces(new_name, new_namespace, names)
except:
pass
def flush(self):
sys.stdout.flush()
def do_detach(self):
import visualstudio_py_debugger
visualstudio_py_debugger.DETACH_CALLBACKS.remove(self.do_detach)
self.on_debugger_detach()
def attach_process(self, port, debugger_id, debug_options):
def execute_attach_process_work_item():
import visualstudio_py_debugger
visualstudio_py_debugger.DETACH_CALLBACKS.append(self.do_detach)
visualstudio_py_debugger.attach_process(port, debugger_id, debug_options, report=True, block=True)
self.execute_item = execute_attach_process_work_item
self.execute_item_lock.release()
@staticmethod
def get_type_name(val):
try:
mem_t = type(val)
mem_t_name = mem_t.__module__ + '.' + mem_t.__name__
return mem_t_name
except:
pass
@staticmethod
def _get_member_type(inst, name, from_dict, getattr_func = None):
try:
if from_dict:
val = inst.__dict__[name]
elif type(inst) is _OldInstanceType:
val = getattr_func(inst.__class__, name)
else:
val = getattr_func(type(inst), name)
mem_t_name = BasicReplBackend.get_type_name(val)
return mem_t_name
except:
if not from_dict:
try:
return BasicReplBackend.get_type_name(getattr_func(inst, name))
except:
pass
return
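# --- Illustrative sketch (not part of the original file) ---
# get_members() drives completion in the REPL window: given an expression it
# returns (type_name, instance_members, type_members), each member dict
# mapping name -> type name. `backend` is assumed to be a BasicReplBackend.
def _example_list_globals(backend):
    type_name, inst_members, type_members = backend.get_members('')
    return sorted(inst_members)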
class DebugReplBackend(BasicReplBackend):
def __init__(self, debugger):
BasicReplBackend.__init__(self, None, None)
self.debugger = debugger
self.thread_id = None
self.frame_id = None
self.frame_kind = None
self.disconnect_requested = False
def init_connection(self):
sys.stdout = _ReplOutput(self, is_stdout = True, old_out = sys.stdout)
sys.stderr = _ReplOutput(self, is_stdout = False, old_out = sys.stderr)
if sys.platform == 'cli':
import System
self.old_cli_stdout = System.Console.Out
self.old_cli_stderr = System.Console.Error
System.Console.SetOut(DotNetOutput(self, True, System.Console.Out))
System.Console.SetError(DotNetOutput(self, False, System.Console.Error))
def connect_from_debugger(self, port):
ReplBackend.connect(self, port)
self.init_connection()
def connect_from_debugger_using_socket(self, socket):
ReplBackend.connect_using_socket(self, socket)
self.init_connection()
def disconnect_from_debugger(self):
sys.stdout = sys.stdout.old_out
sys.stderr = sys.stderr.old_out
if sys.platform == 'cli':
System.Console.SetOut(self.old_cli_stdout)
System.Console.SetError(self.old_cli_stderr)
del self.old_cli_stdout
del self.old_cli_stderr
# this tells both _repl_loop and execution_loop, each
# running on its own worker thread, to exit
self.disconnect_requested = True
self.execute_item_lock.release()
def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind):
self.thread_id = thread_id
self.frame_id = frame_id
self.frame_kind = frame_kind
self.exec_mod = None
def execute_code_work_item(self):
if self.exec_mod is not None:
BasicReplBackend.execute_code_work_item(self)
else:
try:
self.debugger.execute_code_no_report(self.current_code, self.thread_id, self.frame_id, self.frame_kind)
finally:
self.current_code = None
def get_members(self, expression):
"""returns a tuple of the type name, instance members, and type members"""
if self.exec_mod is not None:
return BasicReplBackend.get_members(self, expression)
else:
thread, cur_frame = self.debugger.get_thread_and_frame(self.thread_id, self.frame_id, self.frame_kind)
return self.get_members_for_frame(expression, thread, cur_frame, self.frame_kind)
def get_signatures(self, expression):
"""returns doc, args, vargs, varkw, defaults."""
if self.exec_mod is not None:
return BasicReplBackend.get_signatures(self, expression)
else:
thread, cur_frame = self.debugger.get_thread_and_frame(self.thread_id, self.frame_id, self.frame_kind)
return self.get_signatures_for_frame(expression, thread, cur_frame, self.frame_kind)
def get_members_for_frame(self, expression, thread, cur_frame, frame_kind):
"""returns a tuple of the type name, instance members, and type members"""
getattr_func = getattr
if not expression:
all_members = {}
if sys.platform == 'cli':
code = python_context.CreateSnippet('vars()', None, SourceCodeKind.AutoDetect)
globals = code.Execute(Scope(cur_frame.f_globals))
locals = code.Execute(Scope(thread.get_locals(cur_frame, frame_kind)))
else:
globals = cur_frame.f_globals
locals = thread.get_locals(cur_frame, frame_kind)
for key, value in globals.items():
all_members[key] = self.get_type_name(value)
for key, value in locals.items():
all_members[key] = self.get_type_name(value)
return '', all_members, {}
else:
if sys.platform == 'cli':
scope = Scope(cur_frame.f_globals)
code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect)
val = code.Execute(scope)
code = python_context.CreateSnippet('dir(' + expression + ')', None, SourceCodeKind.AutoDetect)
members = code.Execute(scope)
code = python_context.CreateSnippet('lambda value, name: getattr(value, name)', None, SourceCodeKind.AutoDetect)
getattr_func = code.Execute(scope)
else:
val = eval(expression, cur_frame.f_globals, thread.get_locals(cur_frame, frame_kind))
members = dir(val)
return self.collect_members(val, members, getattr_func)
def get_signatures_for_frame(self, expression, thread, cur_frame, frame_kind):
if sys.platform == 'cli':
code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect)
val = code.Execute(Scope(cur_frame.f_globals))
else:
val = eval(expression, cur_frame.f_globals, thread.get_locals(cur_frame, frame_kind))
return self.collect_signatures(val)
def set_current_module(self, module):
if module == '<CurrentFrame>':
self.exec_mod = None
else:
BasicReplBackend.set_current_module(self, module)
def check_for_exit_repl_loop(self):
return self.disconnect_requested
def check_for_exit_execution_loop(self):
return self.disconnect_requested
class _ReplOutput(object):
"""file like object which redirects output to the repl window."""
errors = None
def __init__(self, backend, is_stdout, old_out = None):
self.name = "<stdout>" if is_stdout else "<stderr>"
self.backend = backend
self.old_out = old_out
self.is_stdout = is_stdout
self.pipe = None
def flush(self):
if self.old_out:
self.old_out.flush()
def fileno(self):
if self.pipe is None:
self.pipe = os.pipe()
thread.start_new_thread(self.pipe_thread, (), {})
return self.pipe[1]
def pipe_thread(self):
while True:
data = os.read(self.pipe[0], 1)
if data == '\r':
data = os.read(self.pipe[0], 1)
if data == '\n':
self.write('\n')
else:
self.write('\r' + data)
else:
self.write(data)
@property
def encoding(self):
return 'utf8'
def writelines(self, lines):
for line in lines:
self.write(line)
self.write('\n')
def write(self, value):
_debug_write('printing ' + repr(value) + '\n')
if self.is_stdout:
self.backend.write_stdout(value)
else:
self.backend.write_stderr(value)
if self.old_out:
self.old_out.write(value)
def isatty(self):
return True
def next(self):
pass
class _ReplInput(object):
"""file like object which redirects input from the repl window"""
def __init__(self, backend):
self.backend = backend
def readline(self):
return self.backend.read_line()
def readlines(self, size = None):
res = []
while True:
line = self.readline()
if line is not None:
res.append(line)
else:
break
return res
def xreadlines(self):
return self
def write(self, *args):
raise IOError("File not open for writing")
def flush(self): pass
def isatty(self):
return True
def __iter__(self):
return self
def next(self):
return self.readline()
if sys.platform == 'cli':
import System
class DotNetOutput(System.IO.TextWriter):
def __new__(cls, backend, is_stdout, old_out=None):
return System.IO.TextWriter.__new__(cls)
def __init__(self, backend, is_stdout, old_out=None):
self.backend = backend
self.is_stdout = is_stdout
self.old_out = old_out
def Write(self, value, *args):
if self.old_out:
self.old_out.Write(value, *args)
if not args:
if type(value) is str or type(value) is System.Char:
if self.is_stdout:
self.backend.write_stdout(str(value).replace('\r\n', '\n'))
else:
self.backend.write_stderr(str(value).replace('\r\n', '\n'))
else:
super(DotNetOutput, self).Write.Overloads[object](value)
else:
self.Write(System.String.Format(value, *args))
def WriteLine(self, value, *args):
if self.old_out:
self.old_out.WriteLine(value, *args)
if not args:
if type(value) is str or type(value) is System.Char:
if self.is_stdout:
self.backend.write_stdout(str(value).replace('\r\n', '\n') + '\n')
else:
self.backend.write_stderr(str(value).replace('\r\n', '\n') + '\n')
else:
super(DotNetOutput, self).WriteLine.Overloads[object](value)
else:
self.WriteLine(System.String.Format(value, *args))
@property
def Encoding(self):
return System.Text.Encoding.UTF8
BACKEND = None
def _run_repl():
from optparse import OptionParser
parser = OptionParser(prog='repl', description='Process REPL options')
parser.add_option('--port', dest='port',
help='the port to connect back to')
parser.add_option('--launch_file', dest='launch_file',
help='the script file to run on startup')
parser.add_option('--execution_mode', dest='backend',
help='the backend to use')
parser.add_option('--enable-attach', dest='enable_attach',
action="store_true", default=False,
help='enable attaching the debugger via $attach')
(options, args) = parser.parse_args()
# kick off repl
# make us available under our "normal" name, not just __main__ which we'll likely replace.
sys.modules['visualstudio_py_repl'] = sys.modules['__main__']
global __name__
__name__ = 'visualstudio_py_repl'
backend_type = BasicReplBackend
backend_error = None
if options.backend is not None and options.backend.lower() != 'standard':
try:
split_backend = options.backend.split('.')
backend_mod_name = '.'.join(split_backend[:-1])
backend_name = split_backend[-1]
backend_type = getattr(__import__(backend_mod_name), backend_name)
except UnsupportedReplException:
backend_error = sys.exc_info()[1].reason
except:
backend_error = traceback.format_exc()
# fix sys.path so that cwd is where the project lives.
sys.path[0] = '.'
# remove all of our parsed args in case we have a launch file that cares...
sys.argv = args or ['']
global BACKEND
BACKEND = backend_type(launch_file=options.launch_file)
BACKEND.connect(int(options.port))
if options.enable_attach:
BACKEND.init_debugger()
if backend_error is not None:
sys.stderr.write('Error using selected REPL back-end:\n')
sys.stderr.write(backend_error + '\n')
sys.stderr.write('Using standard backend instead\n')
# execute code on the main thread which we can interrupt
BACKEND.execution_loop()
if __name__ == '__main__':
try:
_run_repl()
except:
if DEBUG:
_debug_write(traceback.format_exc())
_debug_write('exiting')
input()
raise
| apache-2.0 | -5,157,967,153,413,950,000 | 35.136628 | 220 | 0.553053 | false |
atopuzov/nitro-python | nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwxmlerrorpage.py | 3 | 6286 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwxmlerrorpage(base_resource) :
""" Configuration for xml error page resource. """
def __init__(self) :
self._name = ""
self._src = ""
self._comment = ""
self._overwrite = False
self._response = ""
@property
def name(self) :
ur"""Indicates name of the imported xml error page to be removed.
<br/>Minimum length = 1<br/>Maximum length = 31.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Indicates name of the imported xml error page to be removed.
<br/>Minimum length = 1<br/>Maximum length = 31
"""
try :
self._name = name
except Exception as e:
raise e
@property
def src(self) :
ur"""URL (protocol, host, path, and name) for the location at which to store the imported XML error object.
NOTE: The import fails if the object to be imported is on an HTTPS server that requires client certificate authentication for access.<br/>Minimum length = 1<br/>Maximum length = 2047.
"""
try :
return self._src
except Exception as e:
raise e
@src.setter
def src(self, src) :
ur"""URL (protocol, host, path, and name) for the location at which to store the imported XML error object.
NOTE: The import fails if the object to be imported is on an HTTPS server that requires client certificate authentication for access.<br/>Minimum length = 1<br/>Maximum length = 2047
"""
try :
self._src = src
except Exception as e:
raise e
@property
def comment(self) :
ur"""Any comments to preserve information about the XML error object.<br/>Maximum length = 128.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
ur"""Any comments to preserve information about the XML error object.<br/>Maximum length = 128
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def overwrite(self) :
ur"""Overwrite any existing XML error object of the same name.
"""
try :
return self._overwrite
except Exception as e:
raise e
@overwrite.setter
def overwrite(self, overwrite) :
ur"""Overwrite any existing XML error object of the same name.
"""
try :
self._overwrite = overwrite
except Exception as e:
raise e
@property
def response(self) :
try :
return self._response
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwxmlerrorpage_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwxmlerrorpage
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete appfwxmlerrorpage.
"""
try :
if type(resource) is not list :
deleteresource = appfwxmlerrorpage()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
except Exception as e :
raise e
@classmethod
def Import(cls, client, resource) :
ur""" Use this API to Import appfwxmlerrorpage.
"""
try :
if type(resource) is not list :
Importresource = appfwxmlerrorpage()
Importresource.src = resource.src
Importresource.name = resource.name
Importresource.comment = resource.comment
Importresource.overwrite = resource.overwrite
return Importresource.perform_operation(client,"Import")
except Exception as e :
raise e
@classmethod
def change(cls, client, resource) :
ur""" Use this API to change appfwxmlerrorpage.
"""
try :
if type(resource) is not list :
changeresource = appfwxmlerrorpage()
changeresource.name = resource.name
return changeresource.perform_operation(client,"update")
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the appfwxmlerrorpage resources that are configured on netscaler.
"""
try :
if not name :
obj = appfwxmlerrorpage()
response = obj.get_resources(client, option_)
if type(name) != cls :
if type(name) is not list :
obj = appfwxmlerrorpage()
obj.name = name
response = obj.get_resource(client, option_)
return response
except Exception as e :
raise e
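# Illustrative sketch (not part of the SDK; the URL and names are made up):
# typical use of the classmethods above, assuming `client` is an already
# connected nitro_service session.
#
#     page = appfwxmlerrorpage()
#     page.name = "custom_xml_error"
#     page.src = "http://10.0.0.5/pages/xml_error.html"
#     page.overwrite = True
#     appfwxmlerrorpage.Import(client, page)
#     appfwxmlerrorpage.delete(client, "custom_xml_error")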
class appfwxmlerrorpage_response(base_response) :
def __init__(self, length=1) :
self.appfwxmlerrorpage = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwxmlerrorpage = [appfwxmlerrorpage() for _ in range(length)]
| apache-2.0 | -261,051,988,979,751,780 | 28.511737 | 187 | 0.699809 | false |
mavteam13/paparazzi | sw/tools/tcp_aircraft_server/phoenix/xmlobject.py | 86 | 17644 | #Copyright 2014, Antoine Drouin
"""
Allows XML files to be operated on like Python objects.
Features:
- load XML source from file pathnames, readable file objects or raw strings
- add, get and set tag attributes like with python attributes
- iterate over nodes
- save the modified XMLFile or XMLObject to file
Example XML file::
<?xml version="1.0" encoding="UTF-8"?>
<rapsheets>
<person name="John Smith" age="42">
<!-- John Smith has an appeal in process against his last conviction -->
<crime name="Armed robbery" date="March 11, 1994"/>
<crime name="Aggravated burglary" date="June 9, 2001"/>
</person>
<person name="Mary Jones" age="33">
<crime name="Prostitution" date="January 8, 1997"/>
<crime name="Selling heroin" date="September 4, 2002"/>
<crime name="Manslaughter" date="December 21, 2004"/>
</person>
</rapsheets>
Example usage::
>>> from xmlobject import XMLFile
>>> x = XMLFile(path="sample.xml")
>>> print x
<xmlobj.XMLFile instance at 0xb7ccc52c>
>>> print x.root
<XMLNode: rapsheets>
>>> print x.root._children
[<XMLNode: text>, <XMLNode: person>, <XMLNode: text>,
<XMLNode: person>, <XMLNode: text>]
>>> print x.root.person
[<XMLNode: person>, <XMLNode: person>]
>>> print x.root.person[0].name
John Smith
>>> john = x.root.person[0]
>>> john.height = 184
>>> c = john._addNode("crime")
>>> c.name = "Grand Theft Auto"
>>> c.date = "4 May, 2005"
>>> print x.toxml()
<?xml version="1.0" ?>
<rapsheets>
<person age="42" height="184" name="John Smith">
<!-- John Smith has an appeal in process against his last conviction -->
<crime date="March 11, 1994" name="Armed robbery"/>
<crime date="June 9, 2001" name="Aggravated burglary"/>
<crime date="4 May, 2005" name="Grand Theft Auto"/></person>
<person age="33" name="Mary Jones">
<crime date="January 8, 1997" name="Prostitution"/>
<crime date="September 4, 2002" name="Selling heroin"/>
<crime date="December 21, 2004" name="Manslaughter"/>
</person>
</rapsheets>
>>>
"""
import sys, os
import xml.dom
import xml.dom.minidom
from xml.dom.minidom import parse, parseString, getDOMImplementation
impl = getDOMImplementation()
def ensure_list(obj):
"""
ensures the object passed is a list, so it is iterable.
useful workaround until i decide if XMLNode.foo should always
return a list of foo, even if there is only one foo child
"""
if len(obj):
return obj
else:
return [obj]
class MissingRootTag(Exception):
"""root tag name was not given"""
class InvalidXML(Exception):
"""failed to parse XML input"""
class CannotSave(Exception):
"""unable to save"""
class InvalidNode(Exception):
    """not a valid minidom node"""
class IncorrectRootTag(Exception):
    """root tag does not match the name given to XMLFile"""
class XMLFile:
"""
Allows an xml file to be viewed and operated on
as a python object.
(If you're viewing the epydoc-generated HTML documentation, click the 'show private'
link at the top right of this page to see all the methods)
Holds the root node in the .root attribute, also in an attribute
with the same name as this root node.
"""
def __init__(self, **kw):
"""
Create an XMLFile
Keywords:
- path - a pathname from which the file can be read
- file - an open file object from which the raw xml
can be read
- raw - the raw xml itself
- root - name of root tag, if not reading content
Usage scenarios:
1. Working with existing content - you must supply input in
one of the following ways:
- 'path' must be an existing file, or
- 'file' must be a readable file object, or
- 'raw' must contain raw xml as a string
2. Creating whole new content - you must give the name
of the root tag in the 'root' keyword
Notes:
- Keyword precedence governing existing content is:
1. path (if existing file)
2. file
3. raw
- If working with existing content:
- if the 'root' is given, then the content's toplevel tag
MUST match the value given for 'root'
- trying to _save will raise an exception unless 'path'
has been given
- if not working with existing content:
- 'root' must be given
- _save() will raise an exception unless 'path' has been given
"""
path = kw.get("path", None)
fobj = kw.get("file", None)
raw = kw.get("raw", None)
root = kw.get("root", None)
if path:
self.path = path
try:
fobj = file(path)
except IOError:
pass
else:
self.path = None
if fobj:
raw = fobj.read()
if raw:
self.dom = xml.dom.minidom.parseString(raw)
else:
# could not source content, so create a blank slate
if not root:
# in which case, must give a root node name
raise MissingRootTag(
"No existing content, so must specify root")
# ok, create a blank dom
self.dom = impl.createDocument(None, root, None)
# get the root node, save it as attributes 'root' and name of node
rootnode = self.dom.documentElement
# now validate root tag
if root:
if rootnode.nodeName != root:
raise IncorrectRootTag("Gave root='%s', input has root='%s'" % (
root, rootnode.nodeName))
# need this for recursion in XMLNode
self._childrenByName = {}
self._children = []
# add all the child nodes
for child in self.dom.childNodes:
childnode = XMLNode(self, child)
#print "compare %s to %s" % (rootnode, child)
if child == rootnode:
#print "found root"
self.root = childnode
setattr(self, rootnode.nodeName, self.root)
def save(self, where=None, obj=None):
"""
Saves the document.
If argument 'where' is given, saves to it, otherwise
tries to save to the original given 'path' (or barfs)
Value can be a string (taken to be a file path), or an open
file object.
"""
obj = obj or self.dom
if not where:
            if self.path:
                where = self.path
if isinstance(where, str):
where = file(where, "w")
if not where:
raise CannotSave("No save destination, and no original path")
where.write(obj.toxml())
where.flush()
def saveAs(self, path):
"""
save this time, and all subsequent times, to filename 'path'
"""
self.path = path
self.save()
def toxml(self):
return self.dom.toxml()
def __len__(self):
"""
returns number of child nodes
"""
return len(self._children)
def __getitem__(self, idx):
if isinstance(idx, int):
return self._children[idx]
else:
return self._childrenByName[idx]
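# Illustrative sketch (file names and tags below are made up): the two usage
# scenarios described in XMLFile.__init__.
#
#     existing = XMLFile(path="rapsheets.xml")   # operate on existing content
#     print existing.root.person[0].name
#
#     fresh = XMLFile(root="inventory")          # build a new document
#     item = fresh.root._addNode("item")
#     item.sku = "A-100"
#     fresh.saveAs("inventory.xml")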
class XMLNode:
"""
This is the workhorse for the xml object interface
(If you're viewing the epydoc-generated HTML documentation, click the 'show private'
link at the top right of this page to see all the methods)
"""
# http://docs.python.org/reference/lexical_analysis.html#id6
    __RESERVED_WORDS = (
        "and","del","class","from","not","while",
"as","elif","global","or","with","assert","else","if",
"pass","yield","break","except","import","print",
"class","exec","in","raise","continue","finally",
"is","return","def","for","lambda","try"
)
def __init__(self, parent, node):
"""
You shouldn't need to instantiate this directly
"""
self._parent = parent
if isinstance(parent, XMLFile):
self._root = parent
else:
self._root = parent._root
self._node = node
self._childrenByName = {}
self._children = []
# add ourself to parent's children registry
parent._children.append(self)
# the deal with named subtags is that we store the first instance
# as itself, and with second and subsequent instances, we make a list
parentDict = self._parent._childrenByName
# If the name of the node is a python reserved word then captilize it
nodeName = node.nodeName
if nodeName in self.__RESERVED_WORDS:
nodeName = nodeName.upper()
if nodeName not in parentDict:
parentDict[nodeName] = parent.__dict__[nodeName] = self
else:
if isinstance(parentDict[nodeName], XMLNode):
# this is the second child node of a given tag name, so convert
# the instance to a list
parentDict[nodeName] = parent.__dict__[nodeName] = [parentDict[nodeName]]
parentDict[nodeName].append(self)
# figure out our type
self._value = None
if isinstance(node, xml.dom.minidom.Text):
self._type = "text"
self._value = node.nodeValue
elif isinstance(node, xml.dom.minidom.Element):
self._type = "node"
elif isinstance(node, xml.dom.minidom.Comment):
self._type = "comment"
self._value = node.nodeValue
elif isinstance(node, xml.dom.minidom.DocumentType):
#<!DOCTYPE protocol SYSTEM "messages.dtd">
#Ignore doctype, could possibly check it....
pass
else:
raise InvalidNode("node class %s" % node.__class__)
# and wrap all the child nodes
for child in node.childNodes:
XMLNode(self, child)
def _render(self):
"""
Produces well-formed XML of this node's contents,
indented as required
"""
return self._node.toxml()
def __repr__(self):
if self._type == "node":
return "<XMLNode: %s>" % self._node.nodeName
else:
return "<XMLNode: %s>" % self._type
def __getattr__(self, attr):
"""
Fetches an attribute or child node of this tag
If it's an attribute, then returns the attribute value as a string.
If a child node, then:
- if there is only one child node of that name, return it
- if there is more than one child node of that name, return a list
of child nodes of that tag name
Supports some magic attributes:
- _text - the value of the first child node of type text
"""
#print "%s: __getattr__: attr=%s" % (self, attr)
if attr == '_text':
# magic attribute to return text
tnode = self['#text']
if isinstance(tnode, list):
tnode = tnode[0]
return tnode._value
if self._type in ['text', 'comment']:
if attr == '_value':
return self._node.nodeValue
else:
raise AttributeError(attr)
if self._node.hasAttribute(attr):
return self._node.getAttribute(attr)
elif attr in self._childrenByName:
return self._childrenByName[attr]
#elif attr == 'value':
# magic attribute
else:
raise AttributeError(attr)
def __setattr__(self, attr, val):
"""
Change the value of an attribute of this tag
The magic attribute '_text' can be used to set the first child
text node's value
For example::
Consider:
<somenode>
<child>foo</child>
</somenode>
>>> somenode
            <XMLNode: somenode>
            >>> somenode.child
            <XMLNode: child>
>>> somenode.child._text
'foo'
>>> somenode._toxml()
u'<somenode><child>foo</child></somenode>'
>>> somenode.child._text = 'bar'
>>> somenode.child._text
'bar'
>>> somenode.child._toxml()
u'<somenode><child>bar/child></somenode>'
"""
if attr.startswith("_"):
# magic attribute for setting _text
if attr == '_text':
tnode = self['#text']
if isinstance(tnode, list):
tnode = tnode[0]
tnode._node.nodeValue = val
tnode._value = val
return
self.__dict__[attr] = val
elif self._type in ['text', 'comment']:
self._node.nodeValue = val
else:
# discern between attribute and child node
if attr in self._childrenByName:
raise Exception("Attribute Exists")
self._node.setAttribute(attr, str(val))
def _keys(self):
"""
Return a list of attribute names
"""
return list(self._node.attributes.keys())
def _values(self):
"""
Returns a list of (attrname, attrval) tuples for this tag
"""
return [self._node.getAttribute(k) for k in list(self._node.attributes.keys())]
def _items(self):
"""
returns a list of attribute values for this tag
"""
return [(k, self._node.getAttribute(k)) for k in list(self._node.attributes.keys())]
def _has_key(self, k):
"""
returns True if this tag has an attribute of the given name
"""
return self._node.hasAttribute(k) or k in self._childrenByName
def _get(self, k, default=None):
"""
returns the value of attribute k, or default if no such attribute
"""
if self._has_key(k):
return getattr(self, k)
else:
return default
def __len__(self):
"""
returns number of child nodes
"""
return len(self._children)
def __getitem__(self, idx):
"""
if given key is numeric, return the nth child, otherwise
try to return the child tag (or list of child tags) having
the key as the tag name
"""
#print "__getitem__: idx=%s" % str(idx)
if isinstance(idx, slice) or isinstance(idx, int):
return self._children[idx]
elif isinstance(idx, str):
return self._childrenByName[idx]
else:
raise IndexError(idx)
def _addNode(self, child):
"""
Tries to append a child node to the tree, and returns it
Value of 'child' must be one of:
- a string (in which case it is taken to be the name
of the new node's tag)
- a dom object, in which case it will be wrapped and added
- an XMLNode object, in which case it will be added without
wrapping
"""
if isinstance(child, XMLNode):
# add it to our children registry
self._children.append(child)
parentDict = self._childrenByName
nodeName = child._node.nodeName
if nodeName not in parentDict:
                parentDict[nodeName] = self.__dict__[nodeName] = child
else:
if isinstance(parentDict[nodeName], XMLNode):
# this is the second child node of a given tag name, so convert
# the instance to a list
parentDict[nodeName] = self.__dict__[nodeName] = [parentDict[nodeName]]
parentDict[nodeName].append(child)
# and stick it in the dom
self._node.appendChild(child._node)
return child
elif isinstance(child, str):
childNode = self._root.dom.createElement(child)
self._node.appendChild(childNode)
elif isinstance(child, xml.dom.minidom.Element):
childNode = child
child = childNode.nodeName
self._node.appendChild(childNode)
return XMLNode(self, childNode)
def _addText(self, value):
"""
Tries to append a child text node, with the given text, to the tree,
and returns the created node object
"""
childNode = self._root.dom.createTextNode(value)
self._node.appendChild(childNode)
return XMLNode(self, childNode)
def _addComment(self, comment):
"""
Tries to append a child comment node (with the given text value)
        to the tree, and returns the created node object
"""
        childNode = self._root.dom.createComment(comment)
self._node.appendChild(childNode)
return XMLNode(self, childNode)
def _save(self, where=None):
"""
Generates well-formed XML from just this node, and saves it
to a file.
Argument 'where' is either an open file object, or a pathname
If 'where' is not given, then saves the entire document tree.
"""
if not where:
self._root.save()
else:
self._root.save(where, self._node)
def _toxml(self):
"""
renders just this node out to raw xml code
"""
return self._node.toxml()
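# Illustrative sketch (tag names are invented): growing a node with the helpers
# above and reading the text back.
#
#     doc = XMLFile(root="log")
#     entry = doc.root._addNode("entry")
#     entry.level = "info"
#     entry._addText("service started")
#     print entry._text          # -> 'service started'
#     print doc.toxml()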
| gpl-2.0 | -8,518,920,597,523,127,000 | 30.173145 | 92 | 0.55492 | false |
grani/grpc | src/python/grpcio_tests/tests/unit/_rpc_test.py | 21 | 36230 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
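# Illustrative note: the tests below hand this object to
# Future.add_done_callback(); callback.value() then blocks until gRPC invokes
# the callback with the finished Future, which is how a test waits for an RPC
# to terminate without polling.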
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
# TODO(https://github.com/grpc/grpc/issues/8483): test the values
# returned by these methods rather than only "smoke" testing that
# the return after having been called.
servicer_context.is_active()
servicer_context.time_remaining()
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
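# Illustrative note: every handler method above calls self._control.control(),
# so a test that wraps an RPC in `with self._control.pause():` or
# `with self._control.fail():` can hold the server-side handler mid-RPC or make
# it raise; that is what drives the Cancelled*/Expired*/Failed* cases below.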
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming,
request_deserializer, response_serializer, unary_unary,
unary_stream, stream_unary, stream_stream):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.unary_unary = unary_unary
self.unary_stream = unary_stream
self.stream_unary = stream_unary
self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, handler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False, None, None,
self._handler.handle_unary_unary, None, None,
None)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream, None, None)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None, None,
self._handler.handle_stream_unary, None)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True, None, None, None, None, None,
self._handler.handle_stream_stream)
else:
return None
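# Illustrative note: grpc.server() consults service() for each incoming RPC,
# passing the request's method path in handler_call_details; returning None
# (the final branch above) is what makes an unknown path come back as
# StatusCode.UNIMPLEMENTED, as testUnrecognizedMethod verifies.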
def _unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
return channel.unary_stream(
_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
return channel.stream_unary(
_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
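# Illustrative sketch: each helper above returns a grpc multi-callable whose
# three invocation styles the tests exercise; on the unary-unary path, roughly
#
#     stub = channel.unary_unary('/test/UnaryUnary')
#     response = stub(request)                  # blocking call
#     response, call = stub.with_call(request)  # response plus a grpc.Call
#     future = stub.future(request)             # grpc.Future; .result() blocks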
class RPCTest(unittest.TestCase):
def setUp(self):
self._control = test_control.PauseFailControl()
self._handler = _Handler(self._control)
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(self._server_pool)
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(None)
self._server_pool.shutdown(wait=True)
def testUnrecognizedMethod(self):
request = b'abc'
with self.assertRaises(grpc.RpcError) as exception_context:
self._channel.unary_unary('NoSuchMethod')(request)
self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
exception_context.exception.code())
def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request,
metadata=(('test',
'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request,
metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
request = b'\x37\x58'
expected_responses = tuple(
self._handler.handle_unary_stream(request, None))
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSuccessfulStreamRequestBlockingUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response = multi_callable(
request_iterator,
metadata=(
('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request_iterator,
metadata=(
('test',
'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulStreamRequestFutureUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
requests = tuple(b'\x77\x58'
for _ in range(test_constants.STREAM_LENGTH))
expected_responses = tuple(
self._handler.handle_stream_stream(iter(requests), None))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSequentialInvocations(self):
first_request = b'\x07\x08'
second_request = b'\x0809'
expected_first_response = self._handler.handle_unary_unary(
first_request, None)
expected_second_response = self._handler.handle_unary_unary(
second_request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
first_response = multi_callable(
first_request, metadata=(('test', 'SequentialInvocations'),))
second_response = multi_callable(
second_request, metadata=(('test', 'SequentialInvocations'),))
self.assertEqual(expected_first_response, first_response)
self.assertEqual(expected_second_response, second_response)
def testConcurrentBlockingInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = pool.submit(
multi_callable,
request_iterator,
metadata=(('test', 'ConcurrentBlockingInvocations'),))
response_futures[index] = response_future
responses = tuple(response_future.result()
for response_future in response_futures)
pool.shutdown(wait=True)
self.assertSequenceEqual(expected_responses, responses)
def testConcurrentFutureInvocations(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'ConcurrentFutureInvocations'),))
response_futures[index] = response_future
responses = tuple(response_future.result()
for response_future in response_futures)
self.assertSequenceEqual(expected_responses, responses)
def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
request = b'\x67\x68'
expected_response = self._handler.handle_unary_unary(request, None)
response_futures = [None] * test_constants.THREAD_CONCURRENCY
lock = threading.Lock()
test_is_running_cell = [True]
def wrap_future(future):
def wrap():
try:
return future.result()
except grpc.RpcError:
with lock:
if test_is_running_cell[0]:
raise
return None
return wrap
multi_callable = _unary_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
inner_response_future = multi_callable.future(
request,
metadata=(
('test',
'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
outer_response_future = pool.submit(
wrap_future(inner_response_future))
response_futures[index] = outer_response_future
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
self.assertEqual(expected_response, response_future.result())
with lock:
test_is_running_cell[0] = False
def testConsumingOneStreamResponseUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),))
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(
('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingTooManyStreamResponsesStreamRequest(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(
('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH):
next(response_iterator)
for _ in range(test_constants.STREAM_LENGTH):
with self.assertRaises(StopIteration):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.OK, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledUnaryRequestUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
def testCancelledUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request,
metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
self._control.block_until_paused()
response_iterator.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertIs(grpc.StatusCode.CANCELLED,
exception_context.exception.code())
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledStreamRequestUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
self._control.block_until_paused()
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testCancelledStreamRequestStreamResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
response_iterator.cancel()
with self.assertRaises(grpc.RpcError):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testExpiredUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(
('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x17'
callback = _Callback()
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_future.exception().code())
def testExpiredUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def testExpiredStreamRequestBlockingUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(
('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.RpcError)
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredStreamRequestFutureUnaryResponse(self):
requests = tuple(b'\x07\x18'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = _Callback()
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
with self.assertRaises(grpc.FutureTimeoutError):
response_future.result(timeout=test_constants.SHORT_TIMEOUT /
2.0)
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testExpiredStreamRequestStreamResponse(self):
requests = tuple(b'\x67\x18'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def testFailedUnaryRequestBlockingUnaryResponse(self):
request = b'\x37\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
metadata=(
('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
callback = _Callback()
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request,
metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.UNKNOWN,
response_future.exception().code())
self.assertIs(response_future, value_passed_to_callback)
def testFailedUnaryRequestStreamResponse(self):
request = b'\x37\x17'
multi_callable = _unary_stream_multi_callable(self._channel)
with self.assertRaises(grpc.RpcError) as exception_context:
with self._control.fail():
response_iterator = multi_callable(
request,
metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedStreamRequestBlockingUnaryResponse(self):
requests = tuple(b'\x47\x58'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
metadata=(
('test', 'FailedStreamRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedStreamRequestFutureUnaryResponse(self):
requests = tuple(b'\x07\x18'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = _Callback()
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
def testFailedStreamRequestStreamResponse(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'FailedStreamRequestStreamResponse'),))
tuple(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
def testIgnoredUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
multi_callable.future(
request,
metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
def testIgnoredUnaryRequestStreamResponse(self):
request = b'\x37\x17'
multi_callable = _unary_stream_multi_callable(self._channel)
multi_callable(
request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
def testIgnoredStreamRequestFutureUnaryResponse(self):
requests = tuple(b'\x07\x18'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
multi_callable.future(
request_iterator,
metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
def testIgnoredStreamRequestStreamResponse(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
multi_callable(
request_iterator,
metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | 7,213,980,074,858,184,000 | 41.87574 | 80 | 0.639829 | false |
abadger/ansible | lib/ansible/module_utils/facts/virtual/linux.py | 15 | 17328 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
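    # Illustrative note: on a typical KVM guest the two legacy entries named in
    # the class docstring come back as
    #     {'virtualization_type': 'kvm', 'virtualization_role': 'guest'}
    # while the guest_tech/host_tech sets collected below track every detected
    # virtualization technology.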
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
virtual_facts = {}
# We want to maintain compatibility with the old "virtualization_type"
# and "virtualization_role" entries, so we need to track if we found
# them. We won't return them until the end, but if we found them early,
# we should avoid updating them again.
found_virt = False
# But as we go along, we also want to track virt tech the new way.
host_tech = set()
guest_tech = set()
# lxc/docker
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
guest_tech.add('docker')
if not found_virt:
virtual_facts['virtualization_type'] = 'docker'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
guest_tech.add('lxc')
if not found_virt:
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if re.search('/system.slice/containerd.service', line):
guest_tech.add('containerd')
if not found_virt:
virtual_facts['virtualization_type'] = 'containerd'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
# lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ', line_sep='\x00'):
if re.search('container=lxc', line):
guest_tech.add('lxc')
if not found_virt:
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if re.search('container=podman', line):
guest_tech.add('podman')
if not found_virt:
virtual_facts['virtualization_type'] = 'podman'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if re.search('^container=.', line):
guest_tech.add('container')
if not found_virt:
virtual_facts['virtualization_type'] = 'container'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
virtual_facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
host_tech.add('openvz')
if not found_virt:
virtual_facts['virtualization_role'] = 'host'
else:
guest_tech.add('openvz')
if not found_virt:
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
guest_tech.add(systemd_container)
if not found_virt:
virtual_facts['virtualization_type'] = systemd_container
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
# ensure 'container' guest_tech is appropriately set
if guest_tech.intersection(set(['docker', 'lxc', 'podman', 'openvz', 'containerd'])) or systemd_container:
guest_tech.add('container')
if os.path.exists("/proc/xen"):
is_xen_host = False
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
is_xen_host = True
except IOError:
pass
if is_xen_host:
host_tech.add('xen')
if not found_virt:
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'host'
else:
if not found_virt:
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
# assume guest for this block
if not found_virt:
virtual_facts['virtualization_role'] = 'guest'
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family')
if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
found_virt = True
if sys_vendor == 'oVirt':
guest_tech.add('oVirt')
if not found_virt:
virtual_facts['virtualization_type'] = 'oVirt'
found_virt = True
if sys_vendor == 'Red Hat':
if product_family == 'RHV':
guest_tech.add('RHV')
if not found_virt:
virtual_facts['virtualization_type'] = 'RHV'
found_virt = True
elif product_name == 'RHEV Hypervisor':
guest_tech.add('RHEV')
if not found_virt:
virtual_facts['virtualization_type'] = 'RHEV'
found_virt = True
if product_name in ('VMware Virtual Platform', 'VMware7,1'):
guest_tech.add('VMware')
if not found_virt:
virtual_facts['virtualization_type'] = 'VMware'
found_virt = True
if product_name in ('OpenStack Compute', 'OpenStack Nova'):
guest_tech.add('openstack')
if not found_virt:
virtual_facts['virtualization_type'] = 'openstack'
found_virt = True
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
guest_tech.add('xen')
if not found_virt:
virtual_facts['virtualization_type'] = 'xen'
found_virt = True
if bios_vendor == 'innotek GmbH':
guest_tech.add('virtualbox')
if not found_virt:
virtual_facts['virtualization_type'] = 'virtualbox'
found_virt = True
if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
found_virt = True
KVM_SYS_VENDORS = ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
if sys_vendor in KVM_SYS_VENDORS:
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
found_virt = True
if sys_vendor == 'KubeVirt':
guest_tech.add('KubeVirt')
if not found_virt:
virtual_facts['virtualization_type'] = 'KubeVirt'
found_virt = True
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
guest_tech.add('VirtualPC')
if not found_virt:
virtual_facts['virtualization_type'] = 'VirtualPC'
found_virt = True
if sys_vendor == 'Parallels Software International Inc.':
guest_tech.add('parallels')
if not found_virt:
virtual_facts['virtualization_type'] = 'parallels'
found_virt = True
if sys_vendor == 'OpenStack Foundation':
guest_tech.add('openstack')
if not found_virt:
virtual_facts['virtualization_type'] = 'openstack'
found_virt = True
# unassume guest
if not found_virt:
del virtual_facts['virtualization_role']
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match(r'^VxID:\s+\d+', line):
if not found_virt:
virtual_facts['virtualization_type'] = 'linux_vserver'
if re.match(r'^VxID:\s+0', line):
host_tech.add('linux_vserver')
if not found_virt:
virtual_facts['virtualization_role'] = 'host'
else:
guest_tech.add('linux_vserver')
if not found_virt:
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
guest_tech.add('uml')
if not found_virt:
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
guest_tech.add('uml')
if not found_virt:
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*PowerVM Lx86', line):
guest_tech.add('powervm_lx86')
if not found_virt:
virtual_facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
guest_tech.add('PR/SM')
if not found_virt:
virtual_facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.splitlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
tech = data[1].strip()
guest_tech.add(tech)
if not found_virt:
virtual_facts['virtualization_type'] = tech
else:
guest_tech.add('ibm_systemz')
if not found_virt:
virtual_facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if virtual_facts['virtualization_type'] == 'PR/SM':
if not found_virt:
virtual_facts['virtualization_role'] = 'LPAR'
else:
if not found_virt:
virtual_facts['virtualization_role'] = 'guest'
if not found_virt:
found_virt = True
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
host_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'host'
if os.path.isdir('/rhev/'):
# Check whether this is a RHEV hypervisor (is vdsm running ?)
for f in glob.glob('/proc/[0-9]*/comm'):
try:
with open(f) as virt_fh:
comm_content = virt_fh.read().rstrip()
if comm_content in ('vdsm', 'vdsmd'):
# We add both kvm and RHEV to host_tech in this case.
# It's accurate. RHEV uses KVM.
host_tech.add('RHEV')
if not found_virt:
virtual_facts['virtualization_type'] = 'RHEV'
break
except Exception:
pass
found_virt = True
if 'vboxdrv' in modules:
host_tech.add('virtualbox')
if not found_virt:
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'host'
found_virt = True
if 'virtio' in modules:
host_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
# In older Linux Kernel versions, /sys filesystem is not available
# dmidecode is the safest option to parse virtualization related values
dmi_bin = self.module.get_bin_path('dmidecode')
# We still want to continue even if dmidecode is not available
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
if rc == 0:
# Strip out commented lines (specific dmidecode output)
vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
if vendor_name.startswith('VMware'):
guest_tech.add('VMware')
if not found_virt:
virtual_facts['virtualization_type'] = 'VMware'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if 'BHYVE' in out:
guest_tech.add('bhyve')
if not found_virt:
virtual_facts['virtualization_type'] = 'bhyve'
virtual_facts['virtualization_role'] = 'guest'
found_virt = True
if os.path.exists('/dev/kvm'):
host_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'host'
found_virt = True
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
if not found_virt:
virtual_facts['virtualization_type'] = 'NA'
virtual_facts['virtualization_role'] = 'NA'
found_virt = True
virtual_facts['virtualization_tech_guest'] = guest_tech
virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
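    # Illustrative sketch (not part of the upstream logic): the returned dict is
    # expected to look roughly like
    #   {'virtualization_type': 'kvm', 'virtualization_role': 'guest',
    #    'virtualization_tech_guest': {'kvm'}, 'virtualization_tech_host': set()}
    # where the concrete values above are assumptions used only for illustration.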
class LinuxVirtualCollector(VirtualCollector):
_fact_class = LinuxVirtual
_platform = 'Linux'
| gpl-3.0 | 8,108,010,358,189,152,000 | 42.868354 | 117 | 0.504675 | false |
cgalleguillosm/accasim | accasim/utils/plot_factory.py | 1 | 51706 | """
MIT License
Copyright (c) 2017 cgalleguillosm, AlessioNetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
from math import floor
from accasim.utils.reader_class import DefaultReader
from accasim.utils.misc import load_config, from_isodatetime_2_timestamp as timestamp_func, str_resources
from accasim.utils.file import path_leaf, load_jsonfile
from accasim.base.resource_manager_class import Resources
from accasim.experimentation.schedule_parser import define_result_parser
from accasim.utils.misc import DEFAULT_SIMULATION
from copy import deepcopy
from os.path import splitext, join
from scipy.signal import savgol_filter
from os.path import isfile
import numpy as np
from matplotlib.pyplot import boxplot
class PlotFactory:
"""
A class for plot production and schedule files pre-processing.
In this class, some basic algorithms are implemented for pre-processing the schedule files produced through
simulation, and for producing some common evaluation plots.
"""
SCHEDULE_CLASS = 'schedule'
BENCHMARK_CLASS = 'benchmark'
SLOWDOWN_PLOT = 'slowdown'
QUEUE_SIZE_PLOT = 'queue_size'
LOAD_RATIO_PLOT = 'load_ratio'
EFFICIENCY_PLOT = 'efficiency'
SCALABILITY_PLOT = 'scalability'
SIMULATION_TIME_PLOT = 'sim_time'
SIMULAION_MEMORY_PLOT = 'sim_memory'
PLOT_TYPES = {
SCHEDULE_CLASS: [SLOWDOWN_PLOT, QUEUE_SIZE_PLOT, LOAD_RATIO_PLOT, EFFICIENCY_PLOT],
BENCHMARK_CLASS: [SCALABILITY_PLOT, SIMULATION_TIME_PLOT, SIMULAION_MEMORY_PLOT]
}
def __init__(self, plot_class, sim_params_fname=None, config=None, resource=None, workload_parser=None, debug=False):
"""
The constructor for the class.
:param plot_class: the plot_class of files to be analyzed. Can be either 'schedule', if schedule files are going to be
analyzed, or 'benchmark' if resource usage log files will be analyzed;
        :param sim_params_fname: name (or path) of the simulator parameters file used when parsing the schedule files;
:param config: The path to a system configuration file. Needed for the schedule meta-simulation;
:param resource: a resource type in the system to be considered. If specified, all resource-related statistics
        will be computed with regard to this resource alone;
        :param workload_parser: an optional custom parser to be used for reading the workload/schedule files;
:param debug: Debug flag.
"""
self._debug = debug
if not (plot_class in self.PLOT_TYPES.keys()):
if self._debug:
print('Wrong Plot plot_class chosen. Selecting schedule plot_class by default...')
plot_class = self.SCHEDULE_CLASS
self._plot_class = plot_class
self._sim_params_fname = sim_params_fname # if sim_params_fname is not None and isfile(sim_params_fname) else None
self._config = config
self._resource = resource
self._workload_parser = workload_parser
self._preprocessed = False
self._filepaths = []
self._labels = []
self._slowdowns = []
self._queuesizes = []
self._loadratiosX = []
self._loadratiosY = []
self._efficiencies = []
self._simdata = []
self._schedtimes = []
self._mantimes = []
self._simmemory = []
self._scalabilitydataX = []
self._scalabilitydataY = []
self._resource_order = None
if self._sim_params_fname is None:
self._resource_order = DEFAULT_SIMULATION['RESOURCE_ORDER']
# Base resource availability per-node (never changes)
self._base_res = {}
# Current resource availability per-node
self._sys_res = {}
# Aggregated used resources for all nodes
self._used_res_sum = {}
# Aggregate base resource availability for used nodes only
self._avl_res_sum = {}
# Aggregated base resource availability for all nodes
self._base_res_sum = {}
# Amount of currently used nodes
self._used_nodes = 0
# Number of total nodes in the system
self._total_nodes = 0
def set_files(self, paths, labels):
"""
Set the paths and labels of the files to be analyzed.
:param paths: A list of filepaths related to the files to be analyzed;
:param labels: the labels associated to each single file, used in the plots; must have the same length as paths;
"""
self._preprocessed = False
if not isinstance(paths, (list, tuple)):
self._filepaths = [paths]
self._labels = [labels]
else:
self._filepaths = paths
self._labels = labels
if len(self._filepaths) != len(self._labels):
if self._debug:
print("Filepaths and Labels lists must have the same lengths.")
self._labels = []
self._filepaths = []
def pre_process(self, trimSlowdown=True, trimQueueSize=False):
"""
Performs pre-processing on all specified files, according to their type.
If the files are of the schedule type, a meta-simulation is run for each of them, computing data like slowdown,
queue size, load ratios and such. If the data is of the benchmark type, the files are simply parsed and their
information stored.
        :param trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True
        :param trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False
"""
if not self._preprocessed:
# Perform pre-processing for schedule files
if self._plot_class == self.SCHEDULE_CLASS:
self._slowdowns = []
self._queuesizes = []
self._loadratiosX = []
self._loadratiosY = []
self._efficiencies = []
self._preprocessed = True
for f in self._filepaths:
# If an error is encountered on one of the files, the process is aborted
if not self._getScheduleData(f, self._config, self._resource, trimSlowdown, trimQueueSize):
self._preprocessed = False
break
# Perform pre-processing for benchmark files
elif self._plot_class == self.BENCHMARK_CLASS:
self._simdata = []
self._schedtimes = []
self._mantimes = []
self._simmemory = []
self._scalabilitydataX = []
self._scalabilitydataY = []
self._preprocessed = True
for f in self._filepaths:
if not self._getBenchmarkData(f):
self._preprocessed = False
break
if not self._preprocessed:
print("Could not process files, please ensure they are in the correct path and format.")
return self._preprocessed
def produce_plot(self, type, title='', scale='linear', xlim=(None, None), ylim=(None, None), legend=True, figsize=(7, 5), meansonly=False, alpha=0.005, smooth=30, output='Output.pdf', groups=1, **kwargs):
"""
Produces a single plot on the pre-processed files.
        The user can produce plots of any of the available types. These are:
- slowdown: a box-plot distribution plot for slowdown values across test instances
- queue_size: a box-plot for queue size in the simulation across test instances
- load_ratio: a distribution scatter plot for the load ratio in function of the number of used nodes, for
test instances separately;
- efficiency: a box-plot for resource allocation efficiency across test instances
- scalability: a scalability plot for dispatching methods across test instances
- sim_time: a bar plot for the simulation timings across test instances
- sim_memory: a bar plot for memory usage across test instances
:param type: the type of the plot, must be one of the above;
:param title: the title of the plot;
:param scale: the scale of the plot (see matplotlib documentation);
:param xlim: the left-right bounds for axis scaling, is a tuple;
:param ylim: the bottom-top bounds for axis scaling, is a tuple;
:param legend: activates the legend, is a boolean;
:param figsize: the size of the figure, is a tuple;
:param meansonly: triggers the plot of mean values alone in box-plots, is a boolean;
:param alpha: the alpha of certain features in plots, in particular for distribution scatter plots;
        :param smooth: smoothing factor used for the Savitzky-Golay filter in the scalability plot. The lower the number,
the higher the smoothing;
:param output: path of the output PDF file;
"""
if not self._preprocessed:
self.pre_process()
print("Plot_factory: Files were not pre-processed yet. Calling the pre_process method.")
if type == self.SLOWDOWN_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._slowdowns, title=title, ylabel='Slowdown', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.QUEUE_SIZE_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._queuesizes, title=title, ylabel='Queue size', scale=scale, xlim=xlim, ylim=(0, None), figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.LOAD_RATIO_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.distribution_scatter_plot(self._loadratiosX, self._loadratiosY, title=title, scale=scale, xlim=(-0.01, 1.01), ylim=(-0.01, 1.01), figsize=figsize, alpha=alpha, output=output, **kwargs)
elif type == self.EFFICIENCY_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._efficiencies, title=title, ylabel='Resource efficiency', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.SCALABILITY_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.scalability_plot(self._scalabilitydataX, self._scalabilitydataY, title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, smooth=smooth, output=output, **kwargs)
elif type == self.SIMULATION_TIME_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.box_plot_times(self._mantimes, self._schedtimes, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs)
elif type == self.SIMULAION_MEMORY_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.box_plot_memory(self._simmemory, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs)
else:
raise Exception("Plot type specified is not valid. Review the documentation for valid plot types.")
def _getBenchmarkData(self, filepath):
"""
Pre-processes a resource usage log file.
:param filepath: the path to the log file;
:return: True if successful, False otherwise;
"""
if self._debug:
print("- Pre-processing file " + filepath + "...")
# Tries to read from the file, aborts if an error is encountered
try:
f = open(filepath)
mantimes = []
schedtimes = []
mems = []
simtime = 0
disptime = 0
maxqueuesize = 0
for line in f:
# Each line is parsed and values are extracted from it
attrs = line.split(';')
mantimes.append(float(attrs[4]))
schedtimes.append((int(attrs[1]), float(attrs[3])))
mems.append(float(attrs[5]))
simtime += float(attrs[2])
disptime += float(attrs[3])
if int(attrs[1]) > maxqueuesize:
maxqueuesize = int(attrs[1])
f.close()
except Exception as e:
raise Exception("Error encountered while pre-processing: " + str(e))
# Certain statistics are computed from the data
data = {}
data['avgman'] = np.average(np.array(mantimes))
data['avgsched'] = np.average(np.array([el[1] for el in schedtimes]))
data['simtime'] = simtime / 1000.0
data['schedtime'] = disptime / 1000.0
data['mantime'] = data['simtime'] - data['schedtime']
data['avgmem'] = np.average(np.array(mems))
data['maxmem'] = np.max(np.array(mems))
# The scalability data is computed through binning: we want to obtain an X, Y set, where in X are the distinct
# queue sizes, and in Y are the average times in ms to perform dispatching on such queue sizes
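        # Worked example (illustrative numbers): with maxqueuesize=100 and
        # binningfactor=1 we get bins=100 evenly spaced queue-size values, and each
        # bin stores the mean dispatching time of the samples that fall into it.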
binningfactor = 1
bins = int(floor(maxqueuesize / binningfactor))
queuevalues = np.linspace(0, maxqueuesize, bins)
mappinglist = []
for i in range(bins):
mappinglist.append([])
step = (maxqueuesize) / (bins - 1)
for qsize, stime in schedtimes:
index = int(floor(qsize / step))
mappinglist[index].append(stime)
finallist = []
finalqueuevalues = []
for i in range(len(mappinglist)):
l = mappinglist[i]
if len(l) > 0:
finallist.append(sum(l) / len(l))
finalqueuevalues.append(queuevalues[i])
self._mantimes.append(mantimes)
self._schedtimes.append([el[1] for el in schedtimes])
self._simmemory.append(mems)
self._simdata.append(data)
self._scalabilitydataX.append(finalqueuevalues)
self._scalabilitydataY.append(finallist)
return True
def _getScheduleData(self, filepath, config, resource=None, trimSlowdown=True, trimQueueSize=False):
"""
Performs pre-processing on a schedule file through a meta-simulation process.
:param filepath: The path of the file to be analyzed;
:param config: The path to the system configuration file;
:param resource: A resource to be considered for resource-related metrics; if none is specified, all resource
types are used;
        :param trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True
        :param trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False
:return: True if successful, False otherwise;
"""
if self._debug:
print("- Pre-processing file " + filepath + "...")
# Generates the dictionary of system resources from the config file
resobject, equiv = self._generateSystemConfig(config)
self._base_res = resobject.availability()
res_types = resobject._system_resource_types
# Makes sure the resource type exists in the system
if resource is not None and resource not in resobject._system_resource_types:
if self._debug:
print("Resource type " + resource + "is not valid. Using all available resources...")
resource = None
# Tries to read from the log file, aborts if an error is encountered
try:
_sim_params_path = None
# If the simulator config path points to a file, it is considered as is
if self._sim_params_fname is not None and isfile(self._sim_params_fname):
_sim_params_path = self._sim_params_fname
# If it is a plain string, it is used as a token for config files in the experimentation
elif self._sim_params_fname is not None:
_path, _filename = path_leaf(filepath)
_sim_params_path = join(_path, self._sim_params_fname)
# If it is none, the default_result_parser will use the DEFAULT_SIMULATION config
if _sim_params_path is not None:
_resource_order = load_jsonfile(_sim_params_path)['RESOURCE_ORDER']
else:
_resource_order = self._resource_order
if self._workload_parser is not None:
reader = DefaultReader(filepath, parser=self._workload_parser, equivalence=equiv)
else:
reader = DefaultReader(filepath, parser=define_result_parser(_sim_params_path), equivalence=equiv)
slowdowns = []
timePoints = set()
jobs = {}
rev_timePoints = {}
if self._debug:
print("Loading jobs...")
while True:
# Jobs are read and their slowdown values are stored
job = reader._read()
if job is not None:
job['start_time'] = timestamp_func(job['start_time'])
job['end_time'] = timestamp_func(job['end_time'])
job['queue_time'] = timestamp_func(job['queue_time'])
_start_time = job['start_time']
_end_time = job['end_time']
_queued_time = job['queue_time']
duration = _end_time - _start_time
wait = _start_time - _queued_time
slowdown = (wait + duration) / duration if duration != 0 else wait if wait != 0 else 1.0
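                    # e.g. (illustrative): wait=10s and duration=40s give slowdown=(10+40)/40=1.25;
                    # zero-duration jobs fall back to their wait time, or to 1.0 when the wait is zero too.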
if slowdown > 1.0 or not trimSlowdown:
slowdowns.append(slowdown)
job_id = job['job_id']
jobs[job_id] = job
# Timepoints for use in the simulation are stored
timePoints.add(_queued_time)
self._addToDictAsList(rev_timePoints, _queued_time, job_id, 'queue')
timePoints.add(_start_time)
self._addToDictAsList(rev_timePoints, _start_time, job_id, 'start')
if duration > 0:
timePoints.add(_end_time)
self._addToDictAsList(rev_timePoints, _end_time, job_id, 'end')
else:
break
except Exception as e:
raise Exception("Error encountered while pre-processing: " + str(e))
# It may happen that the slowdown list is empty if all jobs have a value equal to 1. In this case we add
# a fake value, equal to 1 as well
if trimSlowdown and len(slowdowns) == 0:
slowdowns.append(1)
if self._debug:
print("Jobs loaded. Sorting...")
# We compute the final set of distinct, ordered timepoints
timePoints = sorted(timePoints)
timePointsIDX = 0
self._sys_res = deepcopy(self._base_res)
self._base_res_sum = {k: sum(self._base_res[n][k] for n in self._base_res) for k in res_types}
self._used_res_sum = {k: 0 for k in res_types}
self._avl_res_sum = {k: 0 for k in res_types}
self._used_nodes = 0
self._total_nodes = len(self._base_res.values())
queue = set()
running = set()
# Pre-allocating the lists to store performance metrics, for efficiency
queued = [0] * len(timePoints) # []
resources = [0] * len(timePoints) # []
run = [0] * len(timePoints) # []
efficiency = [0] * len(timePoints) # []
efficiencyperjob = [0] * len(jobs) # []
efficiencyIDX = 0
if self._debug:
print("Sorting done. Starting simulation...")
# Meta-simulation: goes on until there are no more timepoints to consider
while timePointsIDX < len(timePoints):
point = timePoints[timePointsIDX]
timePointsIDX += 1
# Adds to the queue jobs that were submitted in this timepoint
jobstoqueue = rev_timePoints[point]['queue']
# queue += len(jobstoqueue)
queue.update(jobstoqueue)
# Jobs that have terminated release their resources
jobstoend = rev_timePoints[point]['end']
if len(jobstoend) > 0:
for j_id in jobstoend:
j = jobs[j_id]
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
self._deallocate_resources(req, assignations, resource)
# running -= len(jobstoend)
running = running - jobstoend
# Jobs that have to start take their resources from the system
jobstostart = rev_timePoints[point]['start']
if len(jobstostart) > 0:
for j_id in jobstostart:
j = jobs[j_id]
if j['end_time'] - j['start_time'] > 0:
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
self._allocate_resources(req, assignations, resource)
# running += 1
running.add(j_id)
# queue -= len(jobstostart)
queue = queue - jobstostart
# Additionally, we store for every started job its resource allocation efficiency
for j_id in jobstostart:
j = jobs[j_id]
if j['end_time'] - j['start_time'] > 0:
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
eff = self._getResourceEfficiency(req, assignations, self._sys_res, resource)
efficiencyperjob[efficiencyIDX] = eff
efficiencyIDX += 1
# System metrics are computed AFTER dispatching
queued[timePointsIDX - 1] = len(queue) # queue
run[timePointsIDX - 1] = len(running) # running
resources[timePointsIDX - 1] = self._getLoadRatio(resource)
efficiency[timePointsIDX - 1] = self._getLoadRatioSelective(resource)
if self._debug:
print("Simulation done!")
if trimQueueSize:
queued = [q for q in queued if q != 0]
run = [r for r in run if r != 0]
# The metrics values for this instance are added to the internal variables
self._slowdowns.append(slowdowns)
self._queuesizes.append(queued)
self._efficiencies.append(efficiencyperjob)
self._loadratiosX.append([el[0] for el in efficiency])
self._loadratiosY.append([el[1] for el in efficiency])
return True
def _addToDictAsList(self, dict, key, el, type):
"""
Simple method that adds an element to a dictionary and creates sub-entries if needed.
:param dict: The target dictionary
:param key: The key of the element to add
:param el: The element to add
:param type: The type of the element to add, used in the sub-dictionary for the key entry
:return: None
"""
if key not in dict:
dict[key] = {'queue': set(), 'start': set(), 'end': set()}
dict[key][type].add(el)
def _allocate_resources(self, req, assignations, resource=None):
"""
Method that allocates the resources for a certain starting job and updates all data structures related to
resource usage
:param req: The resource request of the job
:param assignations: The list of nodes assigned to the job
:param resource: A resource type to be considered for performance metrics (optional)
:return: None
"""
for node in assignations:
# If the node goes from the unused to the used state, we update the number of used nodes and the amount
# of available resources among the used nodes, for the efficiency plots
if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()):
self._used_nodes += 1
for k, v in self._base_res[node].items():
self._avl_res_sum[k] += v
# If a specific resource type is considered, the same condition is triggered only if such resource is used
elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0:
self._used_nodes += 1
self._avl_res_sum[resource] += self._base_res[node][resource]
# Updating the per-node currently available resources
for k, val in req.items():
self._sys_res[node][k] -= val
if self._sys_res[node][k] < 0:
self._sys_res[node][k] = 0
if self._debug:
print("Caution: resource " + k + " is going below zero.")
# Updating the dictionary of per-type currently used resources
for k, v in req.items():
self._used_res_sum[k] += v * len(assignations)
if self._used_res_sum[k] > self._avl_res_sum[k]:
self._used_res_sum[k] = self._avl_res_sum[k]
def _deallocate_resources(self, req, assignations, resource):
"""
Method that de-allocates the resources for a certain starting job and updates all data structures related to
resource usage
:param req: The resource request of the job
:param assignations: The list of nodes assigned to the job
:param resource: A resource type to be considered for performance metrics (optional)
:return: None
"""
for node in assignations:
for k, val in req.items():
self._sys_res[node][k] += val
if self._sys_res[node][k] > self._base_res[node][k]:
self._sys_res[node][k] = self._base_res[node][k]
if self._debug:
print("Caution: resource " + k + " is going beyond its base capacity.")
# In this case the check for used-unused nodes must be performed after the resources are de-allocated
if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()):
self._used_nodes -= 1
for k, v in self._base_res[node].items():
self._avl_res_sum[k] -= v
elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0:
self._used_nodes -= 1
self._avl_res_sum[resource] -= self._base_res[node][resource]
# The method is specular to allocate_resources and works identically
for k, v in req.items():
self._used_res_sum[k] -= v * len(assignations)
if self._used_res_sum[k] < 0:
self._used_res_sum[k] = 0
def _generateSystemConfig(self, config_path):
"""
Generates a Resources object from a system configuration file.
:param config_path: the path to the config file;
:return: the Resources object and the resource equivalence;
"""
try:
config = load_config(config_path)
equiv = config.pop('equivalence', {})
# PEP 448 - Additional Unpacking Generalizations
# python 3.5 and newer
            if 'node_prefix' not in config:
config['node_prefix'] = ''
resources = Resources(**config)
return resources, equiv
except Exception as e:
if config_path != '':
print("Could not load system config: " + str(e))
else:
print("A system configuration file must be specified.")
exit()
return None, None
def _getRequestedResources(self, _resource_order, assignations_str):
"""
        Returns the resource request of a job and the list of nodes assigned to it,
        parsed from the assignations string of a schedule entry.
        :param _resource_order: the ordered list of resource type names used in the schedule format;
        :param assignations_str: the assignations string related to the current job;
        :return: the dictionary of resources needed by each job unit, and the list of node assignations;
"""
_assignations_list = assignations_str.split(str_resources.SEPARATOR)[0:-1]
_nodes_list = [assign.split(';')[0] for assign in _assignations_list]
        _request = {k: int(v) for k, v in zip(_resource_order, _assignations_list[0].split(';')[1:])}
return _request, _nodes_list
def _getResourceEfficiency(self, reqres, nodes, sys_res, resource):
"""
Computes the resource allocation efficiency metric for a certain input job.
This method computed the resource allocation efficiency AFTER dispatching is performed, not before.
:param reqres: the dictionary of resources requested by each job unit;
:param nodes: the list of node assignations;
:param sys_res: the dictionary of system resources;
:param resource: the resource type to be considered (if present);
:return: the resource allocation efficiency;
"""
# Computing the amount of used resources by the job
if resource is None:
used = sum(r * len(nodes) for r in reqres.values())
else:
used = reqres[resource] * len(nodes)
avl = 0
# Computing the amount of available resources in nodes used by the job
for node in set(nodes):
if resource is None:
avl += sum(r for r in sys_res[node].values())
else:
avl += sys_res[node][resource]
return used / (avl + used)
def _getLoadRatio(self, resource):
"""
Returns the standard load ratio for the system.
:param resource: the resource type to be considered (if present);
:return: the load ratio;
"""
loadratio = 0
if resource is None:
loadratio = sum(self._used_res_sum.values()) / sum(self._base_res_sum.values())
elif resource in self._base_res_sum:
loadratio = self._used_res_sum[resource] / self._base_res_sum[resource]
return loadratio
def _getLoadRatioSelective(self, resource):
"""
Returns the per-step resource allocation efficiency.
This is defined as a X,Y pair where X expresses the fraction of used nodes, and Y defines the fraction of used
resources in such nodes.
:param resource: the resource type to be considered (if present);
:return: an X,Y pair expressing the per-step resource allocation efficiency;
"""
loadratio = 0
if self._used_nodes > 0:
if resource is None:
loadratio = sum(self._used_res_sum.values()) / sum(self._avl_res_sum.values())
elif resource in self._avl_res_sum:
loadratio = self._used_res_sum[resource] / self._avl_res_sum[resource]
return self._used_nodes / self._total_nodes, loadratio
else:
return 0, 0
def _getDistributionStats(self, data):
"""
Returns some useful distribution statistics for the input data.
The mean, minimum, maximum, median, and quartiles for the data are computed.
:param data: The iterable for the input data;
:return: a dictionary of statistics for the data distribution;
"""
stats = {}
stats['avg'] = np.average(data)
stats['min'] = np.min(data)
stats['max'] = np.max(data)
stats['median'] = np.median(data)
stats['quartiles'] = np.percentile(data, range(0, 100, 25))
return stats
def box_plot(self, data, title='', ylabel='', scale='linear', figsize=(7, 5), meansonly=False, output='Output.pdf', groups=1, **kwargs):
"""
Produces a box-and-whiskers plot for the input data's distributions.
:param data: the input data; must be a list, in which each element is again a list containing all of the data
regarding a certain test instance; the ordering must be that of the labels;
:param title: the title of the plot;
:param ylabel: the Y-axis label;
:param scale: the scale of the plot;
:param figsize: the size of the figure, is a tuple;
:param meansonly: if True only the mean values for each distribution are depicted;
:param output: the path to the output file;
:param **kwargs:
- fig_format: {
'format': eps or pdf,
'dpi': Int number
}
- xlim: the left-right axis boundaries, is a tuple;
- ylim: the bottom-top axis boundaries, is a tuple;
"""
color_cycler = ['b', 'r', 'y', 'g', 'c', 'm', 'k', 'w']
hatch_cycler = ['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']
ncycle = 2
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(data)
ylim = kwargs.pop('ylim', None)
xlim = kwargs.pop('xlim', None)
show_legend = kwargs.pop('show_legend', False)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
linecol = 'black'
tricol = 'black'
vertlinecol = 'gray'
fig, ax = plt.subplots(figsize=figsize)
c_group = 0
c = groups
r_hatch = len(hatch_cycler)
color_list = []
hatch_list = []
for i, d in enumerate(data):
color_list.append(color_cycler[c_group])
hatch_list.append(hatch_cycler[len(hatch_cycler) - r_hatch] * ncycle)
c -= 1
if c == 0:
c_group += 1
c = groups
r_hatch -= 1
if r_hatch == 0:
ncycle += 1
r_hatch = len(hatch_cycler)
bp = ax.boxplot(data, labels=self._labels, patch_artist=True, sym="", whis=[0, 100], showmeans=True, showfliers=False)
for patch, color, hatch in zip(bp['boxes'], color_list, hatch_list):
patch.set_facecolor(color)
patch.set_alpha(0.75)
patch.set_hatch(hatch)
# add some text for labels, title and axes ticks
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_yscale(scale)
if show_legend:
ax.legend(bp['boxes'], self._labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(self._labels) // 2, mode="expand", borderaxespad=0.)
if ylim:
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
if xlim:
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.tight_layout()
plt.grid(linestyle=':', color='gray', zorder=0)
plt.show()
fig_format = kwargs.pop('fig_format', {})
fig.savefig(output, **fig_format)
def box_plot_times(self, dataman, datasched, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'):
"""
Produces a bar plot for the timings in the simulations, across test instances.
The bars will depict the average time required to perform dispatching in each simulation step, and the
time required to perform simulation-related tasks in the simulation.
:param dataman: the data for the time required in each step to perform simulation-related tasks. Is a list,
where each element is again a list containing the data for a certain test instance;
:param datasched: the data for the time required in each step to perform dispatching. Is a list, where
each element is again a list containing the data for a certain test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param output: the path to the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(dataman)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
fig, ax = plt.subplots(figsize=figsize)
for i in range(N):
avgman = np.average(np.array(dataman[i]))
avgsched = np.average(np.array(datasched[i]))
if i == 0:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) # , label='Simulation'))
ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Dispatching decision'))
else:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75))
ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75))
ax.scatter(ind[i] + width / 2, avgman + avgsched, marker='_', s=markersize / 4, zorder=0, color='black')
# add some text for labels, title and axes ticks
ax.set_ylabel('Time [ms]', fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_xticks([i + width / 2 for i in ind])
if legend:
ax.legend()
ax.set_xticklabels(self._labels)
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def box_plot_memory(self, data, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'):
"""
Produces a bar plot for the memory usage in the simulations, across test instances.
The bars depict average and maximum memory usage in the simulation.
:param data: the data for memory usage in each simulation step. Is a list, where
each element is again a list containing the data for a certain test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param output: the path to the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(data)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
fig, ax = plt.subplots(figsize=figsize)
for i in range(N):
avgmem = np.average(np.array(data[i]))
maxmem = np.max(np.array(data[i]))
if i == 0:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75, label='Avg. Mem'))
ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Max. Mem'))
else:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75))
ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75))
ax.scatter(ind[i] + width / 2, maxmem, marker='_', s=markersize / 4, zorder=0, color='black')
ax.set_ylabel('Average Memory Usage [MB]', fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_xticks([i + width / 2 for i in ind])
if legend:
ax.legend()
ax.set_xticklabels(self._labels)
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def scalability_plot(self, xdata, ydata, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, smooth=30, linestyles=None, markers=None, output='Output.pdf'):
"""
Creates a scalability plot for all test instances, where X represents the queue size, and Y the average
time required by each dispatching method in the instances.
:param xdata: the X data, containing the queue sizes for each test instance; is a list, where each element
contains a list with the data for each test instance;
:param ydata: the Y data, containing the average times required to perform dispatching in each test instance;
is a list, where each element contains a list with the data for each test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param smooth: smoothing factor for the Savitzky-Golay filter. The lower the number, the higher the smoothing;
:param output: the path of the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
if not linestyles:
linestyles = ('-', '-', '--', '--', '-.', '-.', ':', ':')
if not markers:
markers = (None, 'o', None, '^', None, 's', None, 'p')
numstyles = len(linestyles)
fig, ax = plt.subplots(figsize=figsize)
divideFactor = smooth
for i in range(len(xdata)):
markeroffset = floor(max(xdata[i]) / 20 + i * 2)
if divideFactor > 1 and len(ydata[i]) >= divideFactor:
win_len = floor(len(ydata[i]) / divideFactor)
win_len += (win_len + 1) % 2
if win_len < 5:
win_len = 5
yfiltered = savgol_filter(ydata[i], win_len, 3)
else:
yfiltered = ydata[i]
ax.plot(xdata[i], yfiltered, label=self._labels[i], linestyle=linestyles[i % numstyles], marker=markers[i % numstyles], markevery=markeroffset, zorder=2 if markers[i % numstyles] is None else 0)
ax.set_ylabel('Time [ms]', fontsize=fontsize)
ax.set_xlabel('Queue size', fontsize=fontsize)
ax.set_title(title)
if legend:
ax.legend()
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def distribution_scatter_plot(self, xdata, ydata, title='', scale='linear', xlim=(0, 1.05), ylim=(0, 1.05), figsize=(7, 5), alpha=0.005, output='Output.pdf'):
"""
Creates a distribution scatter plot for the system's resource efficiency.
The X values represent the amount of used nodes in a certain time step, while the Y values represent the
fraction of used resources in such nodes. Darker areas of the plot represent values with higher frequency.
The method creates one plot per test instance, automatically.
        :param xdata: the X data, containing the fraction of used nodes for each time step, per test instance;
        :param ydata: the Y data, containing the fraction of used resources in such nodes, per test instance;
:param alpha: the alpha to be used for each dot in the plot;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param output: the path to the output files: the label for each test instance will be automatically added
for each file;
"""
for i in range(len(xdata)):
fig, ax = plt.subplots(figsize=figsize)
ax.scatter(xdata[i], ydata[i], color='black', alpha=alpha, s=5)
ax.set_title(title)
ax.set_xlabel('Used Nodes')
ax.set_ylabel('Used Resources')
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
ax.grid(True)
plt.show()
splitoutput = splitext(output)
ff = PdfPages(splitoutput[0] + '-' + self._labels[i] + '.pdf')
ff.savefig(fig)
ff.close()
def get_preprocessed_benchmark_data(self):
"""
Returns all of the pre-processed benchmark-related data.
A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed.
Also, each element of the tuple is a list, with as many entries as the files that were processed, in the
same order. Each element of these lists contains then the data related to a specific metric, for a specific
test instance. All data is stored in standard Python lists.
:return: a tuple in which every element is a list containing, in each element, a specific kind of data
regarding one of the test instances. The tuple contains, in this order:
- the resource usage statistics' dictionaries;
- the lists of dispatching times for each time step;
- the lists of management times for each time step;
- the lists of memory usage values for each time step;
- the X scalability data containing the queue size for each test instance;
- the Y scalability data containing the average dispatching times for each test instance;
"""
if not self._preprocessed or self._plot_class != self.BENCHMARK_CLASS:
return None, None, None, None, None, None
else:
return self._simdata, self._schedtimes, self._mantimes, self._simmemory, self._scalabilitydataX, self._scalabilitydataY
def get_preprocessed_schedule_data(self):
"""
Returns all of the pre-processed schedule-related data.
A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed.
Also, each element of the tuple is a list, with as many entries as the files that were processed, in the
same order. Each element of these lists contains then the data related to a specific metric, for a specific
test instance. All data is stored in standard Python lists.
:return: a tuple in which every element is a list containing, in each element, the data regarding one of the
test instances. The tuple contains, in this order:
- the slowdown values for jobs;
- the queue sizes for all time steps;
- the resource allocation efficiencies for all jobs;
- the X data regarding the load ratios (fraction of used nodes) for all time steps;
- the Y data regarding the load ratios (fraction of used resources) for all time steps;
"""
if not self._preprocessed or self._plot_class != self.SCHEDULE_CLASS:
return None, None, None, None, None
else:
return self._slowdowns, self._queuesizes, self._efficiencies, self._loadratiosX, self._loadratiosY
if __name__ == '__main__':
# This is an example. It should not be executed here, but in a script in the project's root, where also
# basic_example.py is, so that all imports can be resolved correctly.
resultpath = ['Path/to/benchmark/file',
'Path/to/benchmark/file2']
resultlabel = ['Label',
'Label2']
plots = PlotFactory('benchmark')
plots.set_files(resultpath, resultlabel)
plots.pre_process()
plots.produce_plot(type='scalability', title='My Scalability Plot')
| mit | -8,890,675,378,469,619,000 | 46.595865 | 208 | 0.588674 | false |
dmsimard/ansible | test/support/windows-integration/plugins/action/win_template.py | 269 | 1198 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.template import ActionModule as TemplateActionModule
# Even though TemplateActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(TemplateActionModule, ActionBase):
DEFAULT_NEWLINE_SEQUENCE = '\r\n'
| gpl-3.0 | 6,576,294,700,988,176,000 | 40.310345 | 80 | 0.775459 | false |
asterisk/ari-py | ari/client.py | 1 | 11612 | #
# Copyright (c) 2013, Digium, Inc.
#
"""ARI client library.
"""
import json
import logging
import urlparse
import swaggerpy.client
from ari.model import *
log = logging.getLogger(__name__)
class Client(object):
"""ARI Client object.
:param base_url: Base URL for accessing Asterisk.
:param http_client: HTTP client interface.
"""
def __init__(self, base_url, http_client):
url = urlparse.urljoin(base_url, "ari/api-docs/resources.json")
self.swagger = swaggerpy.client.SwaggerClient(
url, http_client=http_client)
self.repositories = {
name: Repository(self, name, api)
for (name, api) in self.swagger.resources.items()}
# Extract models out of the events resource
events = [api['api_declaration']
for api in self.swagger.api_docs['apis']
if api['name'] == 'events']
if events:
self.event_models = events[0]['models']
else:
self.event_models = {}
self.websockets = set()
self.event_listeners = {}
self.exception_handler = \
lambda ex: log.exception("Event listener threw exception")
def __getattr__(self, item):
"""Exposes repositories as fields of the client.
:param item: Field name
"""
repo = self.get_repo(item)
if not repo:
raise AttributeError(
"'%r' object has no attribute '%s'" % (self, item))
return repo
def close(self):
"""Close this ARI client.
This method will close any currently open WebSockets, and close the
underlying Swaggerclient.
"""
for ws in self.websockets:
ws.send_close()
self.swagger.close()
def get_repo(self, name):
"""Get a specific repo by name.
:param name: Name of the repo to get
:return: Repository, or None if not found.
:rtype: ari.model.Repository
"""
return self.repositories.get(name)
def __run(self, ws):
"""Drains all messages from a WebSocket, sending them to the client's
listeners.
:param ws: WebSocket to drain.
"""
# TypeChecker false positive on iter(callable, sentinel) -> iterator
# Fixed in plugin v3.0.1
# noinspection PyTypeChecker
for msg_str in iter(lambda: ws.recv(), None):
msg_json = json.loads(msg_str)
if not isinstance(msg_json, dict) or 'type' not in msg_json:
log.error("Invalid event: %s" % msg_str)
continue
listeners = list(self.event_listeners.get(msg_json['type'], []))
for listener in listeners:
# noinspection PyBroadException
try:
callback, args, kwargs = listener
args = args or ()
kwargs = kwargs or {}
callback(msg_json, *args, **kwargs)
except Exception as e:
self.exception_handler(e)
def run(self, apps):
"""Connect to the WebSocket and begin processing messages.
This method will block until all messages have been received from the
WebSocket, or until this client has been closed.
:param apps: Application (or list of applications) to connect for
:type apps: str or list of str
"""
if isinstance(apps, list):
apps = ','.join(apps)
ws = self.swagger.events.eventWebsocket(app=apps)
self.websockets.add(ws)
try:
self.__run(ws)
finally:
ws.close()
self.websockets.remove(ws)
def on_event(self, event_type, event_cb, *args, **kwargs):
"""Register callback for events with given type.
:param event_type: String name of the event to register for.
:param event_cb: Callback function
:type event_cb: (dict) -> None
:param args: Arguments to pass to event_cb
:param kwargs: Keyword arguments to pass to event_cb
"""
listeners = self.event_listeners.setdefault(event_type, list())
for cb in listeners:
if event_cb == cb[0]:
listeners.remove(cb)
callback_obj = (event_cb, args, kwargs)
listeners.append(callback_obj)
client = self
class EventUnsubscriber(object):
"""Class to allow events to be unsubscribed.
"""
def close(self):
"""Unsubscribe the associated event callback.
"""
if callback_obj in client.event_listeners[event_type]:
client.event_listeners[event_type].remove(callback_obj)
return EventUnsubscriber()
def on_object_event(self, event_type, event_cb, factory_fn, model_id,
*args, **kwargs):
"""Register callback for events with the given type. Event fields of
the given model_id type are passed along to event_cb.
If multiple fields of the event have the type model_id, a dict is
passed mapping the field name to the model object.
:param event_type: String name of the event to register for.
:param event_cb: Callback function
        :type event_cb: (Obj, dict) -> None or (dict[str, Obj], dict) -> None
:param factory_fn: Function for creating Obj from JSON
:param model_id: String id for Obj from Swagger models.
:param args: Arguments to pass to event_cb
:param kwargs: Keyword arguments to pass to event_cb
"""
# Find the associated model from the Swagger declaration
event_model = self.event_models.get(event_type)
if not event_model:
raise ValueError("Cannot find event model '%s'" % event_type)
# Extract the fields that are of the expected type
obj_fields = [k for (k, v) in event_model['properties'].items()
if v['type'] == model_id]
if not obj_fields:
raise ValueError("Event model '%s' has no fields of type %s"
% (event_type, model_id))
def extract_objects(event, *args, **kwargs):
"""Extract objects of a given type from an event.
:param event: Event
:param args: Arguments to pass to the event callback
:param kwargs: Keyword arguments to pass to the event
callback
"""
# Extract the fields which are of the expected type
obj = {obj_field: factory_fn(self, event[obj_field])
for obj_field in obj_fields
if event.get(obj_field)}
# If there's only one field in the schema, just pass that along
if len(obj_fields) == 1:
if obj:
obj = obj.values()[0]
else:
obj = None
event_cb(obj, event, *args, **kwargs)
return self.on_event(event_type, extract_objects,
*args,
**kwargs)
def on_channel_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Channel related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Channel, dict) -> None or (list[Channel], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Channel, 'Channel',
*args, **kwargs)
def on_bridge_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Bridge related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Bridge, dict) -> None or (list[Bridge], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Bridge, 'Bridge',
*args, **kwargs)
def on_playback_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Playback related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Playback, dict) -> None or (list[Playback], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Playback, 'Playback',
*args, **kwargs)
def on_live_recording_event(self, event_type, fn, *args, **kwargs):
"""Register callback for LiveRecording related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (LiveRecording, dict) -> None or (list[LiveRecording], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, LiveRecording,
'LiveRecording', *args, **kwargs)
def on_stored_recording_event(self, event_type, fn, *args, **kwargs):
"""Register callback for StoredRecording related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (StoredRecording, dict) -> None or (list[StoredRecording], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, StoredRecording,
'StoredRecording', *args, **kwargs)
def on_endpoint_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Endpoint related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Endpoint, dict) -> None or (list[Endpoint], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Endpoint, 'Endpoint',
*args, **kwargs)
def on_device_state_event(self, event_type, fn, *args, **kwargs):
"""Register callback for DeviceState related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (DeviceState, dict) -> None or (list[DeviceState], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, DeviceState, 'DeviceState',
*args, **kwargs)
def on_sound_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Sound related events
:param event_type: String name of the event to register for.
        :param fn: Callback function
:type fn: (Sound, dict) -> None or (list[Sound], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Sound, 'Sound',
*args, **kwargs)
| bsd-3-clause | -3,331,569,382,110,264,000 | 37.83612 | 91 | 0.573114 | false |
KISSMonX/micropython | tests/extmod/uctypes_native_le.py | 10 | 2037 | # This test is exactly like uctypes_le.py, but uses native structure layout.
# Codepaths for packed vs native structures are different. This test only works
# on little-endian machine (no matter if 32 or 64 bit).
import sys
import uctypes
if sys.byteorder != "little":
print("SKIP")
sys.exit()
desc = {
"s0": uctypes.UINT16 | 0,
"sub": (0, {
"b0": uctypes.UINT8 | 0,
"b1": uctypes.UINT8 | 1,
}),
"arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 2),
"arr2": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0}),
"bitf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bitf1": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf1": uctypes.BFUINT16 | 0 | 4 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf2": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf3": uctypes.BFUINT16 | 0 | 12 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"ptr": (uctypes.PTR | 0, uctypes.UINT8),
"ptr2": (uctypes.PTR | 0, {"b": uctypes.UINT8 | 0}),
}
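# Descriptor encoding used above: scalar fields are "TYPE | byte_offset";
# (ARRAY | offset, TYPE | count) declares an array and (offset, {...}) a nested
# struct; bitfields pack their bit position and width into the value via BF_POS
# and BF_LEN, e.g. "bf1" is the 4-bit field starting at bit 4 of the uint16 at
# offset 0.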
data = bytearray(b"01")
S = uctypes.struct(desc, uctypes.addressof(data), uctypes.NATIVE)
#print(S)
print(hex(S.s0))
assert hex(S.s0) == "0x3130"
#print(S.sub.b0)
print(S.sub.b0, S.sub.b1)
assert (S.sub.b0, S.sub.b1) == (0x30, 0x31)
try:
S[0]
assert False, "Can't index struct"
except TypeError:
print("TypeError")
print("arr:", S.arr[0], S.arr[1])
assert (S.arr[0], S.arr[1]) == (0x30, 0x31)
print("arr of struct:", S.arr2[0].b, S.arr2[1].b)
assert (S.arr2[0].b, S.arr2[1].b) == (0x30, 0x31)
try:
S.arr[2]
assert False, "Out of bounds index"
except IndexError:
print("IndexError")
print("bf:", S.bitf0, S.bitf1)
assert (S.bitf0, S.bitf1) == (0x30, 0x31)
print("bf 4bit:", S.bf3, S.bf2, S.bf1, S.bf0)
assert (S.bf3, S.bf2, S.bf1, S.bf0) == (3, 1, 3, 0)
# Write access
S.sub.b0 = ord("2")
print(data)
assert bytes(data) == b"21"
S.bf3 = 5
print(data)
assert bytes(data) == b"2Q"
| mit | -281,293,852,833,337,500 | 25.802632 | 79 | 0.602847 | false |
williamsandrew/kubernetes | hack/jenkins/test-history/gen_html_test.py | 12 | 2491 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gen_html."""
import json
import os
import shutil
import tempfile
import unittest
import gen_html
TEST_DATA = {
"test1":
{"kubernetes-release": [{"build": 3, "failed": False, "time": 3.52},
{"build": 4, "failed": True, "time": 63.21}],
"kubernetes-debug": [{"build": 5, "failed": False, "time": 7.56},
{"build": 6, "failed": False, "time": 8.43}],
},
"test2":
{"kubernetes-debug": [{"build": 6, "failed": True, "time": 3.53}]},
}
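# TEST_DATA maps test name -> job name -> list of per-build results
# ({"build", "failed", "time"}); gen_html.gen_html() renders it to HTML and the
# assertions below check which tests and jobs appear in the output.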
class GenHtmlTest(unittest.TestCase):
def gen_html(self, *args):
return gen_html.gen_html(TEST_DATA, *args)[0]
def testGenHtml(self):
html = self.gen_html('')
self.assertIn("test1", html)
self.assertIn("test2", html)
self.assertIn("release", html)
self.assertIn("debug", html)
def testGenHtmlFilter(self):
html = self.gen_html('release')
self.assertIn("release", html)
self.assertIn('skipped">\ntest2', html)
self.assertNotIn("debug", html)
def testGenHtmlFilterExact(self):
html = self.gen_html('release', True)
self.assertNotIn('debug', html)
def testMain(self):
temp_dir = tempfile.mkdtemp(prefix='kube-test-hist-')
try:
tests_json = os.path.join(temp_dir, 'tests.json')
with open(tests_json, 'w') as f:
json.dump(TEST_DATA, f)
gen_html.main(['--suites', '--prefixes', ',rel,deb',
'--output-dir', temp_dir, '--input', tests_json])
for page in ('index', 'suite-kubernetes-debug', 'tests', 'tests-rel', 'tests-deb'):
self.assertTrue(os.path.exists('%s/%s.html' % (temp_dir, page)))
finally:
shutil.rmtree(temp_dir)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -540,147,821,993,442,560 | 33.123288 | 95 | 0.598956 | false |
cjhak/b2share | invenio/ext/session/legacy_session.py | 12 | 3341 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implementation of legacy Invenio methods for Flask session."""
from flask import current_app, request
from flask.sessions import SessionMixin
from flask_login import current_user
from werkzeug.datastructures import CallbackDict
class Session(CallbackDict, SessionMixin):
"""Implement compatible legacy Invenio session."""
def __init__(self, initial=None, sid=None):
"""Initialize session with optional default value."""
self.sid = sid
self.logging_in = False
self.modified = initial is not None
def _on_update(d):
d.modified = True
CallbackDict.__init__(self, initial, _on_update)
def need_https(self):
"""Check if the user was previously authenticated.
        If True, the session identifier needs to be sent via HTTPS.
"""
return request.cookies.get(
current_app.session_cookie_name + 'stub', 'NO') == 'HTTPS'
def delete(self, clear=True):
"""Delete the session."""
if clear:
self.clear()
def invalidate(self):
"""Declare the session as invalid."""
self._invalid = 1
def set_remember_me(self, remember_me=True):
"""Set or unset the ``_remember_me`` flag.
:param remember_me: True if the session cookie should last one day or
until the browser is closed.
"""
self._remember_me = remember_me
self['_permanent'] = remember_me
def save_ip(self, request):
"""Save IP for current scheme."""
remote_ip = request.remote_addr
scheme_a = '_http_ip' if request.scheme == 'http' else '_https_ip'
scheme_b = '_https_ip' if request.scheme == 'http' else '_http_ip'
if scheme_a not in self:
self[scheme_a] = remote_ip
if scheme_b not in self:
self[scheme_b] = None
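    # save_ip() records the client address under a per-scheme key ('_http_ip' or
    # '_https_ip') and seeds the other scheme's key with None; check_ip() below
    # accepts a request only if its address matches the value stored for the
    # scheme it arrived on (or if no value has been stored for that scheme yet).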
def check_ip(self, request):
"""Check that session is used from the same IP where it was created."""
remote_ip = request.remote_addr
if self.get('_{0}_ip'.format(request.scheme), remote_ip) != remote_ip:
return False
return True
def _get_uid(self):
return self.get('user_id', -1)
def _set_uid(self, uid):
if self.get('user_id') != uid:
self.logging_in = True
self['user_id'] = self['_uid'] = self['uid'] = uid
def _get_user_info(self):
return current_user
uid = property(_get_uid, _set_uid)
user_info = property(_get_user_info)
del _get_uid, _set_uid, _get_user_info
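# Minimal usage sketch (illustrative only; `session`, `request` and the uid value
# are assumed, not defined here):
#
#   session = Session(sid='abc123')
#   session.uid = 42                # sets 'user_id'/'_uid'/'uid' and flags logging_in
#   session.set_remember_me(True)   # marks the session permanent
#   session.save_ip(request)
#   valid = session.check_ip(request)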
| gpl-2.0 | 8,962,688,884,133,845,000 | 32.41 | 79 | 0.632445 | false |
motion2015/a3 | openedx/core/djangoapps/course_groups/migrations/0003_auto__add_coursecohort__add_coursecohortssettings.py | 100 | 7464 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseCohort'
db.create_table('course_groups_coursecohort', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_user_group', self.gf('django.db.models.fields.related.OneToOneField')(related_name='cohort', unique=True, to=orm['course_groups.CourseUserGroup'])),
('assignment_type', self.gf('django.db.models.fields.CharField')(default='manual', max_length=20)),
))
db.send_create_signal('course_groups', ['CourseCohort'])
# Adding model 'CourseCohortsSettings'
db.create_table('course_groups_coursecohortssettings', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('is_cohorted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255, db_index=True)),
('cohorted_discussions', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('always_cohort_inline_discussions', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('course_groups', ['CourseCohortsSettings'])
def backwards(self, orm):
# Deleting model 'CourseCohort'
db.delete_table('course_groups_coursecohort')
# Deleting model 'CourseCohortsSettings'
db.delete_table('course_groups_coursecohortssettings')
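    # The `models` dict below is South's frozen ORM snapshot: it records the full
    # model state of the involved apps at this point in history for use by the
    # migration runner, and does not itself create any tables.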
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'course_groups.coursecohort': {
'Meta': {'object_name': 'CourseCohort'},
'assignment_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}),
'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'cohort'", 'unique': 'True', 'to': "orm['course_groups.CourseUserGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'course_groups.coursecohortssettings': {
'Meta': {'object_name': 'CourseCohortsSettings'},
'always_cohort_inline_discussions': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cohorted_discussions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_cohorted': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'course_groups.courseusergroup': {
'Meta': {'unique_together': "(('name', 'course_id'),)", 'object_name': 'CourseUserGroup'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'group_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'course_groups'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'course_groups.courseusergrouppartitiongroup': {
'Meta': {'object_name': 'CourseUserGroupPartitionGroup'},
'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['course_groups.CourseUserGroup']", 'unique': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partition_id': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['course_groups'] | agpl-3.0 | 7,775,110,186,029,337,000 | 68.12037 | 183 | 0.579046 | false |
JamesLiAndroid/django-blog-practice | blogproject/blog/migrations/0001_initial.py | 1 | 1880 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 06:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=70)),
('body', models.TextField()),
('create_time', models.DateTimeField()),
('modified_time', models.DateTimeField()),
('excerpt', models.CharField(blank=True, max_length=200)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tag'),
),
]
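    # Applying this initial migration (illustrative; assumes the app is installed
    # as 'blog' in INSTALLED_APPS):
    #
    #   python manage.py migrate blog 0001_initial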
| gpl-3.0 | -7,716,185,392,384,086,000 | 35.862745 | 120 | 0.565957 | false |