repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
fedral/ITK | Wrapping/Generators/Python/Tests/BinaryErodeImageFilter.py | 19 | 1673 | #!/usr/bin/env python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Test BinaryErodeImageFilter
#
import sys
import itk
itk.auto_progress(2)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
ErodeFilterType = itk.BinaryErodeImageFilter[
ImageType, ImageType, StructuringElementType]
erodeFilter = ErodeFilterType.New()
erodeFilter.SetInput(reader.GetOutput())
erodeFilter.SetKernel(structuringElement)
erodeFilter.SetErodeValue(200)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(erodeFilter.GetOutput())
writer.Update()
| apache-2.0 | -1,586,508,986,531,938,800 | 28.350877 | 77 | 0.704722 | false |
2014c2g12/c2g12 | wsgi/exts/w2/static/Brython2.0.0-20140209-164925/Lib/reprlib.py | 923 | 5110 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
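# Illustrative sketch (not part of the original module): the `Node` class
# below is hypothetical and only shows how the decorator protects a
# self-referential __repr__ from infinite recursion.
#
#     class Node:
#         def __init__(self):
#             self.child = self              # cycle back to itself
#         @recursive_repr()
#         def __repr__(self):
#             return '<Node child=%r>' % (self.child,)
#
#     repr(Node())    # '<Node child=...>' instead of a RecursionError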
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
| gpl-2.0 | -7,938,004,207,631,271,000 | 31.547771 | 79 | 0.526614 | false |
timcera/mettoolbox | mettoolbox/pet.py | 1 | 10467 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import warnings
from typing import Optional, Union
import numpy as np
import pandas as pd
import typic
from solarpy import declination
from tstoolbox import tsutils
from . import meteolib, utils
warnings.filterwarnings("ignore")
def _columns(tsd, req_column_list=[], optional_column_list=[]):
if None in req_column_list:
raise ValueError(
tsutils.error_wrapper(
"""
You need to supply the column (name or number, data column numbering
starts at 1) for {0} time-series.
Instead you gave {1}""".format(
len(req_column_list), req_column_list
)
)
)
collect = []
for loopvar in req_column_list + optional_column_list:
try:
nloopvar = int(loopvar) - 1
except TypeError:
nloopvar = loopvar
if nloopvar is None:
collect.append(None)
else:
collect.append(tsd.ix[:, nloopvar])
return collect
def _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
):
columns, column_names = utils._check_temperature_cols(
temp_min_col=temp_min_col,
temp_max_col=temp_max_col,
temp_mean_col=temp_mean_col,
temp_min_required=temp_min_required,
temp_max_required=temp_max_required,
)
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
)
if source_units is None:
# If "source_units" keyword is None must have source_units in column name.
source_units = []
for units in tsd.columns:
words = units.split(":")
if len(words) >= 2:
source_units.append(words[1])
else:
raise ValueError(
tsutils.error_wrapper(
"""
If "source_units" are not supplied as the second ":" delimited field in the column name
they must be supplied with the "source_units" keyword. """
)
)
else:
source_units = tsutils.make_list(source_units)
if len(source_units) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
The number of "source_units" terms must match the number of temperature columns.
"""
)
)
interim_target_units = ["degC"] * len(tsd.columns)
tsd = tsutils.common_kwds(
tsd,
source_units=source_units,
target_units=interim_target_units,
)
tsd.columns = column_names
tsd = utils._validate_temperatures(tsd, temp_min_col, temp_max_col)
return tsd
def et0_pm(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
source_units=None,
target_units=None,
print_input=False,
tablefmt="csv",
avp=None,
avp_from_tdew=None,
avp_from_twet_tdry=None,
avp_from_rhmin_rh_max=None,
avp_from_rhmax=None,
avp_from_rhmean=None,
avp_from_tmin=None,
lat=None,
):
"""Penman-Monteith evaporation."""
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
return tsd
@typic.constrained(ge=-90, le=90)
class FloatLatitude(float):
"""-90 <= float <= 90"""
@typic.al
def hamon(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k: float = 1,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""hamon"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
decl = [declination(i) for i in tsd.index.to_pydatetime()]
w = np.arccos(-np.tan(decl) * np.tan(lat))
es = meteolib.es_calc(tsd.tmean)
N = 24 * w / np.pi
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hamon:mm"])
pe["pet_hamon:mm"] = k * 29.8 * N * es / (273.3 + tsd.tmean)
pe.loc[tsd.tmean <= 0, "pet_hamon:mm"] = 0.0
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def hargreaves(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units="mm",
print_input=False,
):
"""hargreaves"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
tsdiff = tsd.tmax - tsd.tmin
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hargreaves:mm"])
pe["pet_hargreaves:mm"] = (
0.408
* 0.0023
* newra.ra.values
* np.abs(tsdiff.values) ** 0.5
* (tsd.tmean.values + 17.8)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def oudin_form(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k1=100,
k2=5,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""oudin form"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_oudin:mm"])
    gamma = 2.45  # latent heat of vaporization of water (MJ kg−1)
rho = 1000.0 # density of water (kg m-3)
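# Oudin form: PET = Ra / (gamma * rho) * (Tmean + k2) / k1, converted from m
# to mm by the factor of 1000, and applied only where Tmean exceeds the k2
# threshold; elsewhere the series keeps its initial value of 0.0.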
pe.loc[tsd.tmean > k2, "pet_oudin:mm"] = (
newra.ra / (gamma * rho) * (tsd.tmean + k2) / k1 * 1000
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def allen(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Allen"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_allen:mm"])
pe["pet_allen:mm"] = (
0.408 * 0.0029 * newra.ra * (tsd.tmax - tsd.tmin) ** 0.4 * (tsd.tmean + 20)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
def reference():
"""reference penman-monteith"""
print("reference")
def potential():
"""potential"""
print("potential")
| bsd-3-clause | -2,276,524,630,884,471,300 | 24.277778 | 87 | 0.587387 | false |
Teamxrtc/webrtc-streaming-node | third_party/depot_tools/third_party/logilab/common/optparser.py | 92 | 3386 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] <arg> ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', run_clean, add_opt_clean)
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> return run(options, args[1:])
With mymod.build that defines two functions run and add_options
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
from warnings import warn
warn('lgc.optparser module is deprecated, use lgc.clcommands instead', DeprecationWarning,
stacklevel=2)
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command, name of module or tuple of functions
(run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \
"mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print('\ncommands:')
for cmdname, (_, help) in self._commands.items():
print('% 10s - %s' % (cmdname, help))
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec('from %s import run, add_options' % mod_or_f)
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
| mit | -7,316,781,646,560,002,000 | 35.804348 | 90 | 0.630538 | false |
bob-the-hamster/commandergenius | project/jni/python/src/Lib/bsddb/dbrecio.py | 203 | 5308 |
"""
File-like objects that read from or write to a bsddb record.
This implements (nearly) all stdio methods.
f = DBRecIO(db, key, txn=None)
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
f.truncate([size]) # truncate file to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
Notes:
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- There's a simple test set (see end of this file) - not yet updated
for DBRecIO.
- readline() is not implemented yet.
From:
Itamar Shtull-Trauring <[email protected]>
"""
import errno
import string
class DBRecIO:
def __init__(self, db, key, txn=None):
self.db = db
self.key = key
self.txn = txn
self.len = None
self.pos = 0
self.closed = 0
self.softspace = 0
def close(self):
if not self.closed:
self.closed = 1
del self.db, self.txn
def isatty(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return 0
def seek(self, pos, mode = 0):
if self.closed:
raise ValueError, "I/O operation on closed file"
if mode == 1:
pos = pos + self.pos
elif mode == 2:
pos = pos + self.len
self.pos = max(0, pos)
def tell(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return self.pos
def read(self, n = -1):
if self.closed:
raise ValueError, "I/O operation on closed file"
if n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
dlen = newpos - self.pos
r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
self.pos = newpos
return r
__fixme = """
def readline(self, length=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf = self.buf + string.joinfields(self.buflist, '')
self.buflist = []
i = string.find(self.buf, '\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
"""
def truncate(self, size=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if size is None:
size = self.pos
elif size < 0:
raise IOError(errno.EINVAL,
"Negative size not allowed")
elif size < self.pos:
self.pos = size
self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)
def write(self, s):
if self.closed:
raise ValueError, "I/O operation on closed file"
if not s: return
if self.pos > self.len:
self.buflist.append('\0'*(self.pos - self.len))
self.len = self.pos
newpos = self.pos + len(s)
self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
self.pos = newpos
def writelines(self, list):
self.write(string.joinfields(list, ''))
def flush(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
"""
# A little test suite
def _test():
import sys
if sys.argv[1:]:
file = sys.argv[1]
else:
file = '/etc/passwd'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', repr(f.readline())
here = f.tell()
line = f.readline()
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.close()
if __name__ == '__main__':
_test()
"""
| lgpl-2.1 | -5,446,701,761,817,104,000 | 26.936842 | 78 | 0.546722 | false |
eckucukoglu/arm-linux-gnueabihf | arm-linux-gnueabihf/libc/usr/lib/python2.7/unittest/test/test_break.py | 105 | 9641 | import gc
import os
import sys
import signal
import weakref
from cStringIO import StringIO
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
int_handler = None
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
if self.int_handler is not None:
signal.signal(signal.SIGINT, self.int_handler)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# Can't use skipIf decorator because the signal handler may have
# been changed after defining this method.
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
self.skipTest("test requires SIGINT to not be ignored")
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakDefaultIntHandler(TestBreak):
int_handler = signal.default_int_handler
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakSignalIgnored(TestBreak):
int_handler = signal.SIG_IGN
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreakSignalDefault(TestBreak):
int_handler = signal.SIG_DFL
| gpl-2.0 | 5,198,870,914,712,312,000 | 32.947183 | 79 | 0.625972 | false |
GaryBrittain/DB2S3 | process.py | 1 | 3210 | import dropbox
import sys
from sqlsync import *
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import os
from pushover import message
import json
if check_lock() == 1:
print 'Database is locked or unreachable, quitting...'
sys.exit()
conn = S3Connection('', '')
pb = conn.get_bucket('')
access_token = 'YOUR DROPBOX APP'
client = dropbox.client.DropboxClient(access_token)
curr_cursor_file = open("cursor.txt", "r")
curr_cursor = curr_cursor_file.read()
curr_cursor_file.close()
next_cursor = client.delta(curr_cursor, '/Camera Uploads')
curr_cursor_file = open("cursor.txt", "w")
curr_cursor_file.write(next_cursor['cursor'])
curr_cursor_file.close()
new_files = 0
if len(next_cursor['entries']) > 0:
for entry in next_cursor['entries']:
if entry[1] != None:
cur_path = entry[0]
cur_file = cur_path.rsplit("/",1)[1]
print 'processing file ['+str(new_files+1)+']: ' + cur_file
post_file(cur_path, cur_file)
new_files += 1
else:
print entry[0] + " has been removed."
with open("errors.txt", "a") as err:
err.write('File removed from dropbox: '+str(entry[0])+"\n")
err.close()
else:
print "No files have changed."
uploaded = 0
failed = 0
path = next_file_to_process()
workload = len(path)
processed = 0
for i in path:
if check_lock() == 1:
print 'Process locked by database, terminating...'
message('Process locked by database, terminating...')
break
processed += 1
cPath = i["PATH"]
cFile = i["FILENAME"]
print ' '
print 'Processing ' + str("{:,}".format(processed)) + ' of ' + str("{:,}".format(workload)) + ': ' + cPath
meta = client.metadata(cPath)
bytes = meta['bytes']
print meta['size']
#100MB chunks
chunk_size = 104857600
chunk_loops = int(bytes / chunk_size) + 1
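# The loop below downloads the file in chunk_size byte ranges: each get_file
# call requests bytes [current, current + chunk_size) and the returned piece
# is appended to the local copy until every byte has been written out.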
current = 0
chunk_loop = 1
out = open(cFile, 'wb')
try:
while (bytes > current):
print 'Downloading chunk %s of %s' % (chunk_loop, chunk_loops)
chunk_loop += 1
f = client.get_file(cPath, rev=None, start=current, length=chunk_size)
out.write(f.read())
current += chunk_size
except:
failed += 1
print 'Error downloading ' + cPath
with open("errors.txt", "a") as err:
err.write('Could not download from dropbox file: '+str(cPath)+"\n")
err.close()
continue
print 'Downloaded'
out.close()
filesize = os.path.getsize(cFile)
if bytes != filesize:
print 'Downloaded file corrupted'
failed += 1
continue
k = Key(pb)
k.name = cPath
try:
k.set_contents_from_filename(cFile, encrypt_key=True)
os.remove(cFile)
print 'Uploaded'
s3_uploaded_confirm(cPath, meta['size'], meta['bytes'], meta['rev'], meta['revision'], meta['mime_type'], meta['modified'], meta['client_mtime'])
uploaded = uploaded + 1
except:
print 'Error uploading ' + cPath
try:
k.name='/db2s3/cursor.txt'
k.set_contents_from_filename('cursor.txt', encrypt_key=True)
except:
print 'could not copy cursor key to S3'
print '******************************'
summary = """%s new files found
%s files uploaded
%s failures - check errors.txt for info"""%(new_files,uploaded,failed)
message(summary)
print 'Finished!'
print summary
| mit | 8,403,771,263,856,615,000 | 25.75 | 149 | 0.643614 | false |
yongtang/tensorflow | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py | 14 | 6847 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for TF_CONFIG Environment Variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_TF_CONFIG_ENV = 'TF_CONFIG'
_SESSION_MASTER_KEY = 'session_master'
_RPC_LAYER_KEY = 'rpc_layer'
_TASK_KEY = 'task'
def format_master_url(master, rpc_layer=None):
if rpc_layer:
return '%s://%s' % (rpc_layer, master)
else:
return master
def _load_tf_config():
return json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
def _get_value_in_tfconfig(key, default=None):
tf_config = _load_tf_config()
return tf_config[key] if key in tf_config else default
@tf_export('distribute.cluster_resolver.TFConfigClusterResolver')
class TFConfigClusterResolver(ClusterResolver):
"""Implementation of a ClusterResolver which reads the TF_CONFIG EnvVar.
This is an implementation of cluster resolvers when using TF_CONFIG to set
information about the cluster. The cluster spec returned will be
initialized from the TF_CONFIG environment variable.
An example to set TF_CONFIG is:
```Python
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ["localhost:12345", "localhost:23456"]
},
'task': {'type': 'worker', 'index': 0}
})
```
However, sometimes the container orchestration framework will set TF_CONFIG
for you. In this case, you can just create an instance without passing in any
arguments. You can find an example of how to let Kubernetes set TF_CONFIG for
you: https://github.com/tensorflow/ecosystem/tree/master/kubernetes. Then you
can use it with `tf.distribute.Strategy` as:
```Python
# `TFConfigClusterResolver` is already the default one in the following
# strategy.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=TFConfigClusterResolver())
```
"""
def __init__(self,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
"""Creates a new TFConfigClusterResolver.
Args:
task_type: (String, optional) Overrides the task type specified in the
TF_CONFIG environment variable.
task_id: (Integer, optional) Overrides the task index specified in the
TF_CONFIG environment variable.
rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.
environment: (String, optional) Overrides the environment TensorFlow
operates in.
"""
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._environment = environment
@property
def task_type(self):
if self._task_type is None:
task_info = _get_value_in_tfconfig(_TASK_KEY, {})
return str(task_info['type']) if 'type' in task_info else None
else:
return str(self._task_type)
@property
def task_id(self):
if self._task_id is None:
task_info = _get_value_in_tfconfig(_TASK_KEY, {})
return int(task_info['index']) if 'index' in task_info else None
else:
return int(self._task_id)
@task_type.setter
def task_type(self, task_type):
self._task_type = task_type
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def environment(self):
return self._environment
@property
def rpc_layer(self):
if self._rpc_layer is None:
return _get_value_in_tfconfig(_RPC_LAYER_KEY)
else:
return self._rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
def num_accelerators(self,
task_type=None,
task_id=None,
config_proto=None):
task_type = self.task_type if task_type is None else task_type
task_id = self.task_id if task_id is None else task_id
return super(TFConfigClusterResolver, self).num_accelerators(
task_type, task_id, config_proto)
def cluster_spec(self):
"""Returns a ClusterSpec based on the TF_CONFIG environment variable.
Returns:
A ClusterSpec with information from the TF_CONFIG environment variable.
"""
tf_config = _load_tf_config()
if 'cluster' not in tf_config:
return ClusterSpec({})
return ClusterSpec(tf_config['cluster'])
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a TensorFlow session.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (String, optional) Overrides and sets the task_type of the
master.
task_id: (Integer, optional) Overrides and sets the task id of the
master.
rpc_layer: (String, optional) Overrides and sets the protocol over which
TensorFlow nodes communicate with each other.
Returns:
The address of the master.
Raises:
RuntimeError: If the task_type or task_id is not specified and the
`TF_CONFIG` environment variable does not contain a task section.
"""
# If `session_master` is set, just use that.
session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)
if session_master is not None:
return session_master
# Return an empty string if we are the only job in the ClusterSpec.
cluster_spec = self.cluster_spec()
if (not cluster_spec.jobs or
(len(cluster_spec.jobs) == 1 and
len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1)):
return ''
# We try to auto-detect the task type and id, but uses the user-supplied one
# where available
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer
return format_master_url(cluster_spec.task_address(task_type, task_id),
rpc_layer)
| apache-2.0 | -3,824,496,371,474,636,000 | 32.563725 | 90 | 0.672703 | false |
saydulk/horizon | openstack_dashboard/usage/views.py | 32 | 4722 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import base
class UsageView(tables.DataTableView):
usage_class = None
show_terminated = True
csv_template_name = None
page_title = _("Overview")
def __init__(self, *args, **kwargs):
super(UsageView, self).__init__(*args, **kwargs)
if not issubclass(self.usage_class, base.BaseUsage):
raise AttributeError("You must specify a usage_class attribute "
"which is a subclass of BaseUsage.")
def get_template_names(self):
if self.request.GET.get('format', 'html') == 'csv':
return (self.csv_template_name or
".".join((self.template_name.rsplit('.', 1)[0], 'csv')))
return self.template_name
def get_content_type(self):
if self.request.GET.get('format', 'html') == 'csv':
return "text/csv"
return "text/html"
def get_data(self):
try:
project_id = self.kwargs.get('project_id',
self.request.user.tenant_id)
self.usage = self.usage_class(self.request, project_id)
self.usage.summarize(*self.usage.get_date_range())
self.usage.get_limits()
self.kwargs['usage'] = self.usage
return self.usage.usage_list
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve usage information.'))
return []
def get_context_data(self, **kwargs):
context = super(UsageView, self).get_context_data(**kwargs)
context['table'].kwargs['usage'] = self.usage
context['form'] = self.usage.form
context['usage'] = self.usage
context['charts'] = []
# (Used key, Max key, Human Readable Name, text to display when
# describing the quota; by default it is 'Used')
types = [("totalInstancesUsed", "maxTotalInstances", _("Instances")),
("totalCoresUsed", "maxTotalCores", _("VCPUs")),
("totalRAMUsed", "maxTotalRAMSize", _("RAM")),
("totalFloatingIpsUsed", "maxTotalFloatingIps",
"Floating IPs", _("Allocated")),
("totalSecurityGroupsUsed", "maxSecurityGroups",
_("Security Groups"))]
# Check for volume usage
if 'totalVolumesUsed' in self.usage.limits and self.usage.limits[
'totalVolumesUsed'] >= 0:
types.append(("totalVolumesUsed", "maxTotalVolumes",
_("Volumes")))
types.append(("totalGigabytesUsed", "maxTotalVolumeGigabytes",
_("Volume Storage")))
for t in types:
if t[0] in self.usage.limits and t[1] in self.usage.limits:
text = False
if len(t) > 3:
text = t[3]
context['charts'].append({
'name': t[2],
'used': self.usage.limits[t[0]],
'max': self.usage.limits[t[1]],
'text': text
})
try:
context['simple_tenant_usage_enabled'] = \
api.nova.extension_supported('SimpleTenantUsage', self.request)
except Exception:
context['simple_tenant_usage_enabled'] = True
return context
def render_to_response(self, context, **response_kwargs):
if self.request.GET.get('format', 'html') == 'csv':
render_class = self.csv_response_class
response_kwargs.setdefault("filename", "usage.csv")
else:
render_class = self.response_class
context = self.render_context_with_title(context)
resp = render_class(request=self.request,
template=self.get_template_names(),
context=context,
content_type=self.get_content_type(),
**response_kwargs)
return resp
| apache-2.0 | -7,567,539,970,578,892,000 | 41.160714 | 79 | 0.56925 | false |
krahman/BuildingMachineLearningSystemsWithPython | ch04/build_lda.py | 1 | 2472 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
try:
import nltk.corpus
except ImportError:
print("nltk not found")
print("please install it")
raise
from scipy.spatial import distance
import numpy as np
import string
from gensim import corpora, models, similarities
import sklearn.datasets
import nltk.stem
from collections import defaultdict
english_stemmer = nltk.stem.SnowballStemmer('english')
stopwords = set(nltk.corpus.stopwords.words('english'))
stopwords.update(['from:', 'subject:', 'writes:', 'writes'])
class DirectText(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
try:
dataset = sklearn.datasets.load_mlcomp("20news-18828", "train",
mlcomp_root='./data')
except:
print("Newsgroup data not found.")
print("Please download from http://mlcomp.org/datasets/379")
print("And expand the zip into the subdirectory data/")
print()
print()
raise
otexts = dataset.data
texts = dataset.data
texts = [t.decode('utf-8', 'ignore') for t in texts]
texts = [t.split() for t in texts]
texts = [map(lambda w: w.lower(), t) for t in texts]
texts = [filter(lambda s: not len(set("+-.?!()>@0123456789") & set(s)), t)
for t in texts]
texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t)
for t in texts]
texts = [map(english_stemmer.stem, t) for t in texts]
usage = defaultdict(int)
for t in texts:
for w in set(t):
usage[w] += 1
limit = len(texts) / 10
too_common = [w for w in usage if usage[w] > limit]
too_common = set(too_common)
texts = [filter(lambda s: s not in too_common, t) for t in texts]
corpus = DirectText(texts)
dictionary = corpus.dictionary
try:
dictionary['computer']
except:
pass
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=dictionary.id2token)
thetas = np.zeros((len(texts), 100))
for i, c in enumerate(corpus):
for ti, v in model[c]:
thetas[i, ti] += v
distances = distance.squareform(distance.pdist(thetas))
large = distances.max() + 1
for i in xrange(len(distances)):
distances[i, i] = large
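# distances[i, j] holds the Euclidean distance between the topic distributions
# of documents i and j; the diagonal is set to a value larger than any real
# distance so argmin() below never returns a document as its own neighbour.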
print(otexts[1])
print()
print()
print()
print(otexts[distances[1].argmin()])
| mit | 2,488,482,030,953,443,000 | 26.466667 | 73 | 0.676375 | false |
point97/hapifis | server/apps/survey/migrations/0069_auto__add_field_response_answer_number.py | 1 | 15989 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Response.answer_number'
db.add_column(u'survey_response', 'answer_number',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Response.answer_number'
db.delete_column(u'survey_response', 'answer_number')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'survey.block': {
'Meta': {'object_name': 'Block'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'})
},
u'survey.gridanswer': {
'Meta': {'object_name': 'GridAnswer'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}),
'row_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'row_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.locationanswer': {
'Meta': {'object_name': 'LocationAnswer'},
'answer': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Location']"})
},
u'survey.multianswer': {
'Meta': {'object_name': 'MultiAnswer'},
'answer_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_text': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'integer'", 'max_length': '20'})
},
u'survey.page': {
'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']", 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Block']", 'null': 'True', 'blank': 'True'}),
'cols': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_questions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter_questions_rel_+'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'foreach_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreach'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'grid_cols': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'grid_cols'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Option']"}),
'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'integer_max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'integer_min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'report_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}),
'last_question': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'locations': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'surveyor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'ddec2809-7f56-44c8-adf6-d609312f8e15'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'answer_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}),
'states': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey'] | gpl-3.0 | 8,509,431,234,108,823,000 | 83.603175 | 207 | 0.547439 | false |
nuagenetworks/vspk-python | vspk/v6/nuzfbrequest.py | 1 | 35457 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUJobsFetcher
from bambou import NURESTObject
class NUZFBRequest(NURESTObject):
""" Represents a ZFBRequest in the VSD
Notes:
            Pending requests reflect Network Services Gateways that have initiated a request for bootstrapping. Requests can be assigned, or matched, to continue the bootstrapping process. If a request is rejected, the NSG will terminate the auto-bootstrapping attempts.
"""
__rest_name__ = "zfbrequest"
__resource_name__ = "zfbrequests"
## Constants
CONST_ZFB_APPROVAL_STATUS_DENIED = "DENIED"
CONST_REQUEST_TYPE_SELF_REBOOTSTRAP = "SELF_REBOOTSTRAP"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_REQUEST_TYPE_ZFB = "ZFB"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ZFB_APPROVAL_STATUS_UNASSIGNED = "UNASSIGNED"
CONST_ZFB_APPROVAL_STATUS_APPROVED = "APPROVED"
CONST_ZFB_APPROVAL_STATUS_ASSIGNED = "ASSIGNED"
CONST_ASSOCIATED_ENTITY_TYPE_GATEWAY = "GATEWAY"
CONST_ASSOCIATED_ENTITY_TYPE_NSGATEWAY = "NSGATEWAY"
def __init__(self, **kwargs):
""" Initializes a ZFBRequest instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> zfbrequest = NUZFBRequest(id=u'xxxx-xxx-xxx-xxx', name=u'ZFBRequest')
>>> zfbrequest = NUZFBRequest(data=my_dict)
"""
super(NUZFBRequest, self).__init__()
# Read/Write Attributes
self._mac_address = None
self._zfb_approval_status = None
self._zfb_bootstrap_enabled = None
self._zfb_info = None
self._zfb_request_retry_timer = None
self._sku = None
self._ip_address = None
self._cpu_type = None
self._nsg_version = None
self._uuid = None
self._family = None
self._last_connected_time = None
self._last_updated_by = None
self._last_updated_date = None
self._registration_url = None
self._request_type = None
self._serial_number = None
self._embedded_metadata = None
self._entity_scope = None
self._hostname = None
self._creation_date = None
self._original_enterprise_name = None
self._original_gateway_datapath_id = None
self._original_gateway_name = None
self._original_uplink_connection_info = None
self._associated_enterprise_id = None
self._associated_enterprise_name = None
self._associated_entity_type = None
self._associated_gateway_id = None
self._associated_gateway_name = None
self._associated_ns_gateway_id = None
self._associated_ns_gateway_name = None
self._status_string = None
self._owner = None
self._external_id = None
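        # Expose each attribute with its remote (VSD API) name, type and validation constraints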
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_approval_status", remote_name="ZFBApprovalStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'APPROVED', u'ASSIGNED', u'DENIED', u'UNASSIGNED'])
self.expose_attribute(local_name="zfb_bootstrap_enabled", remote_name="ZFBBootstrapEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_request_retry_timer", remote_name="ZFBRequestRetryTimer", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="sku", remote_name="SKU", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ip_address", remote_name="IPAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_connected_time", remote_name="lastConnectedTime", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="registration_url", remote_name="registrationURL", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="request_type", remote_name="requestType", attribute_type=str, is_required=False, is_unique=False, choices=[u'SELF_REBOOTSTRAP', u'ZFB'])
self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="hostname", remote_name="hostname", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_enterprise_name", remote_name="originalEnterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_gateway_datapath_id", remote_name="originalGatewayDatapathID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_gateway_name", remote_name="originalGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="original_uplink_connection_info", remote_name="originalUplinkConnectionInfo", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_id", remote_name="associatedEnterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_name", remote_name="associatedEnterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False, choices=[u'GATEWAY', u'NSGATEWAY'])
self.expose_attribute(local_name="associated_gateway_id", remote_name="associatedGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_gateway_name", remote_name="associatedGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status_string", remote_name="statusString", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def mac_address(self):
""" Get mac_address value.
Notes:
                MAC Address of the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
return self._mac_address
@mac_address.setter
def mac_address(self, value):
""" Set mac_address value.
Notes:
                MAC Address of the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
self._mac_address = value
@property
def zfb_approval_status(self):
""" Get zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
return self._zfb_approval_status
@zfb_approval_status.setter
def zfb_approval_status(self, value):
""" Set zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
self._zfb_approval_status = value
@property
def zfb_bootstrap_enabled(self):
""" Get zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
return self._zfb_bootstrap_enabled
@zfb_bootstrap_enabled.setter
def zfb_bootstrap_enabled(self, value):
""" Set zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
self._zfb_bootstrap_enabled = value
@property
def zfb_info(self):
""" Get zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
return self._zfb_info
@zfb_info.setter
def zfb_info(self, value):
""" Set zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
self._zfb_info = value
@property
def zfb_request_retry_timer(self):
""" Get zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on the gateway. Set on VSD's System Config panel.
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
return self._zfb_request_retry_timer
@zfb_request_retry_timer.setter
def zfb_request_retry_timer(self, value):
""" Set zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on the gateway. Set on VSD's System Config panel.
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
self._zfb_request_retry_timer = value
@property
def sku(self):
""" Get sku value.
Notes:
The part number of the gateway being bootstrapped through ZFB.
This attribute is named `SKU` in VSD API.
"""
return self._sku
@sku.setter
def sku(self, value):
""" Set sku value.
Notes:
The part number of the gateway being bootstrapped through ZFB.
This attribute is named `SKU` in VSD API.
"""
self._sku = value
@property
def ip_address(self):
""" Get ip_address value.
Notes:
IP Address of the gateway being bootstrapped using ZFB.
This attribute is named `IPAddress` in VSD API.
"""
return self._ip_address
@ip_address.setter
def ip_address(self, value):
""" Set ip_address value.
Notes:
IP Address of the gateway being bootstrapped using ZFB.
This attribute is named `IPAddress` in VSD API.
"""
self._ip_address = value
@property
def cpu_type(self):
""" Get cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
return self._cpu_type
@cpu_type.setter
def cpu_type(self, value):
""" Set cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
self._cpu_type = value
@property
def nsg_version(self):
""" Get nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
return self._nsg_version
@nsg_version.setter
def nsg_version(self, value):
""" Set nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
self._nsg_version = value
@property
def uuid(self):
""" Get uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def family(self):
""" Get family value.
Notes:
Gateway Type
"""
return self._family
@family.setter
def family(self, value):
""" Set family value.
Notes:
Gateway Type
"""
self._family = value
@property
def last_connected_time(self):
""" Get last_connected_time value.
Notes:
The time in which the last GET was made from the gateway.
This attribute is named `lastConnectedTime` in VSD API.
"""
return self._last_connected_time
@last_connected_time.setter
def last_connected_time(self, value):
""" Set last_connected_time value.
Notes:
The time in which the last GET was made from the gateway.
This attribute is named `lastConnectedTime` in VSD API.
"""
self._last_connected_time = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def registration_url(self):
""" Get registration_url value.
Notes:
Registration URL to be used for a gateway to be bootstrapped using ZFB.
This attribute is named `registrationURL` in VSD API.
"""
return self._registration_url
@registration_url.setter
def registration_url(self, value):
""" Set registration_url value.
Notes:
Registration URL to be used for a gateway to be bootstrapped using ZFB.
This attribute is named `registrationURL` in VSD API.
"""
self._registration_url = value
@property
def request_type(self):
""" Get request_type value.
Notes:
Value that serves in indicating if the Auto-Bootstrapping request is made in the context of a new NSG instance being bootstrapped or an NSG going through a self-rebootstrapping phase following a revocation triggered by entering quarantine.
This attribute is named `requestType` in VSD API.
"""
return self._request_type
@request_type.setter
def request_type(self, value):
""" Set request_type value.
Notes:
Value that serves in indicating if the Auto-Bootstrapping request is made in the context of a new NSG instance being bootstrapped or an NSG going through a self-rebootstrapping phase following a revocation triggered by entering quarantine.
This attribute is named `requestType` in VSD API.
"""
self._request_type = value
@property
def serial_number(self):
""" Get serial_number value.
Notes:
The gateway's Serial Number.
This attribute is named `serialNumber` in VSD API.
"""
return self._serial_number
@serial_number.setter
def serial_number(self, value):
""" Set serial_number value.
Notes:
The gateway's Serial Number.
This attribute is named `serialNumber` in VSD API.
"""
self._serial_number = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def hostname(self):
""" Get hostname value.
Notes:
Hostname of the gateway bootstrapped using ZFB.
"""
return self._hostname
@hostname.setter
def hostname(self, value):
""" Set hostname value.
Notes:
Hostname of the gateway bootstrapped using ZFB.
"""
self._hostname = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def original_enterprise_name(self):
""" Get original_enterprise_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name of the enterprise/organisation to which the NSG belonged.
This attribute is named `originalEnterpriseName` in VSD API.
"""
return self._original_enterprise_name
@original_enterprise_name.setter
def original_enterprise_name(self, value):
""" Set original_enterprise_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name of the enterprise/organisation to which the NSG belonged.
This attribute is named `originalEnterpriseName` in VSD API.
"""
self._original_enterprise_name = value
@property
def original_gateway_datapath_id(self):
""" Get original_gateway_datapath_id value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original datapath ID that it had before revoking.
This attribute is named `originalGatewayDatapathID` in VSD API.
"""
return self._original_gateway_datapath_id
@original_gateway_datapath_id.setter
def original_gateway_datapath_id(self, value):
""" Set original_gateway_datapath_id value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original datapath ID that it had before revoking.
This attribute is named `originalGatewayDatapathID` in VSD API.
"""
self._original_gateway_datapath_id = value
@property
def original_gateway_name(self):
""" Get original_gateway_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name the gateway had before revoking.
This attribute is named `originalGatewayName` in VSD API.
"""
return self._original_gateway_name
@original_gateway_name.setter
def original_gateway_name(self, value):
""" Set original_gateway_name value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name the gateway had before revoking.
This attribute is named `originalGatewayName` in VSD API.
"""
self._original_gateway_name = value
@property
def original_uplink_connection_info(self):
""" Get original_uplink_connection_info value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents an information blob of the original uplink connection information that applied to this NSG.
This attribute is named `originalUplinkConnectionInfo` in VSD API.
"""
return self._original_uplink_connection_info
@original_uplink_connection_info.setter
def original_uplink_connection_info(self, value):
""" Set original_uplink_connection_info value.
Notes:
For an NSG that is self-rebootstrapping following a quarantine action, this field represents an information blob of the original uplink connection information that applied to this NSG.
This attribute is named `originalUplinkConnectionInfo` in VSD API.
"""
self._original_uplink_connection_info = value
@property
def associated_enterprise_id(self):
""" Get associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
return self._associated_enterprise_id
@associated_enterprise_id.setter
def associated_enterprise_id(self, value):
""" Set associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
self._associated_enterprise_id = value
@property
def associated_enterprise_name(self):
""" Get associated_enterprise_name value.
Notes:
Name of the associated enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
return self._associated_enterprise_name
@associated_enterprise_name.setter
def associated_enterprise_name(self, value):
""" Set associated_enterprise_name value.
Notes:
Name of the associated enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
self._associated_enterprise_name = value
@property
def associated_entity_type(self):
""" Get associated_entity_type value.
Notes:
Associated Entity Type: NSGATEWAY or GATEWAY
This attribute is named `associatedEntityType` in VSD API.
"""
return self._associated_entity_type
@associated_entity_type.setter
def associated_entity_type(self, value):
""" Set associated_entity_type value.
Notes:
Associated Entity Type: NSGATEWAY or GATEWAY
This attribute is named `associatedEntityType` in VSD API.
"""
self._associated_entity_type = value
@property
def associated_gateway_id(self):
""" Get associated_gateway_id value.
Notes:
ID of the assigned Gateway
This attribute is named `associatedGatewayID` in VSD API.
"""
return self._associated_gateway_id
@associated_gateway_id.setter
def associated_gateway_id(self, value):
""" Set associated_gateway_id value.
Notes:
ID of the assigned Gateway
This attribute is named `associatedGatewayID` in VSD API.
"""
self._associated_gateway_id = value
@property
def associated_gateway_name(self):
""" Get associated_gateway_name value.
Notes:
Name of the associated Gateway
This attribute is named `associatedGatewayName` in VSD API.
"""
return self._associated_gateway_name
@associated_gateway_name.setter
def associated_gateway_name(self, value):
""" Set associated_gateway_name value.
Notes:
Name of the associated Gateway
This attribute is named `associatedGatewayName` in VSD API.
"""
self._associated_gateway_name = value
@property
def associated_ns_gateway_id(self):
""" Get associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
return self._associated_ns_gateway_id
@associated_ns_gateway_id.setter
def associated_ns_gateway_id(self, value):
""" Set associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
self._associated_ns_gateway_id = value
@property
def associated_ns_gateway_name(self):
""" Get associated_ns_gateway_name value.
Notes:
Name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
return self._associated_ns_gateway_name
@associated_ns_gateway_name.setter
def associated_ns_gateway_name(self, value):
""" Set associated_ns_gateway_name value.
Notes:
Name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
self._associated_ns_gateway_name = value
@property
def status_string(self):
""" Get status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
return self._status_string
@status_string.setter
def status_string(self, value):
""" Set status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
self._status_string = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause | 4,490,000,848,658,600,000 | 30.574354 | 296 | 0.581324 | false |
os2webscanner/os2webscanner | scrapy-webscanner/scanners/rules/regexrule.py | 1 | 6057 | # The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import logging
import re
import regex
from .cpr import CPRRule
from .rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, pattern_strings, sensitivity, cpr_enabled=False, ignore_irrelevant=False,
do_modulus11=False, *args, **kwargs):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
# Convert QuerySet to list
super().__init__(*args, **kwargs)
self.regex_patterns = list(pattern_strings.all())
self.name = name
self.sensitivity = sensitivity
self.cpr_enabled = cpr_enabled
self.ignore_irrelevant = ignore_irrelevant
self.do_modulus11 = do_modulus11
self.regex_str = ''
if not self._is_cpr_only():
            logging.info('------- Regex patterns ---------')
            for _pseudo_rule in self.regex_patterns:
                logging.info(_pseudo_rule.pattern_string)
logging.info('-----------------------------\n')
self.regex_str = self.compund_rules()
self.regex = regex.compile(self.regex_str, regex.DOTALL)
# bind the 'do_modulus11' and 'ignore_irrelevant' variables to the cpr_enabled property so that they're always
# false if it is false
if not cpr_enabled:
self.do_modulus11 = cpr_enabled
self.ignore_irrelevant = cpr_enabled
def __str__(self):
"""
Returns a string object representation of this object
:return:
"""
return '{\n\tname: ' + self.name + \
',\n\tregex: ' + self.regex_str + \
',\n\tcpr_enabled: ' + str(self._is_cpr_only()) + \
',\n\tsensitivity: ' + str(self.sensitivity) + '\n}'
def compund_rules(self):
"""
This method compounds all the regex patterns in the rule set into one regex rule that is OR'ed
e.g. A ruleSet of {pattern1, pattern2, pattern3} becomes (pattern1 | pattern2 | pattern3)
        :return: the compound regex pattern string, or None if the rule set is empty
"""
rule_set = set(self.regex_patterns)
if len(rule_set) == 1:
return rule_set.pop().pattern_string
if len(rule_set) > 1:
compound_rule = '('
for _ in self.regex_patterns:
compound_rule += rule_set.pop().pattern_string
if len(rule_set) <= 0:
compound_rule += ')'
else:
compound_rule += '|'
print('Returning< '+compound_rule+' >')
return compound_rule
if len(rule_set) < 1:
return None
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
if self._is_cpr_only():
cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
temp_matches = cpr_rule.execute(text)
matches.update(temp_matches)
else:
re_matches = self.regex.finditer(text)
if self.cpr_enabled:
cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
matches.update(cpr_rule.execute(text))
for match in re_matches:
matched_data = match.group(0)
if len(matched_data) > 1024:
# TODO: Get rid of magic number
matched_data = match.group(1)
matches.add(MatchItem(matched_data=matched_data,
sensitivity=self.sensitivity))
return matches
def is_all_match(self, matches):
"""
Checks if each rule is matched with the provided list of matches
:param matches: List of matches
        :return: {True | False}
"""
if not isinstance(matches, set):
return False
cpr_match = False
# If it turns out that we're only doing a cpr scan then scan for the first match and return true
if self._is_cpr_only():
for match in matches:
if re.match(self.cpr_pattern, match['original_matched_data']):
return True
else:
regex_patterns = set(self.regex_patterns)
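            # entries are popped from this set as matches are found; the checks below
            # treat an empty set as 'every pattern has been matched'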
# for rule in self.regex_patterns:
for pattern in self.regex_patterns:
for match in matches:
if re.match(pattern.pattern_string, match['matched_data']) and regex_patterns:
regex_patterns.pop()
continue
if self.cpr_enabled and not cpr_match and 'original_matched_data' in match:
if re.match(self.cpr_pattern, match['original_matched_data']):
cpr_match = True
if not regex_patterns:
break
if not self.cpr_enabled:
return not regex_patterns
else:
return not regex_patterns and cpr_match
def _is_cpr_only(self):
"""Just a method to decide if we are only doing a CPR scan."""
return self.cpr_enabled and len(self.regex_patterns) <= 0
| mpl-2.0 | -2,763,163,922,065,293,000 | 37.826923 | 118 | 0.575698 | false |
magcius/dolphin | Tools/find-includes-cycles.py | 157 | 2630 | #! /usr/bin/env python
'''
Run this script from Source/Core/ to find all the #include cycles.
'''
import subprocess
def get_local_includes_for(path):
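    '''Return the paths referenced by quoted (local) #include directives in the file.'''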
lines = open(path).read().split('\n')
includes = [l.strip() for l in lines if l.strip().startswith('#include')]
return [i.split()[1][1:-1] for i in includes if '"' in i.split()[1]]
def find_all_files():
'''Could probably use os.walk, but meh.'''
f = subprocess.check_output(['find', '.', '-name', '*.h'],
universal_newlines=True).strip().split('\n')
return [p[2:] for p in f]
def make_include_graph():
return { f: get_local_includes_for(f) for f in find_all_files() }
def strongly_connected_components(graph):
"""
Tarjan's Algorithm (named for its discoverer, Robert Tarjan) is a graph theory algorithm
for finding the strongly connected components of a graph.
Based on: http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
"""
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors of `node`
try:
successors = graph[node]
        except KeyError:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited; recurse on it
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
if __name__ == '__main__':
comp = strongly_connected_components(make_include_graph())
for c in comp:
if len(c) != 1:
print(c)
| gpl-2.0 | 259,585,647,588,296,740 | 31.875 | 107 | 0.587452 | false |
stevelle/glance | glance/registry/api/v2/__init__.py | 20 | 1125 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.common import wsgi
from glance.registry.api.v2 import rpc
def init(mapper):
rpc_resource = rpc.create_resource()
mapper.connect("/rpc", controller=rpc_resource,
conditions=dict(method=["POST"]),
action="__call__")
class API(wsgi.Router):
"""WSGI entry point for all Registry requests."""
def __init__(self, mapper):
mapper = mapper or wsgi.APIMapper()
init(mapper)
super(API, self).__init__(mapper)
| apache-2.0 | 4,267,301,022,425,531,400 | 31.142857 | 78 | 0.671111 | false |
bolkedebruin/airflow | airflow/providers/docker/example_dags/example_docker_swarm_operator.py | 1 | 1606 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
from datetime import timedelta
from airflow.utils.dates import days_ago
from airflow import DAG
from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': days_ago(1),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False
}
dag = DAG(
'docker_swarm_sample',
default_args=default_args,
schedule_interval=timedelta(minutes=10),
catchup=False
)
with dag as dag:
t1 = DockerSwarmOperator(
api_version='auto',
docker_url='tcp://localhost:2375', # Set your docker URL
command='/bin/sleep 10',
image='centos:latest',
auto_remove=True,
task_id='sleep_with_swarm',
)
"""
| apache-2.0 | 1,583,269,313,751,345,400 | 30.490196 | 79 | 0.704857 | false |
mezz64/home-assistant | homeassistant/components/rpi_pfio/binary_sensor.py | 14 | 2527 | """Support for binary sensor using the PiFace Digital I/O module on a RPi."""
import voluptuous as vol
from homeassistant.components import rpi_pfio
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = "invert_logic"
CONF_PORTS = "ports"
CONF_SETTLE_TIME = "settle_time"
DEFAULT_INVERT_LOGIC = False
DEFAULT_SETTLE_TIME = 20
PORT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SETTLE_TIME, default=DEFAULT_SETTLE_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_PORTS, default={}): vol.Schema({cv.positive_int: PORT_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PiFace Digital Input devices."""
binary_sensors = []
ports = config.get(CONF_PORTS)
for port, port_entity in ports.items():
name = port_entity.get(CONF_NAME)
settle_time = port_entity[CONF_SETTLE_TIME] / 1000
invert_logic = port_entity[CONF_INVERT_LOGIC]
binary_sensors.append(
RPiPFIOBinarySensor(hass, port, name, settle_time, invert_logic)
)
add_entities(binary_sensors, True)
rpi_pfio.activate_listener(hass)
class RPiPFIOBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that a PiFace Digital Input."""
def __init__(self, hass, port, name, settle_time, invert_logic):
"""Initialize the RPi binary sensor."""
self._port = port
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._state = None
def read_pfio(port):
"""Read state from PFIO."""
self._state = rpi_pfio.read_input(self._port)
self.schedule_update_ha_state()
rpi_pfio.edge_detect(hass, self._port, read_pfio, settle_time)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
def update(self):
"""Update the PFIO state."""
self._state = rpi_pfio.read_input(self._port)
| apache-2.0 | 2,840,896,854,696,157,700 | 30.5875 | 86 | 0.657697 | false |
geishatokyo-lightning/lightning | lightning_core/test/testvg.py | 1 | 5533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Geisha Tokyo Entertainment, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
from lightning_core.vg.vg import *
from lightning_core.vg.parser import *
from lxml import etree
class TestLinearGrad(unittest.TestCase):
def test_constructor(self):
c1 = [256,256,256,256]
c2 = [ 0, 0, 0, 0]
sp1 = Stop(c1, '100')
sp2 = Stop(c2, '0')
gtf = {'scaleX':'0', 'scaleY':'0.101'}
lg = LinearGradient('100', gtf, (sp1,sp2))
self.assertEqual(lg.get('id'), '100')
self.assertEqual(lg.get('gradientUnits'), 'userSpaceOnUse')
self.assertEqual(lg.get('x1'), '-819')
self.assertEqual(lg.get('x2'), '819')
self.assertEqual(lg.get('gradientTransform'), 'matrix(0.00 0.00 0.00 0.10 0.0000 0.0000)')
self.assertEqual(len(lg), 2)
self.assertEqual(lg[0].get('stop-color'), '#ffffff')
self.assertEqual(lg[0].get('stop-opacity'), '1.0')
self.assertEqual(lg[0].get('offset'), str(100.0/255))
self.assertEqual(lg[1].get('stop-color'), '#000000')
self.assertEqual(lg[1].get('stop-opacity'), str(0.0/255))
self.assertEqual(lg[1].get('offset'), str(0.0/255))
class TestTransform(unittest.TestCase):
def setUp(self):
filename = './lightning_core/test/xmlsamples.xml'
f = open(filename,'r')
samplexml = f.read()
self.poxml = etree.XML(samplexml).xpath('.//PLACE_OBJECT2_HAS_COLORTRANS/PlaceObject2')[0]
self.transform = Transform()
self.parser = Parser()
self.po = self.parser._get_place_object(self.poxml)
def test_constructor(self):
transform = Transform()
self.assertEqual(transform.sx, 1.0)
self.assertEqual(transform.sy, 1.0)
self.assertEqual(transform.tx, 0.0)
self.assertEqual(transform.ty, 0.0)
self.assertEqual(transform.wx, 0.0)
self.assertEqual(transform.wy, 0.0)
self.assertEqual(transform.ctf, [])
self.assertEqual(transform.depth, 1)
self.assertEqual(transform.clipDepth, None)
self.assertEqual(transform.name, None)
self.assertEqual(transform.visible, True)
def test_set_items_and_get_matrix(self):
transform = Transform()
transform.set_items(self.po.items())
self.assertEqual(transform.get_matrix(), (1.001770019531250, 0.0, 0.0, 1.0, -25.7, -57.0))
class TestTree(unittest.TestCase):
def test_constructor(self):
tree = Tree()
self.assertAlmostEqual(tree.sx, 1.0)
self.assertAlmostEqual(tree.sy, 1.0)
self.assertAlmostEqual(tree.wx, 0.0)
self.assertAlmostEqual(tree.wy, 0.0)
self.assertAlmostEqual(tree.tx, 0.0)
self.assertAlmostEqual(tree.ty, 0.0)
self.assertEqual(len(tree.ctf), 0.0)
self.assertEqual(tree.depth, 1)
self.assertEqual(tree.name, None)
self.assertEqual(len(tree.children), 0)
self.assertEqual(tree.parent, None)
def test_update(self):
tree = Tree()
tree.set_items({'tx':2.0})
self.assertAlmostEqual(tree.tx, 2.0)
def test_str(self):
tree = Tree()
self.assertEqual(str(tree), 'key=None\n')
tree.key = 'hoge'
self.assertEqual(str(tree), 'key=hoge\n')
tree2 = Tree()
tree2.key = 'fuga'
tree.children.append(tree2)
self.assertEqual(str(tree), 'key=hoge\n\tkey=fuga')
class TestAnimation(unittest.TestCase):
def test_constructor(self):
anim = Animation()
self.assertEqual(anim.key, None)
self.assertEqual(len(anim.frames), 0)
def test_appendFrame(self):
anim = Animation()
index = 1
sx = 1.0
sy = 1.0
wx = 0.0
wy = 0.0
tx = 0.0
ty = 0.0
ctf = []
anim.key = 'hoge'
anim.appendFrame(index, sx, sy, wx, wy, tx, ty, ctf)
self.assertEqual(anim.key, 'hoge')
self.assertEqual(len(anim.frames), 1)
frame = anim.frames[0]
self.assertEqual(frame['index'], 1)
self.assertAlmostEqual(frame.sx, 1.0)
self.assertAlmostEqual(frame['sy'], 1.0)
self.assertAlmostEqual(frame['wx'], 0.0)
self.assertAlmostEqual(frame['wy'], 0.0)
self.assertAlmostEqual(frame['tx'], 0.0)
self.assertAlmostEqual(frame['ty'], 0.0)
self.assertEqual(frame['ctf'], [])
if __name__ == '__main__':
unittest.main()
| mit | 349,963,908,923,093,100 | 37.423611 | 98 | 0.632207 | false |
glorizen/nupic | tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py | 12 | 10082 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests OPF descriptionTemplate.py-based experiment/sub-experiment pair"""
import os
import pprint
import sys
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.opfhelpers import (
loadExperimentDescriptionScriptFromDir,
getExperimentDescriptionInterfaceFromModule
)
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
g_debug = False
class MyTestEnvironment(object):
def __init__(self):
examplesDir = resource_filename("nupic", os.path.join("..", "examples"))
_debugOut("examplesDir=<%s>" % (examplesDir,))
assert os.path.exists(examplesDir), \
"%s is not present in filesystem" % examplesDir
# This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
# In the autobuild, it is a read-only directory
self.__opfBinDir = resource_filename("nupic", os.path.join("..", "scripts"))
assert os.path.exists(self.__opfBinDir), \
"%s is not present in filesystem" % self.__opfBinDir
_debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)
# Where this script is running from (our autotest counterpart may have
# copied it from its original location)
self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
_debugOut("self.__testRunDir=<%s>" % self.__testRunDir)
# Parent directory of our private OPF experiments
self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
"experiments")
assert os.path.exists(self.__opfExperimentsParentDir), \
"%s is not present in filesystem" % self.__opfExperimentsParentDir
_debugOut("self.__opfExperimentsParentDir=<%s>"
% self.__opfExperimentsParentDir)
def getOpfRunExperimentPyPath(self):
return os.path.join(self.__opfBinDir, "run_opf_experiment.py")
def getOpfExperimentPath(self, experimentName):
"""
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
Returns: absolute path to the experiment directory
"""
path = os.path.join(self.__opfExperimentsParentDir, experimentName)
assert os.path.isdir(path), \
"Experiment path %s doesn't exist or is not a directory" % (path,)
return path
class MyTestCaseBase(HelperTestCaseBase):
def setUp(self):
""" Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method will be
considered an error rather than a test failure. The default implementation
does nothing.
"""
global g_myEnv
if not g_myEnv:
# Setup environment
g_myEnv = MyTestEnvironment()
def tearDown(self):
""" Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.
"""
# Reset our log items
self.resetExtraLogItems()
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
def executePositiveOpfExperiment(self, experimentName, short=False):
""" Executes a positive OPF RunExperiment test as a subprocess and validates
its exit status.
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
Returns: result from _executeExternalCmdAndReapOutputs
"""
opfRunner = g_myEnv.getOpfRunExperimentPyPath()
opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)
r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,
experimentDirPath=opfExpDir,
short=short)
return r
def __executePositiveRunExperimentTest(self,
runnerPath,
experimentDirPath,
customOptions=[],
short=False):
""" Executes a positive RunExperiment.py test and performs
basic validation
runnerPath: experiment running (LPF or OPF RunExperiment.py path)
experimentDirPath: directory containing the description.py file of interest
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
NOTE: if the (possibly aggregated) dataset has fewer
rows than the count overrides, then an LPF experiment
will fail.
Returns: result from _executeExternalCmdAndReapOutputs
"""
#----------------------------------------
# Set up args
command = [
"python",
runnerPath,
experimentDirPath,
]
command.extend(customOptions)
if short:
command.append("--testMode")
self.addExtraLogItem({'command':command})
#----------------------------------------
# Execute RunExperiment.py as subprocess and collect results
r = _executeExternalCmdAndReapOutputs(command)
self.addExtraLogItem({'result':r})
_debugOut(("_executeExternalCmdAndReapOutputs(%s)=%s") % (command, r))
#----------------------------------------
# Check subprocess exit status
self.assertEqual(r['exitStatus'], 0,
("Expected status = 0 from %s; got: %s") % \
(runnerPath, r['exitStatus'],))
self.resetExtraLogItems()
return r
class PositiveTests(MyTestCaseBase):
#========================
def test_sub_experiment_override(self):
expDir = g_myEnv.getOpfExperimentPath("gym")
module = loadExperimentDescriptionScriptFromDir(expDir)
expIface = getExperimentDescriptionInterfaceFromModule(module)
modelDesc = expIface.getModelDescription()
tpActivationThreshold = modelDesc['modelParams'] \
['tpParams']['activationThreshold']
expectedValue = 12
self.assertEqual(tpActivationThreshold, expectedValue,
"Expected tp activationThreshold=%s, but got %s" % (
expectedValue, tpActivationThreshold))
def test_run_sub_experiment(self):
self.executePositiveOpfExperiment(experimentName="gym", short=True)
################################################################################
# Support functions
################################################################################
def _executeExternalCmdAndReapOutputs(args):
"""
args: Args list as defined for the args parameter in subprocess.Popen()
Returns: result dicionary:
{
'exitStatus':<exit-status-of-external-command>,
'stdoutData':"string",
'stderrData':"string"
}
"""
import subprocess
_debugOut(("Starting...\n<%s>") % \
(args,))
p = subprocess.Popen(args,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_debugOut(("Process started for <%s>") % (args,))
(stdoutData, stderrData) = p.communicate()
_debugOut(("Process completed for <%s>: exit status=%s, " + \
"stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") % \
(args, p.returncode, type(stdoutData), stdoutData, stderrData))
result = dict(
exitStatus = p.returncode,
stdoutData = stdoutData,
stderrData = stderrData,
)
_debugOut(("args: <%s>: result:\n%s") % \
(args, pprint.pformat(result, indent=4)))
return result
def _debugOut(msg):
if g_debug:
callerTraceback = whoisCallersCaller()
print "OPF TestDescriptionTemplate (f=%s;line=%s): %s" % \
(callerTraceback.function, callerTraceback.lineno, msg,)
sys.stdout.flush()
def whoisCallersCaller():
"""
Returns: Traceback namedtuple for our caller's caller
"""
import inspect
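  # stack()[2] is two frames up (the caller of whoever called this function); [0] is its frame object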
frameObj = inspect.stack()[2][0]
return inspect.getframeinfo(frameObj)
if __name__ == "__main__":
g_myEnv = MyTestEnvironment()
unittest.longMessage = True
unittest.main()
| agpl-3.0 | 284,778,791,718,727,900 | 32.384106 | 80 | 0.621702 | false |
fdzh/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
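# For reference, load_cipher maps a cipher name onto the corresponding
# libcrypto symbol -- e.g. 'aes-128-cfb' resolves to EVP_aes_128_cfb()
# (illustrative; the exact symbols available depend on the OpenSSL build).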
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
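# Each entry below maps a cipher name to (key_length, iv_length, crypto_class),
# with both lengths in bytes -- e.g. 'aes-256-cfb' uses a 32-byte key and a
# 16-byte IV. (This is a convenience reading of the table, not an upstream
# API note.)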
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 | 5,075,317,264,500,755,000 | 28.911602 | 77 | 0.597525 | false |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Lib/colorsys.py | 75 | 3847 | """Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which covers a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
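# A quick round-trip sketch of how these functions pair up (values are exact
# for this input; in general expect small floating point error):
#
#   >>> h, s, v = rgb_to_hsv(0.2, 0.4, 0.4)
#   >>> (h, s, v)
#   (0.5, 0.5, 0.4)
#   >>> hsv_to_rgb(h, s, v)
#   (0.2, 0.4, 0.4)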
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0:
r = 0.0
if g < 0.0:
g = 0.0
if b < 0.0:
b = 0.0
if r > 1.0:
r = 1.0
if g > 1.0:
g = 1.0
if b > 1.0:
b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = (maxc-minc) / (maxc+minc)
else:
s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0:
return l, l, l
if l <= 0.5:
m2 = l * (1.0+s)
else:
m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH:
return m1 + (m2-m1)*hue*6.0
if hue < 0.5:
return m2
if hue < TWO_THIRD:
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
| bsd-2-clause | 8,328,727,964,265,440,000 | 22.660256 | 76 | 0.493371 | false |
srault95/netcall | examples/threading/server_threading_prefork.py | 1 | 3725 | #!/usr/bin/env python
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 fdm=indent
"""
A simple RPC server that shows how to:
* start several worker processes
* use zmq proxy device to load balance requests to the workers
    * make each worker serve multiple RPC services asynchronously
      using Python threads
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
#-----------------------------------------------------------------------------
from os import getpid
from time import sleep
from multiprocessing import Process, cpu_count
from zmq import ROUTER, DEALER
from zmq.devices import ThreadProxy
from netcall.threading import ThreadingRPCService, JSONSerializer
from netcall.utils import get_zmq_classes
class EchoService(ThreadingRPCService):
def echo(self, s):
print "<pid:%s> %r echo %r" % (getpid(), self.connected, s)
return s
def sleep(self, t):
print "<pid:%s> %r sleep %s" % (getpid(), self.connected, t)
sleep(t)
def error(self):
raise ValueError('raising ValueError for fun!')
class MathService(ThreadingRPCService):
def add(self, a, b):
print "<pid:%s> %r add %r %r" % (getpid(), self.connected, a, b)
return a+b
def subtract(self, a, b):
print "<pid:%s> %r subtract %r %r" % (getpid(), self.connected, a, b)
return a-b
def multiply(self, a, b):
print "<pid:%s> %r multiply %r %r" % (getpid(), self.connected, a, b)
return a*b
def divide(self, a, b):
print "<pid:%s> %r divide %r %r" % (getpid(), self.connected, a, b)
return a/b
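# For reference, a client session against this server might look like the
# sketch below (the client class name and call style are assumptions based on
# netcall's threading API; check the installed version before relying on it):
#
#   from netcall.threading import ThreadingRPCClient
#
#   echo = ThreadingRPCClient(serializer=JSONSerializer())
#   echo.connect('tcp://127.0.0.1:5555')
#   echo.echo('hello')                    # goes through the echo proxy
#
#   math = ThreadingRPCClient()
#   math.connect('tcp://127.0.0.1:5556')
#   math.connect('tcp://127.0.0.1:5557')  # both proxies -> load balancing
#   math.add(2, 3)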
class Worker(Process):
def run(self):
# Multiple RPCService instances can be run in a single process
# via Python Threads
Context, _ = get_zmq_classes()
context = Context()
        # Custom serializer/deserializer functions can be passed in. The client
        # side ones must match.
echo = EchoService(context=context, serializer=JSONSerializer())
echo.connect('ipc:///tmp/rpc-demo-echo.service')
# We create two Math services to simulate load balancing. A client can
# connect to both of these services and requests will be load balanced.
math1 = MathService(context=context)
math1.connect('ipc:///tmp/rpc-demo-math1.service')
math2 = MathService(context=context)
math2.connect('ipc:///tmp/rpc-demo-math2.service')
        # Next we start the service threads and wait for them to exit
echo .start()
math1 .start()
math2 .start()
echo .serve()
math1 .serve()
math2 .serve()
if __name__ == '__main__':
workers = [Worker() for _ in range(cpu_count())]
for w in workers:
w.start()
echo_proxy = ThreadProxy(ROUTER, DEALER)
math1_proxy = ThreadProxy(ROUTER, DEALER)
math2_proxy = ThreadProxy(ROUTER, DEALER)
echo_proxy .bind_in('tcp://127.0.0.1:5555')
math1_proxy .bind_in('tcp://127.0.0.1:5556')
math2_proxy .bind_in('tcp://127.0.0.1:5557')
echo_proxy .bind_out('ipc:///tmp/rpc-demo-echo.service')
math1_proxy .bind_out('ipc:///tmp/rpc-demo-math1.service')
math2_proxy .bind_out('ipc:///tmp/rpc-demo-math2.service')
echo_proxy .start()
math1_proxy .start()
math2_proxy .start()
while True:
echo_proxy .join(0.25)
math1_proxy .join(0.25)
math2_proxy .join(0.25)
| bsd-3-clause | 4,683,893,067,149,815,000 | 30.302521 | 79 | 0.600537 | false |
flavoi/diventi | diventi/adventures/migrations/0017_auto_20200504_2229.py | 1 | 1287 | # Generated by Django 2.2.12 on 2020-05-04 20:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('adventures', '0016_auto_20200503_1924'),
]
operations = [
migrations.RemoveField(
model_name='resolution',
name='antagonist_goals',
),
migrations.RemoveField(
model_name='situation',
name='resolution',
),
migrations.AddField(
model_name='antagonistgoal',
name='situations',
field=models.ManyToManyField(through='adventures.Resolution', to='adventures.Situation', verbose_name='situations'),
),
migrations.AddField(
model_name='resolution',
name='antagonist_goal',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='adventures.AntagonistGoal', verbose_name='antagonist goal'),
),
migrations.AddField(
model_name='resolution',
name='situation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='adventures.Situation', verbose_name='situation'),
),
]
| apache-2.0 | 6,941,592,988,053,494,000 | 33.783784 | 169 | 0.614608 | false |
n3storm/django-dynamic-preferences | dynamic_preferences/models.py | 1 | 4395 | """
Preference models, queryset and managers that handle the logic for persisting preferences.
"""
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.utils.functional import cached_property
from dynamic_preferences import user_preferences_registry, global_preferences_registry
from dynamic_preferences.registries import preference_models
from .utils import update
class BasePreferenceModel(models.Model):
"""
    A base model with common logic for all preference models.
"""
#: The section under which the preference is declared
section = models.CharField(
max_length=255, db_index=True, blank=True, null=True, default=None)
#: a name for the preference
name = models.CharField(max_length=255, db_index=True)
#: a value, serialized to a string. This field should not be accessed directly, use :py:attr:`BasePreferenceModel.value` instead
raw_value = models.TextField(null=True, blank=True)
class Meta:
abstract = True
app_label = 'dynamic_preferences'
@cached_property
def preference(self):
return self.registry.get(section=self.section, name=self.name)
@property
def verbose_name(self):
return self.preference.get('verbose_name', self.preference.identifier)
@property
def help_text(self):
return self.preference.get('help_text', '')
def set_value(self, value):
"""
Save serialized self.value to self.raw_value
"""
self.raw_value = self.preference.serializer.serialize(value)
def get_value(self):
"""
Return deserialized self.raw_value
"""
return self.preference.serializer.deserialize(self.raw_value)
value = property(get_value, set_value)
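    # Illustrative usage, assuming a matching preference has been registered
    # so that ``self.preference`` resolves and supplies the serializer:
    #   pref = GlobalPreferenceModel(section='general', name='title')
    #   pref.value = 'My site'   # serialized into raw_value on assignment
    #   pref.value               # deserialized from raw_value on access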
def save(self, **kwargs):
if self.pk is None and not self.raw_value:
self.value = self.preference.default
super(BasePreferenceModel, self).save(**kwargs)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '{0} - {1}/{2}'.format(self.__class__.__name__, self.section, self.name)
class GlobalPreferenceModel(BasePreferenceModel):
registry = global_preferences_registry
class Meta:
unique_together = ('section', 'name')
app_label = 'dynamic_preferences'
verbose_name = "global preference"
verbose_name_plural = "global preferences"
class PerInstancePreferenceModel(BasePreferenceModel):
"""For preferences that are tied to a specific model instance"""
    #: the instance the preference is attached to;
    #: use a ForeignKey pointing to the model of your choice
instance = None
class Meta(BasePreferenceModel.Meta):
unique_together = ('instance', 'section', 'name')
abstract = True
@classmethod
def get_instance_model(cls):
return cls._meta.get_field('instance').rel.to
@property
def registry(self):
return preference_models.get_by_instance(self.instance)
class UserPreferenceModel(PerInstancePreferenceModel):
instance = models.ForeignKey(settings.AUTH_USER_MODEL)
class Meta(PerInstancePreferenceModel.Meta):
app_label = 'dynamic_preferences'
verbose_name = "user preference"
verbose_name_plural = "user preferences"
global_preferences_registry.preference_model = GlobalPreferenceModel
# Create default preferences for new instances
from django.db.models.signals import post_save
def create_default_per_instance_preferences(sender, created, instance, **kwargs):
"""Create default preferences for PerInstancePreferenceModel"""
if created:
try:
registry = preference_models.get_by_instance(instance)
registry.create_default_preferences(instance)
except AttributeError:
pass
def invalidate_cache(sender, created, instance, **kwargs):
if not isinstance(instance, BasePreferenceModel):
return
registry = preference_models.get_by_preference(instance)
linked_instance = getattr(instance, 'instance', None)
kwargs = {}
if linked_instance:
kwargs['instance'] = linked_instance
manager = registry.manager(**kwargs)
manager.to_cache(instance)
post_save.connect(create_default_per_instance_preferences)
post_save.connect(invalidate_cache)
| bsd-3-clause | -5,948,715,740,910,205,000 | 28.695946 | 132 | 0.689647 | false |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/cerapi.py | 17 | 7436 | # A demo of the Windows CE Remote API
#
# This connects to a CE device, and interacts with it.
import wincerapi
import win32event
import win32api
import win32con
import os
import sys
import getopt
from repr import repr
def DumpPythonRegistry():
try:
h = wincerapi.CeRegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, "Software\\Python\\PythonCore\\%s\\PythonPath" % sys.winver)
except win32api.error:
print "The remote device does not appear to have Python installed"
return 0
path, typ = wincerapi.CeRegQueryValueEx(h, None)
print "The remote PythonPath is '%s'" % (str(path), )
h.Close()
return 1
def DumpRegistry(root, level=0):
# A recursive dump of the remote registry to test most functions.
h = wincerapi.CeRegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, None)
level_prefix = " " * level
index = 0
# Enumerate values.
while 1:
try:
name, data, typ = wincerapi.CeRegEnumValue(root, index)
except win32api.error:
break
print "%s%s=%s" % (level_prefix, name, repr(str(data)))
index = index+1
# Now enumerate all keys.
index=0
while 1:
try:
name, klass = wincerapi.CeRegEnumKeyEx(root, index)
except win32api.error:
break
print "%s%s\\" % (level_prefix, name)
subkey = wincerapi.CeRegOpenKeyEx(root, name)
DumpRegistry(subkey, level+1)
index = index+1
def DemoCopyFile():
# Create a file on the device, and write a string.
cefile = wincerapi.CeCreateFile("TestPython", win32con.GENERIC_WRITE, 0, None, win32con.OPEN_ALWAYS, 0, None)
wincerapi.CeWriteFile(cefile, "Hello from Python")
cefile.Close()
# reopen the file and check the data.
cefile = wincerapi.CeCreateFile("TestPython", win32con.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
if wincerapi.CeReadFile(cefile, 100) != "Hello from Python":
print "Couldnt read the data from the device!"
cefile.Close()
# Delete the test file
wincerapi.CeDeleteFile("TestPython")
print "Created, wrote to, read from and deleted a test file!"
def DemoCreateProcess():
try:
hp, ht, pid, tid = wincerapi.CeCreateProcess("Windows\\Python.exe", "", None, None, 0, 0, None, "", None)
# Not necessary, except to see if handle closing raises an exception
# (if auto-closed, the error is suppressed)
hp.Close()
ht.Close()
print "Python is running on the remote device!"
except win32api.error, (hr, fn, msg):
print "Couldnt execute remote process -", msg
def DumpRemoteMachineStatus():
ACLineStatus, BatteryFlag, BatteryLifePercent, BatteryLifeTime, BatteryFullLifeTime, BackupBatteryFlag, BackupBatteryLifePercent, BackupBatteryLifeTime, BackupBatteryLifeTime = \
wincerapi.CeGetSystemPowerStatusEx()
if ACLineStatus:
power = "AC"
else:
power = "battery"
if BatteryLifePercent==255:
batPerc = "unknown"
else:
batPerc = BatteryLifePercent
print "The batteries are at %s%%, and is currently being powered by %s" % (batPerc, power)
memLoad, totalPhys, availPhys, totalPage, availPage, totalVirt, availVirt = \
wincerapi.CeGlobalMemoryStatus()
print "The memory is %d%% utilized." % (memLoad)
print "%-20s%-10s%-10s" % ("", "Total", "Avail")
print "%-20s%-10s%-10s" % ("Physical Memory", totalPhys, availPhys)
print "%-20s%-10s%-10s" % ("Virtual Memory", totalVirt, availVirt)
print "%-20s%-10s%-10s" % ("Paging file", totalPage, availPage)
storeSize, freeSize = wincerapi.CeGetStoreInformation()
print "%-20s%-10s%-10s" % ("File store", storeSize, freeSize)
print "The CE temp path is", wincerapi.CeGetTempPath()
print "The system info for the device is", wincerapi.CeGetSystemInfo()
def DumpRemoteFolders():
# Dump all special folders possible.
for name, val in wincerapi.__dict__.items():
if name[:6]=="CSIDL_":
try:
loc = str(wincerapi.CeGetSpecialFolderPath(val))
print "Folder %s is at %s" % (name, loc)
except win32api.error, details:
pass
# Get the shortcut targets for the "Start Menu"
print "Dumping start menu shortcuts..."
try:
startMenu = str(wincerapi.CeGetSpecialFolderPath(wincerapi.CSIDL_STARTMENU))
except win32api.error, details:
print "This device has no start menu!", details
startMenu = None
if startMenu:
for fileAttr in wincerapi.CeFindFiles(os.path.join(startMenu, "*")):
fileName = fileAttr[8]
fullPath = os.path.join(startMenu, str(fileName))
try:
resolved = wincerapi.CeSHGetShortcutTarget(fullPath)
except win32api.error, (rc, fn, msg):
resolved = "#Error - %s" % msg
print "%s->%s" % (fileName, resolved)
# print "The start menu is at",
# print wincerapi.CeSHGetShortcutTarget("\\Windows\\Start Menu\\Shortcut to Python.exe.lnk")
def usage():
print "Options:"
print "-a - Execute all demos"
print "-p - Execute Python process on remote device"
print "-r - Dump the remote registry"
print "-f - Dump all remote special folder locations"
print "-s - Dont dump machine status"
print "-y - Perform asynch init of CE connection"
def main():
async_init = bStartPython = bDumpRegistry = bDumpFolders = 0
bDumpStatus = 1
try:
        opts, args = getopt.getopt(sys.argv[1:], "aprfsy")
except getopt.error, why:
print "Invalid usage:", why
usage()
return
for o, v in opts:
if o=="-a":
            bStartPython = bDumpRegistry = bDumpStatus = bDumpFolders = async_init = 1
if o=="-p":
bStartPython=1
if o=="-r":
bDumpRegistry=1
if o=="-s":
bDumpStatus=0
if o=="-f":
bDumpFolders = 1
if o=="-y":
print "Doing asynch init of CE connection"
async_init = 1
if async_init:
event, rc = wincerapi.CeRapiInitEx()
while 1:
rc = win32event.WaitForSingleObject(event, 500)
if rc==win32event.WAIT_OBJECT_0:
# We connected.
break
else:
print "Waiting for Initialize to complete (picture a Cancel button here :)"
else:
wincerapi.CeRapiInit()
print "Connected to remote CE device."
try:
verinfo = wincerapi.CeGetVersionEx()
print "The device is running windows CE version %d.%d - %s" % (verinfo[0], verinfo[1], verinfo[4])
if bDumpStatus:
print "Dumping remote machine status"
DumpRemoteMachineStatus()
if bDumpRegistry:
print "Dumping remote registry..."
DumpRegistry(win32con.HKEY_LOCAL_MACHINE)
if bDumpFolders:
print "Dumping remote folder information"
DumpRemoteFolders()
DemoCopyFile()
if bStartPython:
print "Starting remote Python process"
if DumpPythonRegistry():
DemoCreateProcess()
else:
print "Not trying to start Python, as it's not installed"
finally:
wincerapi.CeRapiUninit()
print "Disconnected"
if __name__=='__main__':
main()
| apache-2.0 | -3,793,568,527,841,336,000 | 34.075472 | 182 | 0.617805 | false |
AnishShah/tensorflow | tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py | 2 | 22613 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
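      # The expected values below follow the usual regularized split math
      # (a sketch; this handler is configured with l1=0.1 and l2=1):
      #   G, H   = sums of gradients / hessians routed to a leaf
      #   G'     = G shrunk toward zero by l1 (soft-thresholding)
      #   weight = -G' / (H + l2)
      #   gain   =  G'**2 / (H + l2)
      # and a split's overall gain is left_gain + right_gain - bias_gain.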
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testObliviousFeatureSplitGeneration(self):
with self.test_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 1 | 1 |
# i1 | (-0.5, 0.07) | 1 | 2 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [1, 1, 1, 2]
indices = [[0, 0], [1, 0], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 1, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([1, 2], partitions)
# For partition 1.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight1 = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain1 = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight1 = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain1 = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain1 = 0.46043165467625885
split_info = split_info_pb2.ObliviousSplitInfo()
split_info.ParseFromString(splits[0])
# Children of partition 1.
left_child = split_info.children[0].vector
right_child = split_info.children[1].vector
split_node = split_info.split_node.oblivious_categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
self.assertAllClose([expected_left_weight1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight1], right_child.value, 0.00001)
      # For partition 2.
expected_left_weight2 = 0
expected_left_gain2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight2 = -3.4513274336283186
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_right_gain2 = 13.460176991150442
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_bias_gain2 = 13.460176991150442
# Children of partition 2.
left_child = split_info.children[2].vector
right_child = split_info.children[3].vector
self.assertAllClose([expected_left_weight2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight2], right_child.value, 0.00001)
self.assertAllClose(
expected_left_gain1 + expected_right_gain1 - expected_bias_gain1 +
expected_left_gain2 + expected_right_gain2 - expected_bias_gain2,
gains[0], 0.00001)
def testGenerateFeatureSplitCandidatesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
expected_left_weight = -1.6463414634146338
# (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
expected_left_gain = 4.445121951219511
# -(-1 + 0.1) / (0.14 + 1)
expected_right_weight = 0.789473684211
# (-1 + 0.1) ** 2 / (0.14 + 1)
expected_right_gain = 0.710526315789
# (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
expected_bias_gain = 1.6235955056179772
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-8 + 0.1) / (0.26 + 1)
expected_left_weight = -6.26984126984
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_left_gain = 49.5317460317
expected_right_weight = 0
expected_right_gain = 0
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_bias_gain = 49.5317460317
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testGenerateFeatureSplitCandidatesMulticlass(self):
with self.cached_session() as sess:
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
def testEmpty(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2])
values = array_ops.constant([], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testInactive(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, False]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -7,559,824,180,958,474,000 | 37.853952 | 88 | 0.617079 | false |
dinghino/ecommerce_api | tests/test_pictures.py | 2 | 7336 | """
Test suite for PictureHandler and ItemPictureHandler
"""
from tests.test_case import TestCase
import json
from io import BytesIO
import os
import uuid
import http.client as client
from models import Item, Picture
from tests import test_utils
import utils
EXPECTED_RESULTS = test_utils.RESULTS['pictures']
TEST_IMAGE_FOLDER = 'test_images'
TEST_ITEM = {
'uuid': '429994bf-784e-47cc-a823-e0c394b823e8',
'name': 'mario',
'price': 20.20,
'description': 'svariati mariii',
'availability': 1,
'category': 'scarpe',
}
TEST_ITEM2 = {
'uuid': 'd46b13a1-f4bb-4cfb-8076-6953358145f3',
'name': 'GINO',
'price': 30.20,
'description': 'svariati GINIIIII',
'availability': 1,
'category': 'accessori',
}
TEST_PICTURE = {
'uuid': 'df690434-a488-419f-899e-8853cba1a22b',
'extension': 'jpg'
}
TEST_PICTURE2 = {
'uuid': 'c0001a48-10a3-43c1-b87b-eabac0b2d42f',
'extension': 'png'
}
WRONG_UUID = 'e8e42371-46de-4f5e-8927-e2cc34826269'
class TestPictures(TestCase):
@classmethod
def setup_class(cls):
super(TestPictures, cls).setup_class()
utils.get_image_folder = lambda: os.path.join(utils.get_project_root(),
TEST_IMAGE_FOLDER)
test_utils.get_image_folder = utils.get_image_folder
def test_get_picture__success(self):
test_utils.setup_images()
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
open("{path}/{picture_uuid}.jpg".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid), "wb")
resp = self.app.get('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.OK
test_picture = TEST_PICTURE.copy()
test_picture['item_uuid'] = item.uuid
assert resp.data == b''
assert resp.headers['Content-Type'] == 'image/jpeg'
test_utils.clean_images()
def test_get_picture__missing(self):
resp = self.app.get('/pictures/{picture_uuid}'.format(
picture_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_get_item_pictures__success(self):
item = Item.create(**TEST_ITEM)
Picture.create(item=item, **TEST_PICTURE)
Picture.create(item=item, **TEST_PICTURE2)
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid))
assert resp.status_code == client.OK
test_utils.assert_valid_response(
resp.data, EXPECTED_RESULTS['get_item_pictures__success'])
def test_get_item_pictures__empty(self):
item = Item.create(**TEST_ITEM)
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid))
pictures = json.loads(resp.data)
assert not pictures
def test_get_item_pictures__wrong_item_uuid(self):
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_post_picture__success(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={'image': (BytesIO(b'my file contents'), 'testimage.jpg')},
content_type='multipart/form-data')
assert resp.status_code == client.CREATED
assert len(Picture.select()) == 1
picture = Picture.get()
assert picture.item == item
assert picture.extension == 'jpg'
assert type(picture.uuid) == uuid.UUID
def test_post_item_pictures__wrong_item_uuid(self):
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=WRONG_UUID),
data={'image': (BytesIO(b'my file contents'), 'testimage.jpg')},
content_type='multipart/form-data')
assert resp.status_code == client.NOT_FOUND
assert Picture.select().count() == 0
def test_post_item_pictures__wrong_extension(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={'image': (BytesIO(b'my file contents'), 'testimage.txt')},
content_type='multipart/form-data')
assert resp.status_code == client.BAD_REQUEST
assert Picture.select().count() == 0
def test_post_picture__no_image(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={},
content_type='multipart/form-data')
assert resp.status_code == client.BAD_REQUEST
assert Picture.select().count() == 0
def test_delete_picture__success(self):
test_utils.setup_images()
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
picture2 = Picture.create(item=item, **TEST_PICTURE2)
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid,
extension=picture.extension), "wb")
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=WRONG_UUID,
extension='jpg'), "wb")
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture2.uuid,
extension=picture2.extension), "wb")
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.NO_CONTENT
assert Picture.select().count() == 1
assert Item.select().count() == 1
item2 = Item.get()
assert str(item2.uuid) == TEST_ITEM['uuid']
assert item2.name == TEST_ITEM['name']
assert float(item2.price) == TEST_ITEM['price']
assert item2.description == TEST_ITEM['description']
assert os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=WRONG_UUID,
extension='jpg'))
assert not os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid,
extension=picture.extension))
assert os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture2.uuid,
extension=picture2.extension))
test_utils.clean_images()
def test_delete_picture__wrong_uuid(self):
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_delete_pictures__missing_file(self):
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.NO_CONTENT
assert not Picture.select().exists()
assert Item.select().exists()
| gpl-3.0 | 403,378,789,863,538,800 | 34.960784 | 79 | 0.605234 | false |
amagdas/eve | eve/tests/methods/delete.py | 10 | 29272 | from eve.tests import TestBase
from eve.tests.utils import DummyEvent
from eve.tests.test_settings import MONGO_DBNAME
from eve import ETAG
from bson import ObjectId
from eve.utils import ParsedRequest
import simplejson as json
import copy
from eve.methods.delete import deleteitem_internal
class TestDelete(TestBase):
def setUp(self):
super(TestDelete, self).setUp()
# Etag used to delete an item (a contact)
self.etag_headers = [('If-Match', self.item_etag)]
def test_unknown_resource(self):
url = '%s%s/' % (self.unknown_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_from_resource_endpoint(self):
r, status = self.delete(self.known_resource_url)
self.assert204(status)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
def test_delete_from_resource_endpoint_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.known_resource_url)
self.assert500(status)
def test_delete_from_resource_endpoint_different_resource(self):
r, status = self.delete(self.different_resource_url)
self.assert204(status)
r, status = self.parse_response(self.test_client.get(
self.different_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
        # deletion of 'users' will still leave 'contacts' untouched (same db
# collection)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 25)
def test_delete_empty_resource(self):
url = '%s%s/' % (self.empty_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_readonly_resource(self):
_, status = self.delete(self.readonly_id_url)
self.assert405(status)
def test_delete_unknown_item(self):
url = '%s%s/' % (self.known_resource_url, self.unknown_item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_ifmatch_missing(self):
_, status = self.delete(self.item_id_url)
self.assert403(status)
def test_delete_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
_, status = self.delete(self.item_id_url)
self.assert204(status)
def test_delete_ifmatch_bad_etag(self):
_, status = self.delete(self.item_id_url,
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_delete(self):
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
self.assert404(r.status_code)
def test_delete_non_existant(self):
url = self.item_id_url[:-5] + "00000"
r, status = self.delete(url, headers=self.etag_headers)
self.assert404(status)
def test_delete_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.item_id_url,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_delete_different_resource(self):
r, status = self.delete(self.user_id_url,
headers=[('If-Match', self.user_etag)])
self.assert204(status)
r = self.test_client.get(self.user_id_url)
self.assert404(r.status_code)
def test_delete_with_post_override(self):
# POST request with DELETE override turns into a DELETE
headers = [('X-HTTP-Method-Override', 'DELETE'),
('If-Match', self.item_etag)]
r = self.test_client.post(self.item_id_url, data={}, headers=headers)
self.assert204(r.status_code)
def test_delete_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# grab parent collection count; we will use this later to make sure we
        # didn't delete all the invoices in the database. We add one extra invoice
# to make sure that the actual count will never be 1 (which would
# invalidate the test)
_db.invoices.insert({'inv_number': 1})
response, status = self.get('invoices')
invoices = len(response[self.app.config['ITEMS']])
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# verify that the only document retrieved is referencing the correct
# parent document
response, status = self.get('users/%s/invoices' % fake_contact_id)
person_id = ObjectId(response[self.app.config['ITEMS']][0]['person'])
self.assertEqual(person_id, fake_contact_id)
# delete all documents at the sub-resource endpoint
response, status = self.delete('users/%s/invoices' % fake_contact_id)
self.assert204(status)
        # verify that no documents are left at the sub-resource endpoint
response, status = self.get('users/%s/invoices' % fake_contact_id)
self.assertEqual(len(response['_items']), 0)
        # verify that other documents in the invoices collection have not been
# deleted
response, status = self.get('invoices')
self.assertEqual(len(response['_items']), invoices - 1)
def test_delete_subresource_item(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
headers = [('If-Match', etag)]
response, status = self.delete('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
headers=headers)
self.assert204(status)
def test_deleteitem_internal(self):
# test that deleteitem_internal is available and working properly.
with self.app.test_request_context(self.item_id_url):
r, _, _, status = deleteitem_internal(
self.known_resource, concurrency_check=False,
**{'_id': self.item_id})
self.assert204(status)
r = self.test_client.get(self.item_id_url)
self.assert404(r.status_code)
def delete(self, url, headers=None):
r = self.test_client.delete(url, headers=headers)
return self.parse_response(r)
class TestSoftDelete(TestDelete):
def setUp(self):
super(TestSoftDelete, self).setUp()
# Enable soft delete
self.app.config['SOFT_DELETE'] = True
domain = copy.copy(self.domain)
for resource, settings in domain.items():
# rebuild resource settings for soft delete
del settings['soft_delete']
self.app.register_resource(resource, settings)
# alias for the configured DELETED field name
self.deleted_field = self.app.config['DELETED']
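        # With SOFT_DELETE enabled, a DELETE is expected to keep the document
        # in the database and only flag it, roughly (illustrative shape):
        #   {'_id': ..., '_deleted': True, '_etag': <new etag>, ...}
        # which is what the overridden tests below assert against.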
# TestDelete overrides
def test_delete(self):
"""Soft delete should mark an item as deleted and cause subsequent
requests to return 404 Not Found responses. 404s in response to GET
requests should include the document in their body with the _deleted
flag set to True.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
self.assertNotEqual(data.get('_etag'), self.item_etag)
# 404 should still include a status and an error field
self.assertTrue(self.app.config['ERROR'] in data)
def test_deleteitem_internal(self):
"""Deleteitem internal should honor soft delete settings.
"""
# test that deleteitem_internal is available and working properly.
with self.app.test_request_context(self.item_id_url):
r, _, _, status = deleteitem_internal(
self.known_resource, concurrency_check=False,
**{'_id': self.item_id})
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
def test_delete_different_resource(self):
r, status = self.delete(self.user_id_url,
headers=[('If-Match', self.user_etag)])
self.assert204(status)
r = self.test_client.get(self.user_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
def test_delete_from_resource_endpoint(self):
"""Soft deleting an entire resource should mark each individual item
as deleted, queries to that resource should return no items, and GETs
on any individual items should return 404 responses.
"""
# TestDelete deletes resource at known_resource_url, and confirms
# subsequent queries to the resource return zero items
super(TestSoftDelete, self).test_delete_from_resource_endpoint()
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
    # TestSoftDelete specific tests
def test_restore_softdeleted(self):
"""Sending a PUT or PATCH to a soft deleted document should restore the
document.
"""
def soft_delete_item(etag):
r, status = self.delete(
self.item_id_url, headers=[('If-Match', etag)])
self.assert204(status)
# GET soft deleted etag
return self.test_client.get(self.item_id_url)
# Restore via PATCH
deleted_etag = soft_delete_item(self.item_etag).headers['ETag']
r = self.test_client.patch(
self.item_id_url,
data=json.dumps({}),
headers=[('Content-Type', 'application/json'),
('If-Match', deleted_etag)])
self.assert200(r.status_code)
r = self.test_client.get(self.item_id_url)
self.assert200(r.status_code)
new_etag = r.headers['ETag']
# Restore via PUT
r = soft_delete_item(new_etag)
deleted_etag = r.headers['ETag']
restored_doc = {"ref": "1234567890123456789012345"}
r = self.test_client.put(
self.item_id_url,
data=json.dumps(restored_doc),
headers=[('Content-Type', 'application/json'),
('If-Match', deleted_etag)])
self.assert200(r.status_code)
r = self.test_client.get(self.item_id_url)
self.assert200(r.status_code)
def test_multiple_softdelete(self):
"""After an item has been soft deleted, subsequent DELETEs should
return a 404 Not Found response.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
# GET soft deleted etag
r = self.test_client.get(self.item_id_url)
new_etag = r.headers['ETag']
# Second soft DELETE should return 404 Not Found
r, status = self.delete(
self.item_id_url, headers=[('If-Match', new_etag)])
self.assert404(status)
def test_softdelete_deleted_field(self):
"""The configured 'deleted' field should be added to all documents to indicate
whether that document has been soft deleted or not.
"""
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert200(status)
self.assertEqual(data.get(self.deleted_field), False)
def test_softdelete_show_deleted(self):
"""GETs on resource endpoints should include soft deleted items when
the 'show_deleted' param is included in the query, or when the DELETED
field is explicitly included in the lookup.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
data, status = self.get(self.known_resource)
after_softdelete_count = data[self.app.config['META']]['total']
self.assertEqual(after_softdelete_count, self.known_resource_count - 1)
data, status = self.get(self.known_resource, query="?show_deleted")
show_deleted_count = data[self.app.config['META']]['total']
self.assertEqual(show_deleted_count, self.known_resource_count)
# Test show_deleted with additional queries
role_query = '?where={"role": "' + self.item['role'] + '"}'
data, status = self.get(self.known_resource, query=role_query)
role_count = data[self.app.config['META']]['total']
data, status = self.get(
self.known_resource, query=role_query + "&show_deleted")
show_deleted_role_count = data[self.app.config['META']]['total']
self.assertEqual(show_deleted_role_count, role_count + 1)
# Test explicit _deleted query
data, status = self.get(
self.known_resource, query='?where={"_deleted": true}')
deleted_query_count = data[self.app.config['META']]['total']
self.assertEqual(deleted_query_count, 1)
def test_softdeleted_embedded_doc(self):
"""Soft deleted documents embedded in other documents should not be
included. They will resolve to None as if the document was actually
deleted.
"""
# Set up and confirm embedded document
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
fake_contact_url = self.known_resource_url + "/" + str(fake_contact_id)
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
invoices = self.domain['invoices']
invoices['embedding'] = True
invoices['schema']['person']['data_relation']['embeddable'] = True
embedded = '{"person": 1}'
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert200(status)
self.assertTrue('location' in data['person'])
# Get embedded doc etag so we can delete it
r = self.test_client.get(fake_contact_url)
embedded_contact_etag = r.headers['ETag']
# Delete embedded contact
data, status = self.delete(
fake_contact_url, headers=[('If-Match', embedded_contact_etag)])
self.assert204(status)
# embedded 'person' should now be empty
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert200(status)
self.assertEqual(data['person'], None)
def test_softdeleted_get_response_skips_embedded_expansion(self):
"""Soft deleted documents should not expand their embedded documents when
returned in a 404 Not Found response. The deleted document data should
        reflect the state of the document when it was deleted, and should not
        change if still-active embedded documents are later updated.
"""
# Confirm embedded document works before delete
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
invoices = self.domain['invoices']
invoices['embedding'] = True
invoices['schema']['person']['data_relation']['embeddable'] = True
embedded = '{"person": 1}'
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
invoice_etag = r.headers['ETag']
data, status = self.parse_response(r)
self.assert200(status)
self.assertTrue('location' in data['person'])
# Soft delete document
data, status = self.delete(
self.invoice_id_url, headers=[('If-Match', invoice_etag)])
self.assert204(status)
# Document in 404 should not expand person
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data['person'], str(fake_contact_id))
def test_softdelete_caching(self):
"""404 Not Found responses after soft delete should be cacheable
"""
# Soft delete item
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
# delete should have invalidated any previously cached 200 responses
r = self.test_client.get(
self.item_id_url, headers=[('If-None-Match', self.item_etag)])
self.assert404(r.status_code)
post_delete_etag = r.headers['ETag']
# validate cached 404 response data
        r = self.test_client.get(
self.item_id_url, headers=[('If-None-Match', post_delete_etag)])
self.assert304(r.status_code)
def test_softdelete_datalayer(self):
"""Soft deleted items should not be returned by find methods in the Eve
data layer unless show_deleted is explicitly configured in the request,
the deleted field is included in the lookup, or the operation is 'raw'.
"""
# Soft delete item
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
with self.app.test_request_context():
# find_one should only return item if a request w/ show_deleted ==
# True is passed or if the deleted field is part of the lookup
req = ParsedRequest()
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id)
self.assertEqual(doc, None)
req.show_deleted = True
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id)
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
req.show_deleted = False
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id, _deleted=True)
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
# find_one_raw should always return a document, soft deleted or not
doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(self.item_id))
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
# find should only return deleted items if a request with
# show_deleted == True is passed or if the deleted field is part of
# the lookup
req.show_deleted = False
docs = self.app.data.find(self.known_resource, req, None)
undeleted_count = docs.count()
req.show_deleted = True
docs = self.app.data.find(self.known_resource, req, None)
with_deleted_count = docs.count()
self.assertEqual(undeleted_count, with_deleted_count - 1)
req.show_deleted = False
docs = self.app.data.find(
self.known_resource, req, {self.deleted_field: True})
deleted_count = docs.count()
self.assertEqual(deleted_count, 1)
# find_list_of_ids will return deleted documents if given their id
docs = self.app.data.find_list_of_ids(
self.known_resource, [ObjectId(self.item_id)])
self.assertEqual(docs.count(), 1)
def test_softdelete_db_fields(self):
"""Documents created when soft delete is enabled should include and
maintain the DELETED field in the db.
"""
r = self.test_client.post(self.known_resource_url, data={
'ref': "1234567890123456789054321"
})
data, status = self.parse_response(r)
self.assert201(status)
new_item_id = data[self.app.config['ID_FIELD']]
new_item_etag = data[self.app.config['ETAG']]
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
# PUT updates to the document should maintain the DELETED field
r = self.test_client.put(
self.known_resource_url + "/" + new_item_id,
data={'ref': '5432109876543210987654321'},
headers=[('If-Match', new_item_etag)]
)
data, status = self.parse_response(r)
self.assert200(status)
new_item_etag = data[self.app.config['ETAG']]
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
# PATCH updates to the document should maintain the DELETED field
r = self.test_client.patch(
self.known_resource_url + "/" + new_item_id,
data={'ref': '5555544444333332222211111'},
headers=[('If-Match', new_item_etag)]
)
self.assert200(r.status_code)
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
class TestResourceSpecificSoftDelete(TestBase):
def setUp(self):
super(TestResourceSpecificSoftDelete, self).setUp()
# Enable soft delete for one resource
domain = copy.copy(self.domain)
resource_settings = domain[self.known_resource]
resource_settings['soft_delete'] = True
self.app.register_resource(self.known_resource, resource_settings)
self.deleted_field = self.app.config['DELETED']
# Etag used to delete an item (a contact)
self.etag_headers = [('If-Match', self.item_etag)]
def test_resource_specific_softdelete(self):
""" Resource level soft delete configuration should override
application configuration.
"""
# Confirm soft delete is enabled for known resource.
data, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
# DELETE on other resources should be hard deletes
data, status = self.delete(
self.invoice_id_url, headers=[('If-Match', self.invoice_etag)])
self.assert204(status)
r = self.test_client.get(self.invoice_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertTrue(self.deleted_field not in data)
class TestDeleteEvents(TestBase):
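    # DummyEvent stores the hook's positional arguments in `called`; the
    # before_delete / after_delete helpers check whether the contact is still
    # in the db at the moment the hook fires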
def test_on_pre_DELETE_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertFalse(devent.called[1] is None)
def test_on_pre_DELETE_resource_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_resource_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_DELETE += filter_this
# Would normally delete the known document; will return 404 instead.
r, s = self.parse_response(self.delete_item())
self.assert404(s)
def test_on_post_DELETE_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_delete_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_delete_resource_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_deleted_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_deleted_resource_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_delete_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_delete_item_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def test_on_deleted_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_deleted_item_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def delete_resource(self):
self.test_client.delete(self.known_resource_url)
def delete_item(self):
return self.test_client.delete(
self.item_id_url, headers=[('If-Match', self.item_etag)])
def before_delete(self):
db = self.connection[MONGO_DBNAME]
return db.contacts.find_one(ObjectId(self.item_id)) is not None
def after_delete(self):
return not self.before_delete()
| bsd-3-clause | -8,660,333,842,002,928,000 | 39.48686 | 86 | 0.61827 | false |
mrucci/moto | moto/cloudwatch/models.py | 3 | 3720 | from moto.core import BaseBackend
import boto.ec2.cloudwatch
import datetime
class Dimension(object):
def __init__(self, name, value):
self.name = name
self.value = value
class FakeAlarm(object):
def __init__(self, name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit):
self.name = name
self.comparison_operator = comparison_operator
self.evaluation_periods = evaluation_periods
self.period = period
self.threshold = threshold
self.statistic = statistic
self.description = description
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
self.alarm_actions = alarm_actions
self.ok_actions = ok_actions
self.insufficient_data_actions = insufficient_data_actions
self.unit = unit
self.state_updated_timestamp = datetime.datetime.now()
self.configuration_updated_timestamp = datetime.datetime.now()
class MetricDatum(object):
def __init__(self, namespace, name, value, dimensions):
self.namespace = namespace
self.name = name
self.value = value
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
class CloudWatchBackend(BaseBackend):
def __init__(self):
self.alarms = {}
self.metric_data = []
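        # alarms are stored by name; metric data accumulates as a flat list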
def put_metric_alarm(self, name, comparison_operator, evaluation_periods,
period, threshold, statistic, description, dimensions,
alarm_actions, ok_actions, insufficient_data_actions, unit):
alarm = FakeAlarm(name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit)
self.alarms[name] = alarm
return alarm
def get_all_alarms(self):
return self.alarms.values()
@staticmethod
def _list_element_starts_with(items, needle):
"""True of any of the list elements starts with needle"""
for item in items:
if item.startswith(needle):
return True
return False
def get_alarms_by_action_prefix(self, action_prefix):
return [
alarm
for alarm in self.alarms.values()
if CloudWatchBackend._list_element_starts_with(
alarm.alarm_actions, action_prefix
)
]
def get_alarms_by_alarm_name_prefix(self, name_prefix):
return [
alarm
for alarm in self.alarms.values()
if alarm.name.startswith(name_prefix)
]
def get_alarms_by_alarm_names(self, alarm_names):
return [
alarm
for alarm in self.alarms.values()
if alarm.name in alarm_names
]
def get_alarms_by_state_value(self, state):
raise NotImplementedError(
"DescribeAlarm by state is not implemented in moto."
)
def delete_alarms(self, alarm_names):
for alarm_name in alarm_names:
self.alarms.pop(alarm_name, None)
def put_metric_data(self, namespace, metric_data):
for name, value, dimensions in metric_data:
self.metric_data.append(MetricDatum(namespace, name, value, dimensions))
def get_all_metrics(self):
return self.metric_data
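# one CloudWatchBackend instance per boto region, keyed by region name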
cloudwatch_backends = {}
for region in boto.ec2.cloudwatch.regions():
cloudwatch_backends[region.name] = CloudWatchBackend()
| apache-2.0 | -609,949,406,410,304,500 | 33.444444 | 104 | 0.625 | false |
robk5uj/invenio | modules/websubmit/lib/functions/Ask_For_Record_Details_Confirmation.py | 35 | 5952 | ## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Display the details of a record on which some operation is to be carried
out and prompt for the user's confirmation that it is the correct record.
Upon the clicking of the confirmation button, augment step by one.
"""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_ADMIN_EMAIL
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
from invenio.search_engine import print_record, record_exists
## Details of record to display to the user for confirmation:
CFG_DOCUMENT_DETAILS_MESSAGE = """
<div>
We're about to process your request for the following document:<br /><br />
<table border="0">
<tr>
<td>Report Number(s):</td><td>%(report-numbers)s</td>
</tr>
<tr>
<td>Title:</td><td>%(title)s</td>
</tr>
<tr>
<td>Author(s):</td><td>%(author)s</td>
</tr>
</table>
<br />
If this is correct, please CONFIRM it:<br />
<br />
<input type="submit" width="350" height="50"
name="CONFIRM" value="CONFIRM"
onClick="document.forms[0].step.value=%(newstep)s;">
<br />
If you think that there is a problem, please contact
<a href="mailto:%(admin-email)s">%(admin-email)s</a>.<br />
</div>
"""
def Ask_For_Record_Details_Confirmation(parameters, \
curdir, \
form, \
user_info=None):
"""
Display the details of a record on which some operation is to be carried
out and prompt for the user's confirmation that it is the correct record.
Upon the clicking of the confirmation button, augment step by one.
Given the "recid" (001) of a record, retrieve the basic metadata
(title, report-number(s) and author(s)) and display them in the
user's browser along with a prompt asking them to confirm that
it is indeed the record that they expected to see.
The function depends upon the presence of the "sysno" global and the
presence of the "step" field in the "form" parameter.
When the user clicks on the "confirm" button, step will be augmented by
1 and the form will be submitted.
@parameters: None.
@return: None.
@Exceptions raise: InvenioWebSubmitFunctionError if problems are
encountered;
InvenioWebSubmitFunctionStop in order to display the details of the
record and the confirmation message.
"""
global sysno
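    ## "sysno" (the record id) is expected to be provided as a global by the
    ## WebSubmit engine, as noted in the docstring above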
## Make sure that we know the current step:
try:
current_step = int(form['step'])
    except (KeyError, ValueError, TypeError):
## Can't determine step.
msg = "Unable to determine submission step. Cannot continue."
raise InvenioWebSubmitFunctionError(msg)
else:
newstep = current_step + 1
## Make sure that the sysno is valid:
try:
working_recid = int(sysno)
    except (TypeError, ValueError):
## Unable to find the details of this record - cannot query the database
msg = "Unable to retrieve details of record - record id was invalid."
raise InvenioWebSubmitFunctionError(msg)
if not record_exists(working_recid):
## Record doesn't exist.
msg = "Unable to retrieve details of record [%s] - record does not " \
"exist." % working_recid
raise InvenioWebSubmitFunctionError(msg)
## Retrieve the details to be displayed:
##
## Author(s):
rec_authors = ""
rec_first_author = print_record(int(sysno), 'tm', "100__a")
rec_other_authors = print_record(int(sysno), 'tm', "700__a")
if rec_first_author != "":
rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
author in rec_first_author.split("\n")])
if rec_other_authors != "":
rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
author in rec_other_authors.split("\n")])
## Title:
rec_title = "".join(["%s<br />\n" % cgi.escape(title.strip()) for title in \
print_record(int(sysno), 'tm', "245__a").split("\n")])
## Report numbers:
rec_reportnums = ""
rec_reportnum = print_record(int(sysno), 'tm', "037__a")
rec_other_reportnums = print_record(int(sysno), 'tm', "088__a")
if rec_reportnum != "":
rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
for repnum in rec_reportnum.split("\n")])
if rec_other_reportnums != "":
rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
for repnum in \
rec_other_reportnums.split("\n")])
raise InvenioWebSubmitFunctionStop(CFG_DOCUMENT_DETAILS_MESSAGE % \
{ 'report-numbers' : rec_reportnums, \
'title' : rec_title, \
'author' : rec_authors, \
'newstep' : newstep, \
'admin-email' : CFG_SITE_ADMIN_EMAIL, \
} )
| gpl-2.0 | -7,923,146,746,006,533,000 | 40.048276 | 80 | 0.598958 | false |
olhoneles/politicos | settings.py | 1 | 1327 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Marcelo Jorge Vieira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tornado.options import define, options
define('debug', default=True, help='debug mode')
define('port', default=8888, help='port to listen on', type=int)
define('redis_port', default=6379, help='redis port')
define('redis_host', default='localhost', help='redis hostname or IP')
define('es_hosts', default='localhost', help='elasticsearch hosts')
define('es_index', default='politicians', help='elasticsearch index')
options.parse_command_line()
define('per_page', default=10, help='items per page')
define('max_per_page', default=50, help='max items per page')
| agpl-3.0 | 6,976,442,247,888,095,000 | 43.233333 | 79 | 0.740015 | false |
GheRivero/ansible | lib/ansible/modules/cloud/azure/azure_rm_acs.py | 15 | 29357 | #!/usr/bin/python
#
# Copyright (c) 2017 Julien Stroheker, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_acs
version_added: "2.4"
short_description: Manage an Azure Container Service Instance (ACS).
description:
- Create, update and delete an Azure Container Service Instance.
options:
resource_group:
description:
            - Name of the resource group where the Container Service exists or will be created.
required: true
name:
description:
- Name of the Container Services instance.
required: true
state:
description:
- Assert the state of the ACS. Use 'present' to create or update an ACS and 'absent' to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
orchestration_platform:
description:
- Specifies the Container Orchestration Platform to use. Currently can be either DCOS, Kubernetes or Swarm.
choices:
- 'DCOS'
- 'Kubernetes'
- 'Swarm'
required: true
master_profile:
description:
- Master profile suboptions.
required: true
suboptions:
count:
description:
- Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5.
required: true
choices:
- 1
- 3
- 5
vm_size:
description:
                    - The VM Size of each of the master VMs (e.g. Standard_F1 / Standard_D2v2).
required: true
version_added: 2.5
dns_prefix:
description:
- The DNS Prefix to use for the Container Service master nodes.
required: true
linux_profile:
description:
- The linux profile suboptions.
required: true
suboptions:
admin_username:
description:
- The Admin Username for the Cluster.
required: true
ssh_key:
description:
- The Public SSH Key used to access the cluster.
required: true
agent_pool_profiles:
description:
- The agent pool profile suboptions.
required: true
suboptions:
name:
description:
- Unique name of the agent pool profile in the context of the subscription and resource group.
required: true
count:
description:
- Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive).
required: true
dns_prefix:
description:
- The DNS Prefix given to Agents in this Agent Pool.
required: true
vm_size:
description:
- The VM Size of each of the Agent Pool VM's (e.g. Standard_F1 / Standard_D2v2).
required: true
service_principal:
description:
- The service principal suboptions.
suboptions:
client_id:
description:
- The ID for the Service Principal.
required: false
client_secret:
description:
- The secret password associated with the service principal.
required: false
diagnostics_profile:
description:
- Should VM Diagnostics be enabled for the Container Service VM's.
required: true
type: bool
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Create an azure container services instance running Kubernetes
azure_rm_acs:
name: acctestcontservice1
location: eastus
resource_group: Testing
orchestration_platform: Kubernetes
master_profile:
- count: 3
dns_prefix: acsk8smasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
service_principal:
- client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
client_secret: "mySPNp@ssw0rd!"
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsk8sagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running DCOS
azure_rm_acs:
name: acctestcontservice2
location: eastus
resource_group: Testing
orchestration_platform: DCOS
master_profile:
- count: 3
dns_prefix: acsdcosmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acscdcosagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running Swarm
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: Testing
orchestration_platform: Swarm
master_profile:
- count: 3
dns_prefix: acsswarmmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsswarmagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
# Deletes the specified container service in the specified subscription and resource group.
# The operation does not delete other resources created as part of creating a container service,
# including storage accounts, VMs, and availability sets. All the other resources created with the container
# service are part of the same resource group and can be deleted individually.
- name: Remove an azure container services instance
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: Testing
state: absent
orchestration_platform: Swarm
master_profile:
- count: 1
vm_size: Standard_A0
dns_prefix: acstestingmasterdns5
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 4
dns_prefix: acctestagent15
vm_size: Standard_A0
diagnostics_profile: false
tags:
Ansible: azure_rm_acs
'''
RETURN = '''
state:
description: Current state of the azure container service
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerservice.models import (
ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile,
ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile,
ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile,
ContainerServiceLinuxProfile, ContainerServiceSshConfiguration,
ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey,
ContainerServiceVMDiagnostics
)
except ImportError:
# This is handled in azure_rm_common
pass
def create_agent_pool_profile_instance(agentpoolprofile):
'''
Helper method to serialize a dict to a ContainerServiceAgentPoolProfile
:param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile
:return: ContainerServiceAgentPoolProfile
'''
return ContainerServiceAgentPoolProfile(
name=agentpoolprofile['name'],
count=agentpoolprofile['count'],
dns_prefix=agentpoolprofile['dns_prefix'],
vm_size=agentpoolprofile['vm_size']
)
def create_orch_platform_instance(orchestrator):
'''
Helper method to serialize a dict to a ContainerServiceOrchestratorProfile
:param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile
:return: ContainerServiceOrchestratorProfile
'''
return ContainerServiceOrchestratorProfile(
orchestrator_type=orchestrator,
)
def create_service_principal_profile_instance(spnprofile):
'''
Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile
:param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile
:return: ContainerServiceServicePrincipalProfile
'''
return ContainerServiceServicePrincipalProfile(
client_id=spnprofile[0]['client_id'],
secret=spnprofile[0]['client_secret']
)
def create_linux_profile_instance(linuxprofile):
'''
Helper method to serialize a dict to a ContainerServiceLinuxProfile
:param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
:return: ContainerServiceLinuxProfile
'''
return ContainerServiceLinuxProfile(
admin_username=linuxprofile[0]['admin_username'],
ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key'])
)
def create_ssh_configuration_instance(sshconf):
'''
Helper method to serialize a dict to a ContainerServiceSshConfiguration
:param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration
:return: ContainerServiceSshConfiguration
'''
listssh = []
key = ContainerServiceSshPublicKey(key_data=str(sshconf))
listssh.append(key)
return ContainerServiceSshConfiguration(
public_keys=listssh
)
def create_master_profile_instance(masterprofile):
'''
Helper method to serialize a dict to a ContainerServiceMasterProfile
    Note: first_consecutive_static_ip is deliberately set to None because the
    Azure server rejects request bodies that include this property. This appears
    to be an inconsistency between the Azure client SDK and the Azure server.
:param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile
:return: ContainerServiceMasterProfile
'''
return ContainerServiceMasterProfile(
count=masterprofile[0]['count'],
dns_prefix=masterprofile[0]['dns_prefix'],
vm_size=masterprofile[0]['vm_size'],
first_consecutive_static_ip=None
)
def create_diagnostics_profile_instance(diagprofile):
'''
Helper method to serialize a dict to a ContainerServiceDiagnosticsProfile
:param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile
:return: ContainerServiceDiagnosticsProfile
'''
return ContainerServiceDiagnosticsProfile(
vm_diagnostics=create_vm_diagnostics_instance(diagprofile)
)
def create_vm_diagnostics_instance(vmdiag):
'''
Helper method to serialize a dict to a ContainerServiceVMDiagnostics
:param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics
:return: ContainerServiceVMDiagnostics
'''
return ContainerServiceVMDiagnostics(
enabled=vmdiag
)
def create_acs_dict(acs):
'''
Helper method to deserialize a ContainerService to a dict
:param: acs: ContainerService or AzureOperationPoller with the Azure callback object
:return: dict with the state on Azure
'''
service_principal_profile_dict = None
if acs.orchestrator_profile.orchestrator_type == 'Kubernetes':
service_principal_profile_dict = create_service_principal_profile_dict(acs.service_principal_profile)
return dict(
id=acs.id,
name=acs.name,
location=acs.location,
tags=acs.tags,
orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile),
master_profile=create_master_profile_dict(acs.master_profile),
linux_profile=create_linux_profile_dict(acs.linux_profile),
service_principal_profile=service_principal_profile_dict,
diagnostics_profile=create_diagnotstics_profile_dict(acs.diagnostics_profile),
provisioning_state=acs.provisioning_state,
agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles),
type=acs.type
)
def create_linux_profile_dict(linuxprofile):
'''
Helper method to deserialize a ContainerServiceLinuxProfile to a dict
:param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
ssh_key=linuxprofile.ssh.public_keys[0].key_data,
admin_username=linuxprofile.admin_username
)
def create_master_profile_dict(masterprofile):
'''
Helper method to deserialize a ContainerServiceMasterProfile to a dict
:param: masterprofile: ContainerServiceMasterProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
count=masterprofile.count,
fqdn=masterprofile.fqdn,
vm_size=masterprofile.vm_size,
dns_prefix=masterprofile.dns_prefix
)
def create_service_principal_profile_dict(serviceprincipalprofile):
'''
Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
    Note: For security reasons, the service principal secret is deliberately omitted.
:param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
client_id=serviceprincipalprofile.client_id
)
def create_diagnotstics_profile_dict(diagnosticsprofile):
'''
Helper method to deserialize a ContainerServiceVMDiagnostics to a dict
:param: diagnosticsprofile: ContainerServiceVMDiagnostics with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled
)
def create_orchestrator_profile_dict(orchestratorprofile):
'''
Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict
:param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
orchestrator_type=str(orchestratorprofile.orchestrator_type)
)
def create_agent_pool_profiles_dict(agentpoolprofiles):
'''
Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
:param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
:return: dict with the state on Azure
'''
return [dict(
count=profile.count,
vm_size=profile.vm_size,
name=profile.name,
dns_prefix=profile.dns_prefix,
fqdn=profile.fqdn
) for profile in agentpoolprofiles]
class AzureRMContainerService(AzureRMModuleBase):
"""Configuration class for an Azure RM container service resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
required=False,
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
required=False
),
orchestration_platform=dict(
type='str',
required=True,
choices=['DCOS', 'Kubernetes', 'Swarm']
),
master_profile=dict(
type='list',
required=True
),
linux_profile=dict(
type='list',
required=True
),
agent_pool_profiles=dict(
type='list',
required=True
),
service_principal=dict(
type='list',
required=False
),
diagnostics_profile=dict(
type='bool',
required=True
)
)
self.resource_group = None
self.name = None
self.location = None
self.tags = None
self.state = None
self.orchestration_platform = None
self.master_profile = None
self.linux_profile = None
self.agent_pool_profiles = None
self.service_principal = None
self.diagnostics_profile = None
self.results = dict(changed=False, state=dict())
super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = None
response = None
results = dict()
to_be_updated = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# Check if the ACS instance already present in the RG
if self.state == 'present':
if self.orchestration_platform == 'Kubernetes':
if not self.service_principal:
self.fail('service_principal should be specified when using Kubernetes')
if not self.service_principal[0].get('client_id'):
self.fail('service_principal.client_id should be specified when using Kubernetes')
if not self.service_principal[0].get('client_secret'):
self.fail('service_principal.client_secret should be specified when using Kubernetes')
mastercount = self.master_profile[0].get('count')
if mastercount != 1 and mastercount != 3 and mastercount != 5:
                self.fail('Master count {} is invalid: it must be 1, 3 or 5'.format(mastercount))
            # For now no more than one agent pool is supported; remove this check in the future if that changes
agentpoolcount = len(self.agent_pool_profiles)
if agentpoolcount > 1:
                self.fail('You cannot specify more than one agent_pool_profile')
response = self.get_acs()
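            # get_acs() returns the instance state dict, or False when no ACS
            # with this name exists yet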
self.results['state'] = response
if not response:
to_be_updated = True
else:
self.log('Results : {0}'.format(response))
update_tags, response['tags'] = self.update_tags(response['tags'])
if response['provisioning_state'] == "Succeeded":
if update_tags:
to_be_updated = True
def is_property_changed(profile, property, ignore_case=False):
base = response[profile].get(property)
new = getattr(self, profile)[0].get(property)
if ignore_case:
return base.lower() != new.lower()
else:
return base != new
# Cannot Update the master count for now // Uncomment this block in the future to support it
if is_property_changed('master_profile', 'count'):
# self.log(("Master Profile Count Diff, Was {0} / Now {1}"
# .format(response['master_profile'].count,
# self.master_profile[0].get('count'))))
# to_be_updated = True
self.module.warn("master_profile.count cannot be updated")
# Cannot Update the master vm_size for now. Could be a client SDK bug
# Uncomment this block in the future to support it
if is_property_changed('master_profile', 'vm_size', True):
# self.log(("Master Profile VM Size Diff, Was {0} / Now {1}"
# .format(response['master_profile'].get('vm_size'),
# self.master_profile[0].get('vm_size'))))
# to_be_updated = True
self.module.warn("master_profile.vm_size cannot be updated")
# Cannot Update the SSH Key for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'ssh_key'):
# self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
# .format(response['linux_profile'].ssh.public_keys[0].key_data,
# self.linux_profile[0].get('ssh_key'))))
# to_be_updated = True
self.module.warn("linux_profile.ssh_key cannot be updated")
# self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
# self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
# Cannot Update the Username for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'admin_username'):
# self.log(("Linux Profile Diff User, Was {0} / Now {1}"
# .format(response['linux_profile'].admin_username,
# self.linux_profile[0].get('admin_username'))))
# to_be_updated = True
self.module.warn("linux_profile.admin_username cannot be updated")
                # Cannot have more than one agent pool profile for now // Uncomment this block in the future to support it
# if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
# self.log("Agent Pool count is diff, need to updated")
# to_be_updated = True
for profile_result in response['agent_pool_profiles']:
matched = False
for profile_self in self.agent_pool_profiles:
if profile_result['name'] == profile_self['name']:
matched = True
if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != \
profile_self['vm_size']:
self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}"
.format(profile_result['count'], profile_self['count'],
profile_result['vm_size'], profile_self['vm_size'])))
to_be_updated = True
if not matched:
self.log("Agent Pool not found")
to_be_updated = True
if to_be_updated:
self.log("Need to Create / Update the ACS instance")
if self.check_mode:
return self.results
self.results['state'] = self.create_update_acs()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.state == 'absent':
if self.check_mode:
return self.results
self.delete_acs()
self.log("ACS instance deleted")
return self.results
def create_update_acs(self):
'''
Creates or updates a container service with the specified configuration of orchestrator, masters, and agents.
:return: deserialized ACS instance state dictionary
'''
self.log("Creating / Updating the ACS instance {0}".format(self.name))
service_principal_profile = None
agentpools = []
if self.agent_pool_profiles:
for profile in self.agent_pool_profiles:
self.log("Trying to push the following Profile {0}".format(profile))
agentpools.append(create_agent_pool_profile_instance(profile))
if self.orchestration_platform == 'Kubernetes':
service_principal_profile = create_service_principal_profile_instance(self.service_principal)
parameters = ContainerService(
location=self.location,
tags=self.tags,
orchestrator_profile=create_orch_platform_instance(self.orchestration_platform),
service_principal_profile=service_principal_profile,
linux_profile=create_linux_profile_instance(self.linux_profile),
master_profile=create_master_profile_instance(self.master_profile),
agent_pool_profiles=agentpools,
diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile)
)
# self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile))
# self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
# self.log("linux_profile : {0}".format(parameters.linux_profile))
# self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
# self.log("ssh : {0}".format(parameters.linux_profile.ssh))
# self.log("master_profile : {0}".format(parameters.master_profile))
# self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
# self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics))
try:
poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name,
parameters)
response = self.get_poller_result(poller)
except CloudError as exc:
self.log('Error attempting to create the ACS instance.')
self.fail("Error creating the ACS instance: {0}".format(str(exc)))
return create_acs_dict(response)
def delete_acs(self):
'''
Deletes the specified container service in the specified subscription and resource group.
The operation does not delete other resources created as part of creating a container service,
including storage accounts, VMs, and availability sets.
All the other resources created with the container service are part of the same resource group and can be deleted individually.
:return: True
'''
self.log("Deleting the ACS instance {0}".format(self.name))
try:
poller = self.containerservice_client.container_services.delete(self.resource_group, self.name)
self.get_poller_result(poller)
except CloudError as e:
self.log('Error attempting to delete the ACS instance.')
self.fail("Error deleting the ACS instance: {0}".format(str(e)))
return True
def get_acs(self):
'''
Gets the properties of the specified container service.
:return: deserialized ACS instance state dictionary
'''
self.log("Checking if the ACS instance {0} is present".format(self.name))
found = False
try:
response = self.containerservice_client.container_services.get(self.resource_group, self.name)
found = True
self.log("Response : {0}".format(response))
self.log("ACS instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the ACS instance.')
if found is True:
return create_acs_dict(response)
else:
return False
def main():
"""Main execution"""
AzureRMContainerService()
if __name__ == '__main__':
main()
| gpl-3.0 | 4,845,411,957,356,731,000 | 38.247326 | 135 | 0.600266 | false |
drmateo/ecto | test/benchmark/metrics.py | 4 | 4501 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto_test
import sys
def test_nodelay():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> metrics[:])
sched = ecto.Scheduler(plasm)
sched.execute(niter=10000)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert metrics.outputs.hz > 5000
assert metrics.outputs.latency_seconds < 0.0001
def test_20hz():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Throttle("Throttle", rate=20)
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> throttle[:],
throttle[:] >> metrics[:])
sched = ecto.Scheduler(plasm)
sched.execute(niter=100)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert 19 < metrics.outputs.hz < 21
assert 0.04 < metrics.outputs.latency_seconds < 0.06
def makeplasm(n_nodes):
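    # chain n_nodes Sleep cells, each sleeping 1/n_nodes seconds, so a token
    # takes ~1 second to traverse the pipeline; the trailing Metrics cell
    # reports throughput (hz) and end-to-end latency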
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Sleep("Sleep_0", seconds=1.0/n_nodes)
plasm.connect(ping[:] >> throttle[:])
for j in range(n_nodes-1): # one has already been added
throttle_next = ecto_test.Sleep("Sleep_%u" % (j+1), seconds=1.0/n_nodes)
plasm.connect(throttle, "out", throttle_next, "in")
throttle = throttle_next
metrics = ecto_test.Metrics("Metrics", queue_size=4)
plasm.connect(throttle[:] >> metrics[:])
# o = open('graph.dot', 'w')
# print >>o, plasm.viz()
# o.close()
# print "\n", plasm.viz(), "\n"
return (plasm, metrics)
def test_st(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
#sched = ecto.Scheduler(plasm)
#sched.execute(niter)
sched = ecto.Scheduler(plasm)
sched.execute(niter)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert 0.95 < metrics.outputs.hz < 1.05
assert 0.95 < metrics.outputs.latency_seconds < 1.05
#
# It is hard to test the middle cases, i.e. if you have one thread
# per node, things should run at n_nodes hz and 1 second latency but
# if there are fewer threads than that, things are somewhere in the middle.
# Also your latency tends to be worse as you have to wait for the
# graph to "fill up"
#
def test_tp(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
sched = ecto.Scheduler(plasm)
sched.execute(niter=niter)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert n_nodes * 0.95 < metrics.outputs.hz < n_nodes * 1.05
assert 0.9 < metrics.outputs.latency_seconds < 1.1
test_nodelay()
test_20hz()
test_st(5, 5)
test_st(5, 12)
test_tp(20, 15)
test_tp(20, 10)
test_tp(20, 5)
| bsd-3-clause | 7,519,755,807,337,033,000 | 33.891473 | 96 | 0.688291 | false |
hpcugent/hanythingondemand | hod/subcommands/relabel.py | 2 | 2618 | #!/usr/bin/env python
# #
# Copyright 2009-2016 Ghent University
#
# This file is part of hanythingondemand
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/hanythingondemand
#
# hanythingondemand is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# hanythingondemand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hanythingondemand. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Relabel a cluster.
@author: Ewan Higgs (Universiteit Gent)
@author: Kenneth Hoste (Universiteit Gent)
"""
import sys
from vsc.utils.generaloption import GeneralOption
from hod import VERSION as HOD_VERSION
from hod.subcommands.subcommand import SubCommand
import hod.cluster as hc
class RelabelOptions(GeneralOption):
"""Option parser for 'relabel' subcommand."""
VERSION = HOD_VERSION
ALLOPTSMANDATORY = False # let us use optionless arguments.
class RelabelSubCommand(SubCommand):
"""Implementation of HOD 'relabel' subcommand."""
CMD = 'relabel'
EXAMPLE = "<source-cluster-label> <dest-cluster-label>"
HELP = "Change the label of an existing job."
def run(self, args):
"""Run 'relabel' subcommand."""
optparser = RelabelOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
try:
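            # optparser.args[0] is presumably the subcommand itself, so a valid
            # invocation carries exactly three positional arguments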
if len(optparser.args) != 3:
self.report_error(self.usage())
labels = hc.known_cluster_labels()
if optparser.args[1] not in labels:
self.report_error("Cluster with label '%s' not found", optparser.args[1])
try:
hc.mv_cluster_info(optparser.args[1], optparser.args[2])
except (IOError, OSError) as err:
self.report_error("Could not change label '%s' to '%s': %s", optparser.args[1], optparser.args[2], err)
except StandardError as err:
self._log_and_raise(err)
return 0
| gpl-2.0 | -950,074,991,606,819,100 | 35.361111 | 119 | 0.694423 | false |
aferr/TemporalPartitioningMemCtl | src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py | 90 | 2440 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
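# PUSHF/POPF microcode: the flags are copied via the t1 temporary register to
# or from the stack, with rsp adjusted by the stack operand size (ssz).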
microcode = '''
def macroop PUSHF {
.adjust_env oszIn64Override
rflags t1
st t1, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop POPF {
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp], dataSize=ssz
addi rsp, rsp, ssz
wrflags t1, t0
};
'''
| bsd-3-clause | -3,986,826,135,170,318,300 | 44.185185 | 72 | 0.772951 | false |
aaltinisik/OCBAltinkaya | addons/fetchmail/fetchmail.py | 6 | 15874 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import poplib
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
MAIL_TIMEOUT = 60
# Workaround for Python 2.7.8 bug https://bugs.python.org/issue23906
poplib._MAXLINE = 65536
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
        'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference "
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
r = m.read(cr,uid,[object_id],['model'])
conf['model']=r[0]['model']
        values['configuration'] = """Use the script below with your Mail Transport Agent (MTA), passing the following command line options
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
@yourdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids , {'state':'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
# Add timeout on socket
connection.sock.settimeout(MAIL_TIMEOUT)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.exception("Failed to connect to %s server %s.", server.type, server.name)
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
imap_server.store(num, '+FLAGS', '\\Seen')
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
# Nevermind if default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,146,645,553,006,914,600 | 48.145511 | 195 | 0.525702 | false |
gnuhub/intellij-community | python/lib/Lib/site-packages/django/utils/autoreload.py | 135 | 4239 | # Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time
try:
import thread
except ImportError:
import dummy_thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading
except ImportError:
pass
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
def code_changed():
global _mtimes, _win
for filename in filter(lambda v: v, map(lambda m: getattr(m, "__file__", None), sys.modules.values())):
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if not os.path.exists(filename):
continue # File might be in an egg, so it can't be reloaded.
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
return True
return False
def reloader_thread():
while RUN_RELOADER:
if code_changed():
sys.exit(3) # force reload
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
reloader(main_func, args, kwargs)
| apache-2.0 | 898,659,058,366,997,100 | 34.621849 | 107 | 0.664308 | false |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/widgets/stylesheet/stylesheeteditor.py | 3 | 4557 | #############################################################################
##
## Copyright (C) 2010 Hans-Peter Jansen <[email protected]>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
###########################################################################
from PyQt5.QtCore import pyqtSlot, QFile, QRegExp, Qt, QTextStream
from PyQt5.QtWidgets import (QApplication, QDialog, QFileDialog, QMessageBox,
QStyleFactory)
from ui_stylesheeteditor import Ui_StyleSheetEditor
class StyleSheetEditor(QDialog):
def __init__(self, parent=None):
super(StyleSheetEditor, self).__init__(parent)
self.ui = Ui_StyleSheetEditor()
self.ui.setupUi(self)
regExp = QRegExp(r'.(.*)\+?Style')
defaultStyle = QApplication.style().metaObject().className()
if regExp.exactMatch(defaultStyle):
defaultStyle = regExp.cap(1)
self.ui.styleCombo.addItems(QStyleFactory.keys())
self.ui.styleCombo.setCurrentIndex(
self.ui.styleCombo.findText(defaultStyle, Qt.MatchContains))
self.ui.styleSheetCombo.setCurrentIndex(
self.ui.styleSheetCombo.findText('Coffee'))
self.loadStyleSheet('Coffee')
@pyqtSlot(str)
def on_styleCombo_activated(self, styleName):
QApplication.setStyle(styleName)
self.ui.applyButton.setEnabled(False)
@pyqtSlot(str)
def on_styleSheetCombo_activated(self, sheetName):
self.loadStyleSheet(sheetName)
def on_styleTextEdit_textChanged(self):
self.ui.applyButton.setEnabled(True)
def on_applyButton_clicked(self):
QApplication.instance().setStyleSheet(
self.ui.styleTextEdit.toPlainText())
self.ui.applyButton.setEnabled(False)
def on_saveButton_clicked(self):
fileName, _ = QFileDialog.getSaveFileName(self)
if fileName:
self.saveStyleSheet(fileName)
def loadStyleSheet(self, sheetName):
file = QFile(':/qss/%s.qss' % sheetName.lower())
file.open(QFile.ReadOnly)
styleSheet = file.readAll()
try:
# Python v2.
styleSheet = unicode(styleSheet, encoding='utf8')
except NameError:
# Python v3.
styleSheet = str(styleSheet, encoding='utf8')
self.ui.styleTextEdit.setPlainText(styleSheet)
QApplication.instance().setStyleSheet(styleSheet)
self.ui.applyButton.setEnabled(False)
def saveStyleSheet(self, fileName):
styleSheet = self.ui.styleTextEdit.toPlainText()
file = QFile(fileName)
if file.open(QFile.WriteOnly):
QTextStream(file) << styleSheet
else:
QMessageBox.information(self, "Unable to open file",
file.errorString())
| gpl-2.0 | 5,889,234,866,323,355,000 | 38.626087 | 77 | 0.667983 | false |
nebril/fuel-web | nailgun/nailgun/openstack/common/timeutils.py | 16 | 5967 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
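# Illustrative sketch of how the override helpers above are meant to be used in
# tests (assumptions: the module is importable as shown for this repo layout,
# and the datetime value is made up):
#
#   import datetime
#   from nailgun.openstack.common import timeutils
#
#   timeutils.set_time_override(datetime.datetime(2015, 1, 1, 12, 0, 0))
#   assert timeutils.utcnow() == datetime.datetime(2015, 1, 1, 12, 0, 0)
#   timeutils.advance_time_seconds(30)
#   timeutils.clear_time_override()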
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:params dt: the time
:params window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
| apache-2.0 | -4,972,100,476,774,550,000 | 29.28934 | 78 | 0.652589 | false |
molobrakos/home-assistant | homeassistant/components/fints/sensor.py | 7 | 9289 | """Read the balance of your bank accounts via FinTS."""
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_USERNAME, CONF_PIN, CONF_URL, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=4)
ICON = 'mdi:currency-eur'
BankCredentials = namedtuple('BankCredentials', 'blz login pin url')
CONF_BIN = 'bank_identification_number'
CONF_ACCOUNTS = 'accounts'
CONF_HOLDINGS = 'holdings'
CONF_ACCOUNT = 'account'
ATTR_ACCOUNT = CONF_ACCOUNT
ATTR_BANK = 'bank'
ATTR_ACCOUNT_TYPE = 'account_type'
SCHEMA_ACCOUNTS = vol.Schema({
vol.Required(CONF_ACCOUNT): cv.string,
vol.Optional(CONF_NAME, default=None): vol.Any(None, cv.string),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACCOUNTS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
vol.Optional(CONF_HOLDINGS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensors.
Login to the bank and get a list of existing accounts. Create a
sensor for each account.
"""
credentials = BankCredentials(config[CONF_BIN], config[CONF_USERNAME],
config[CONF_PIN], config[CONF_URL])
fints_name = config.get(CONF_NAME, config[CONF_BIN])
account_config = {acc[CONF_ACCOUNT]: acc[CONF_NAME]
for acc in config[CONF_ACCOUNTS]}
holdings_config = {acc[CONF_ACCOUNT]: acc[CONF_NAME]
for acc in config[CONF_HOLDINGS]}
client = FinTsClient(credentials, fints_name)
balance_accounts, holdings_accounts = client.detect_accounts()
accounts = []
for account in balance_accounts:
if config[CONF_ACCOUNTS] and account.iban not in account_config:
_LOGGER.info('skipping account %s for bank %s',
account.iban, fints_name)
continue
account_name = account_config.get(account.iban)
if not account_name:
account_name = '{} - {}'.format(fints_name, account.iban)
accounts.append(FinTsAccount(client, account, account_name))
_LOGGER.debug('Creating account %s for bank %s',
account.iban, fints_name)
for account in holdings_accounts:
if config[CONF_HOLDINGS] and \
account.accountnumber not in holdings_config:
_LOGGER.info('skipping holdings %s for bank %s',
account.accountnumber, fints_name)
continue
account_name = holdings_config.get(account.accountnumber)
if not account_name:
account_name = '{} - {}'.format(
fints_name, account.accountnumber)
accounts.append(FinTsHoldingsAccount(client, account, account_name))
_LOGGER.debug('Creating holdings %s for bank %s',
account.accountnumber, fints_name)
add_entities(accounts, True)
class FinTsClient:
"""Wrapper around the FinTS3PinTanClient.
    Use the client property to get a fresh FinTS3Client object for each request.
"""
def __init__(self, credentials: BankCredentials, name: str):
"""Initialize a FinTsClient."""
self._credentials = credentials
self.name = name
@property
def client(self):
"""Get the client object.
        As the fints library is stateless, there is no benefit in caching
the client objects. If that ever changes, consider caching the client
object and also think about potential concurrency problems.
"""
from fints.client import FinTS3PinTanClient
return FinTS3PinTanClient(
self._credentials.blz, self._credentials.login,
self._credentials.pin, self._credentials.url)
def detect_accounts(self):
"""Identify the accounts of the bank."""
from fints.dialog import FinTSDialogError
balance_accounts = []
holdings_accounts = []
for account in self.client.get_sepa_accounts():
try:
self.client.get_balance(account)
balance_accounts.append(account)
except IndexError:
# account is not a balance account.
pass
except FinTSDialogError:
# account is not a balance account.
pass
try:
self.client.get_holdings(account)
holdings_accounts.append(account)
except FinTSDialogError:
# account is not a holdings account.
pass
return balance_accounts, holdings_accounts
class FinTsAccount(Entity):
"""Sensor for a FinTS balance account.
A balance account contains an amount of money (=balance). The amount may
also be negative.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs balance account."""
self._client = client # type: FinTsClient
self._account = account
self._name = name # type: str
self._balance = None # type: float
self._currency = None # type: str
@property
def should_poll(self) -> bool:
"""Return True.
Data needs to be polled from the bank servers.
"""
return True
def update(self) -> None:
"""Get the current balance and currency for the account."""
bank = self._client.client
balance = bank.get_balance(self._account)
self._balance = balance.amount.amount
self._currency = balance.amount.currency
_LOGGER.debug('updated balance of account %s', self.name)
@property
def name(self) -> str:
"""Friendly name of the sensor."""
return self._name
@property
def state(self) -> float:
"""Return the balance of the account as state."""
return self._balance
@property
def unit_of_measurement(self) -> str:
"""Use the currency as unit of measurement."""
return self._currency
@property
def device_state_attributes(self) -> dict:
"""Additional attributes of the sensor."""
attributes = {
ATTR_ACCOUNT: self._account.iban,
ATTR_ACCOUNT_TYPE: 'balance',
}
if self._client.name:
attributes[ATTR_BANK] = self._client.name
return attributes
@property
def icon(self) -> str:
"""Set the icon for the sensor."""
return ICON
class FinTsHoldingsAccount(Entity):
"""Sensor for a FinTS holdings account.
A holdings account does not contain money but rather some financial
instruments, e.g. stocks.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs holdings account."""
self._client = client # type: FinTsClient
self._name = name # type: str
self._account = account
self._holdings = []
self._total = None # type: float
@property
def should_poll(self) -> bool:
"""Return True.
Data needs to be polled from the bank servers.
"""
return True
def update(self) -> None:
"""Get the current holdings for the account."""
bank = self._client.client
self._holdings = bank.get_holdings(self._account)
self._total = sum(h.total_value for h in self._holdings)
@property
def state(self) -> float:
"""Return total market value as state."""
return self._total
@property
def icon(self) -> str:
"""Set the icon for the sensor."""
return ICON
@property
def device_state_attributes(self) -> dict:
"""Additional attributes of the sensor.
Lists each holding of the account with the current value.
"""
attributes = {
ATTR_ACCOUNT: self._account.accountnumber,
ATTR_ACCOUNT_TYPE: 'holdings',
}
if self._client.name:
attributes[ATTR_BANK] = self._client.name
for holding in self._holdings:
total_name = '{} total'.format(holding.name)
attributes[total_name] = holding.total_value
pieces_name = '{} pieces'.format(holding.name)
attributes[pieces_name] = holding.pieces
price_name = '{} price'.format(holding.name)
attributes[price_name] = holding.market_value
return attributes
@property
def name(self) -> str:
"""Friendly name of the sensor."""
return self._name
@property
def unit_of_measurement(self) -> str:
"""Get the unit of measurement.
Hardcoded to EUR, as the library does not provide the currency for the
        holdings. As FinTS is only used in Germany, most accounts will be
        in EUR anyway.
"""
return "EUR"
| apache-2.0 | -8,379,134,397,970,261,000 | 31.823322 | 78 | 0.616859 | false |
irwinlove/django | django/template/__init__.py | 198 | 2022 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However,
keeping them together made the implementation of Multiple Template Engines
less disruptive.
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, Template, Variable,
)
# Deprecated in Django 1.8, will be removed in Django 1.10.
from .base import resolve_variable # NOQA isort:skip
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
| bsd-3-clause | 8,233,015,189,121,580,000 | 27.478873 | 89 | 0.705242 | false |
lawl/pmbootstrap | pmb/aportgen/linux.py | 2 | 4781 | """
Copyright 2017 Oliver Smith
This file is part of pmbootstrap.
pmbootstrap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pmbootstrap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pmbootstrap. If not, see <http://www.gnu.org/licenses/>.
"""
import pmb.helpers.run
import pmb.aportgen.core
import pmb.parse.apkindex
import pmb.parse.arch
def generate_apkbuild(args, pkgname, manufacturer, name, arch):
device = "-".join(pkgname.split("-")[1:])
carch = pmb.parse.arch.alpine_to_kernel(arch)
content = """\
# Kernel config based on: arch/""" + carch + """/configs/(CHANGEME!)
pkgname=\"""" + pkgname + """\"
pkgver=3.x.x
pkgrel=0
pkgdesc=\"""" + manufacturer + " " + name + """ kernel fork\"
arch=\"""" + arch + """\"
_carch=\"""" + carch + """\"
_flavor=\"""" + device + """\"
url="https://kernel.org"
license="GPL2"
options="!strip !check !tracedeps"
makedepends="perl sed installkernel bash gmp-dev bc linux-headers elfutils-dev"
HOSTCC="${CC:-gcc}"
HOSTCC="${HOSTCC#${CROSS_COMPILE}}"
# Source
_repository="(CHANGEME!)"
_commit="ffffffffffffffffffffffffffffffffffffffff"
_config="config-${_flavor}.${arch}"
source="
$pkgname-$_commit.tar.gz::https://github.com/LineageOS/${_repository}/archive/${_commit}.tar.gz
$_config
compiler-gcc6.h
01_msm-fix-perf_trace_counters.patch
02_gpu-msm-fix-gcc5-compile.patch
"
builddir="$srcdir/${_repository}-${_commit}"
prepare() {
default_prepare
# gcc6 support
cp -v "$srcdir/compiler-gcc6.h" "$builddir/include/linux/"
# Remove -Werror from all makefiles
find . -type f -name Makefile -print0 | \\
xargs -0 sed -i 's/-Werror-/-W/g'
find . -type f -name Makefile -print0 | \\
xargs -0 sed -i 's/-Werror//g'
# Prepare kernel config ('yes ""' for kernels lacking olddefconfig)
cp "$srcdir"/$_config "$builddir"/.config
yes "" | make ARCH="$_carch" HOSTCC="$HOSTCC" oldconfig
}
menuconfig() {
cd "$builddir"
make ARCH="$_carch" menuconfig
cp .config "$startdir"/$_config
}
build() {
unset LDFLAGS
make ARCH="$_carch" CC="${CC:-gcc}" \\
KBUILD_BUILD_VERSION="$((pkgrel + 1 ))-postmarketOS"
}
package() {
# kernel.release
install -D "$builddir/include/config/kernel.release" \\
"$pkgdir/usr/share/kernel/$_flavor/kernel.release"
# zImage (find the right one)
cd "$builddir/arch/$_carch/boot"
_target="$pkgdir/boot/vmlinuz-$_flavor"
for _zimg in zImage-dtb Image.gz-dtb *zImage Image; do
[ -e "$_zimg" ] || continue
msg "zImage found: $_zimg"
install -Dm644 "$_zimg" "$_target"
break
done
if ! [ -e "$_target" ]; then
error "Could not find zImage in $PWD!"
return 1
fi
}
sha512sums="(run 'pmbootstrap checksum """ + pkgname + """' to fill)"
"""
# Write the file
with open(args.work + "/aportgen/APKBUILD", "w", encoding="utf-8") as handle:
for line in content.split("\n"):
handle.write(line[8:].replace(" " * 4, "\t") + "\n")
def generate(args, pkgname):
device = "-".join(pkgname.split("-")[1:])
deviceinfo = pmb.parse.deviceinfo(args, device)
# Copy gcc6 support header and the patches from lg-mako for now
# (automatically finding the right patches is planned in #688)
pmb.helpers.run.user(args, ["mkdir", "-p", args.work + "/aportgen"])
for file in ["compiler-gcc6.h", "01_msm-fix-perf_trace_counters.patch",
"02_gpu-msm-fix-gcc5-compile.patch"]:
pmb.helpers.run.user(args, ["cp", args.aports +
"/device/linux-lg-mako/" + file,
args.work + "/aportgen/"])
generate_apkbuild(args, pkgname, deviceinfo["manufacturer"],
deviceinfo["name"], deviceinfo["arch"])
| gpl-3.0 | -4,634,088,153,215,434,000 | 35.776923 | 107 | 0.560552 | false |
a-b/PopClip-Extensions | source/InstantTranslate/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit | 8,072,512,576,601,159,000 | 40 | 69 | 0.718025 | false |
stevenmizuno/QGIS | python/user.py | 7 | 4676 | # -*- coding: utf-8 -*-
"""
***************************************************************************
user.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Nathan Woodrow'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import glob
import traceback
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import Qgis, QgsApplication, QgsMessageLog
def load_user_expressions(path):
"""
    Load all user expressions from the given path
"""
#Loop all py files and import them
modules = glob.glob(path + "/*.py")
names = [os.path.basename(f)[:-3] for f in modules]
for name in names:
if name == "__init__":
continue
# As user expression functions should be registered with qgsfunction
# just importing the file is enough to get it to load the functions into QGIS
try:
__import__("expressions.{0}".format(name), locals(), globals())
except:
error = traceback.format_exc()
msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
msg = QCoreApplication.translate("UserExpressions", "The user expression {0} is not valid").format(name)
QgsMessageLog.logMessage(msg + "\n" + error, msgtitle, Qgis.Warning)
userpythonhome = os.path.join(QgsApplication.qgisSettingsDirPath(), "python")
expressionspath = os.path.join(userpythonhome, "expressions")
sys.path.append(userpythonhome)
if not os.path.exists(expressionspath):
os.makedirs(expressionspath)
initfile = os.path.join(expressionspath, "__init__.py")
if not os.path.exists(initfile):
open(initfile, "w").close()
template = """\"\"\"
Define a new function using the @qgsfunction decorator.
The function accept the following parameters
:param [any]: Define any parameters you want to pass to your function before
the following arguments.
:param feature: The current feature
:param parent: The QgsExpression object
:param context: If there is an argument called ``context`` found at the last
position, this variable will contain a ``QgsExpressionContext``
object, that gives access to various additional information like
expression variables. E.g. ``context.variable('layer_id')``
:returns: The result of the expression.
The @qgsfunction decorator accepts the following arguments:
:param args: Defines the number of arguments. With ``args='auto'`` the number
arguments will automatically be extracted from the signature.
:param group: The name of the group under which this expression function will
be listed.
:param usesgeometry: Set this to False if your function does not access
feature.geometry(). Defaults to True.
:param referenced_columns: An array of attribute names that are required to run
this function. Defaults to
[QgsFeatureRequest.ALL_ATTRIBUTES].
\"\"\"
from qgis.core import *
from qgis.gui import *
@qgsfunction(args='auto', group='Custom')
def my_sum(value1, value2, feature, parent):
\"\"\"
Calculates the sum of the two parameters value1 and value2.
<h2>Example usage:</h2>
<ul>
<li>my_sum(5, 8) -> 13</li>
<li>my_sum(\"fiel1\", \"field2\") -> 42</li>
</ul>
\"\"\"
return value1 + value2
"""
try:
import expressions
expressions.load = load_user_expressions
expressions.load(expressionspath)
expressions.template = template
except ImportError:
# We get a import error and crash for some reason even if we make the expressions package
# TODO Fix the crash on first load with no expressions folder
# But for now it's not the end of the world if it doesn't load the first time
pass
| gpl-2.0 | -5,102,627,090,604,626,000 | 36.709677 | 116 | 0.605004 | false |
heidtn/PyDataLearn | PyDataLearn/NeuralNet.py | 1 | 6181 | from math import tanh
from pysqlite2 import dbapi2 as sqlite
def dtanh(y):
    # derivative of tanh expressed in terms of the output y: d/dx tanh(x) = 1 - tanh(x)**2.
    # The multiplier is largest when y is close to 0 (where tanh is steepest), so
    # unsaturated nodes receive the biggest weight adjustments during backpropagation.
return 1.0-y*y
class SearchNet:
def __init__(self, dbname):
self.con = sqlite.connect(dbname)
def __del__(self):
self.con.close()
def maketables(self):
self.con.execute('create table hiddennode(create_key)')
self.con.execute('create table wordhidden(fromid, toid, strength)')
self.con.execute('create table hiddenurl(fromid, toid, strength)')
self.con.commit()
def getstrength(self, fromid, toid, layer):
#returns strength of connection from fromid to toid
#layer specifies the table, whether dendrites connecting input to hidden or hidden to output
if layer == 0: table = 'wordhidden'
else: table = 'hiddenurl'
res = self.con.execute('select strength from %s where fromid=%d and toid=%d' % (table, fromid, toid)).fetchone()
if res == None:
if layer == 0: return -0.2 #if extra word, we want negative effects
if layer == 1: return 0
return res[0]
def setstrength(self, fromid, toid, layer, strength):
if layer == 0: table = 'wordhidden'
else: table = 'hiddenurl'
res = self.con.execute('select rowid from %s where fromid=%d and toid=%d' % (table, fromid, toid)).fetchone()
if res == None:
#we generate nodes as we need them/use them
self.con.execute('insert into %s (fromid,toid,strength) values (%d,%d,%f)' % (table, fromid, toid, strength))
else:
rowid = res[0]
self.con.execute('update %s set strength=%f where rowid=%d' % (table, strength, rowid))
def generatehiddennode(self, wordids, urls):
#generates new nodes for searches we haven't done yet
if len(wordids) > 3: return None
#check to see if we've created a node for this set of words
createkey = '_'.join(sorted([str(wi) for wi in wordids])) #sorting ensures any combination of these words
res = self.con.execute("select rowid from hiddennode where create_key='%s'" % createkey).fetchone()
#if we haven't seen this set of words
if res == None:
cur = self.con.execute("insert into hiddennode (create_key) values ('%s')" % createkey)
hiddenid = cur.lastrowid
for wordid in wordids:
self.setstrength(wordid, hiddenid, 0, 1.0/len(wordids))
for urlid in urls:
self.setstrength(hiddenid, urlid, 1, 0.1)
self.con.commit()
def getallhiddenids(self, wordids, urlids):
l1 = {}
for wordid in wordids:
cur = self.con.execute('select toid from wordhidden where fromid=%d' % wordid)
for row in cur: l1[row[0]] = 1
for urlid in urlids:
cur = self.con.execute('select fromid from hiddenurl where toid=%d' % urlid)
for row in cur: l1[row[0]] = 1
return l1.keys()
#load weights into memory for speeeed
def setupnetwork(self, wordids, urlids):
#values lists
self.wordids = wordids #current list of words we're searching for
self.hiddenids = self.getallhiddenids(wordids, urlids) #current list of hidden ids relevant to our input wordids and urlids
self.urlids = urlids
#node outputs
self.ai = [1.0]*len(self.wordids) #input layer outputs for each word
self.ah = [1.0]*len(self.hiddenids) #hidden layer outputs
self.ao = [1.0]*len(self.urlids) #output layer outputs
#create weights matrix
self.wi = [[self.getstrength(wordid, hiddenid, 0) #2d array of weights between input array and hidden array
for hiddenid in self.hiddenids] #for each word what are the weights of all relevant hidden neurons
for wordid in self.wordids]
self.wo = [[self.getstrength(hiddenid, urlid, 1) #same as wi, but from hidden layer to output layer
for urlid in self.urlids]
for hiddenid in self.hiddenids]
def feedforward(self):
#only query words for inputs
for i in xrange(len(self.wordids)): #reset input layer values to 1
self.ai[i] = 1.0
#hidden activations
for j in xrange(len(self.hiddenids)):
tot = 0.0
for i in xrange(len(self.wordids)): #iterate through weights 2d array and apply to input layer strength
tot += self.ai[i]*self.wi[i][j]
self.ah[j] = tanh(tot) #set hidden layer outputs to tanh of sum of input weights axon=tanh(sum(dendrites))
#output activations (feed forward from hidden layer)
for k in xrange(len(self.urlids)):
tot = 0.0
for j in xrange(len(self.hiddenids)):
tot += self.ah[j]*self.wo[j][k]
self.ao[k] = tanh(tot)
#return the outputs of the output layer
return self.ao[:]
def backpropagate(self, targets, N=0.5):
        #calculate all errors for the output layer
output_deltas = [0.0] * len(self.urlids)
for k in xrange(len(self.urlids)):
error = targets[k] - self.ao[k]
output_deltas[k] = dtanh(self.ao[k]) * error
        #do the same for the hidden layer
hidden_deltas = [0.0] * len(self.hiddenids)
for j in xrange(len(self.hiddenids)):
error = 0.0
for k in xrange(len(self.urlids)):
error += output_deltas[k]*self.wo[j][k]
hidden_deltas[j] = dtanh(self.ah[j])*error
#update the weights
for j in xrange(len(self.hiddenids)):
for k in xrange(len(self.urlids)):
change = output_deltas[k]*self.ah[j]
self.wo[j][k] = self.wo[j][k] + N*change
#update input weights
for j in xrange(len(self.wordids)):
for k in xrange(len(self.hiddenids)):
change = hidden_deltas[k]*self.ai[j]
self.wi[j][k] = self.wi[j][k] + N*change
def trainquery(self, wordids, urlids, selectedurl):
#generate the hidden nodes if we have new words
self.generatehiddennode(wordids, urlids)
self.setupnetwork(wordids, urlids)
self.feedforward()
targets = [0.0]*len(urlids)
targets[urlids.index(selectedurl)] = 1.0
self.backpropagate(targets)
self.updatedatabase()
def updatedatabase(self):
#save our instance variables into the database
for i in xrange(len(self.wordids)):
for j in xrange(len(self.hiddenids)):
self.setstrength(self.wordids[i], self.hiddenids[j], 0, self.wi[i][j])
for i in xrange(len(self.hiddenids)):
for j in xrange(len(self.urlids)):
self.setstrength(self.hiddenids[i],self.urlids[j], 1, self.wo[i][j])
self.con.commit()
def getresult(self, wordids, urlids):
self.setupnetwork(wordids, urlids)
return self.feedforward()
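# Illustrative usage sketch of SearchNet (assumption: the word and url ids come
# from a separate crawler index; the integers below are made-up row ids):
#
#   net = SearchNet('nn.db')
#   net.maketables()                        # only needed the first time
#   wordids, urlids = [101, 102], [201, 202, 203]
#   net.generatehiddennode(wordids, urlids)
#   net.getresult(wordids, urlids)          # feed-forward relevance scores per url
#   net.trainquery(wordids, urlids, 201)    # user picked url 201 -> backpropagate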
| mit | -4,046,923,939,482,413,000 | 36.011976 | 130 | 0.695195 | false |
dfdx2/ancfinder | scripts/update_311.py | 1 | 2533 | import datetime, json, urllib2, os, errno, requests
# Truncate the output file so we start with fresh info (the handle is only needed to clear old data, so close it right away)
open('data/311.json', 'w').close()
issues = []
working = {'issues':issues}
data = {}
# Get date in the past to start
start_date = (datetime.datetime.today() + datetime.timedelta(-180)).isoformat()
# Request info from SeeClickFix API
url = 'https://seeclickfix.com/api/v2/issues?place_url=district-of-columbia&&after='+start_date+'&page=1&per_page=100'
response = urllib2.urlopen(url)
info = json.load(response)
endpoint = info['metadata']['pagination']['pages']
page = 1
while page < endpoint:
url = 'https://seeclickfix.com/api/v2/issues?place_url=district-of-columbia&&after='+start_date+'&page='+str(page)+'&per_page=100'
response = urllib2.urlopen(url)
info = json.load(response)
working['issues'] += info['issues']
page +=1
#Locate in ANC using lat/long coordinates, then calculate the totals
for issue in working['issues']:
url = 'http://gis.govtrack.us/boundaries/dc-smd-2013/?contains='+str(issue['lat'])+','+str(issue['lng'])
request = requests.get(url)
info = json.loads(request.text)
try:
smd = info['objects'][0]['external_id']
anc = info['objects'][0]['external_id'][:2]
variety = issue['summary']
print smd, issue['lng'], issue['lat'], variety
if anc in data:
if smd in data[anc]['smds']:
data[anc]['smds'][smd]['total'] += 1
else:
data[anc]['smds'][smd] = {}
data[anc]['smds'][smd]['total'] = 1
data[anc]['smds'][smd]['types'] = {}
data[anc]['total'] += 1
else:
data[anc] = {}
data[anc]['total'] = 1
data[anc]['types'] = {}
data[anc]['smds'] = {}
data[anc]['smds'][smd] = {}
data[anc]['smds'][smd]['total'] = 1
data[anc]['smds'][smd]['types'] = {}
if variety in data[anc]['types']:
data[anc]['types'][variety] += 1
if variety in data[anc]['smds'][smd]['types']:
data[anc]['smds'][smd]['types'][variety] += 1
else:
data[anc]['smds'][smd]['types'][variety] = 1
else:
data[anc]['types'][variety] = 1
data[anc]['smds'][smd]['types'][variety] = 1
except IndexError:
continue
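# Illustrative shape of the aggregate built above (assumption: the ANC/SMD ids
# and counts are invented; real keys come from the govtrack boundary lookup):
#
#   {"1A": {"total": 42,
#           "types": {"Pothole": 10, "Graffiti": 3},
#           "smds": {"1A01": {"total": 7, "types": {"Pothole": 2}}}}}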
# Save the JSON file
with open('data/311.json', 'w') as output:
json.dump(data, output, sort_keys=True, indent=4)
| cc0-1.0 | 4,926,069,178,004,750,000 | 35.185714 | 134 | 0.562179 | false |
aveshagarwal/openshift-ansible | roles/lib_openshift/src/lib/storageclass.py | 18 | 3122 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class StorageClassConfig(object):
    ''' Handle storageclass options '''
# pylint: disable=too-many-arguments
def __init__(self,
name,
provisioner,
parameters=None,
annotations=None,
default_storage_class="false",
api_version='v1',
kubeconfig='/etc/origin/master/admin.kubeconfig',
mount_options=None,
reclaim_policy=None):
''' constructor for handling storageclass options '''
self.name = name
self.parameters = parameters
self.annotations = annotations
self.provisioner = provisioner
self.api_version = api_version
self.default_storage_class = str(default_storage_class).lower()
self.kubeconfig = kubeconfig
self.mount_options = mount_options
self.reclaim_policy = reclaim_policy
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a storageclass dict '''
self.data['apiVersion'] = self.api_version
self.data['kind'] = 'StorageClass'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['annotations'] = {}
if self.annotations is not None:
self.data['metadata']['annotations'] = self.annotations
self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
self.default_storage_class
self.data['provisioner'] = self.provisioner
self.data['parameters'] = {}
if self.parameters is not None:
self.data['parameters'].update(self.parameters)
# default to aws if no params were passed
else:
self.data['parameters']['type'] = 'gp2'
self.data['mountOptions'] = self.mount_options or []
if self.reclaim_policy is not None:
self.data['reclaimPolicy'] = self.reclaim_policy
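# Illustrative sketch of building a config with the class above (assumption: an
# AWS gp2-style storage class; the name and parameters are placeholders):
#
#   sc = StorageClassConfig(name='gp2',
#                           provisioner='kubernetes.io/aws-ebs',
#                           parameters={'type': 'gp2', 'encrypted': 'true'},
#                           default_storage_class='true',
#                           reclaim_policy='Delete')
#   sc.data  # dict ready to be serialized for `oc create -f -`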
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class StorageClass(Yedit):
''' Class to model the oc storageclass object '''
annotations_path = "metadata.annotations"
provisioner_path = "provisioner"
parameters_path = "parameters"
mount_options_path = "mountOptions"
reclaim_policy_path = "reclaimPolicy"
kind = 'StorageClass'
def __init__(self, content):
'''StorageClass constructor'''
super(StorageClass, self).__init__(content=content)
def get_annotations(self):
        ''' get the annotations '''
return self.get(StorageClass.annotations_path) or {}
def get_parameters(self):
        ''' get the storageclass parameters'''
return self.get(StorageClass.parameters_path) or {}
def get_mount_options(self):
''' get mount options'''
return self.get(StorageClass.mount_options_path) or []
def get_reclaim_policy(self):
''' get reclaim policy'''
return self.get(StorageClass.reclaim_policy_path)
| apache-2.0 | -6,098,967,058,869,367,000 | 32.934783 | 100 | 0.607944 | false |
izpack/izpack | izpack-wrapper/src/main/resources/utils/wrappers/izpack2jnlp/setup.py | 26 | 1070 | #!/usr/bin/env python
# ........................................................................... #
#
# IzPack - Copyright 2008 Julien Ponge, All Rights Reserved.
#
# http://izpack.org/
# http://izpack.codehaus.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ........................................................................... #
from distutils.core import setup
import py2exe
setup(
console = [{
'script': 'izpack2jnlp.py',
'icon_resources': [(0, 'app.ico')]
}],
script_args=['py2exe', '--bundle-files', '1']
)
| apache-2.0 | 2,066,892,054,722,120,400 | 33.666667 | 79 | 0.58972 | false |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/Apps/Compose/GUI/ArgumentsPanel.py | 6 | 6027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Kamaelia.Support.Tk.Scrolling import ScrollingMenu
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Tkinter
import pprint
class ArgumentsPanel(Tkinter.Frame):
def __init__(self, parent, theclass):
Tkinter.Frame.__init__(self, parent)
self.theclass = theclass
# pprint.pprint(theclass)
# build widgets
row=0
if self.theclass['classdoc']:
self.classdoclabel = Tkinter.Label(self, text = self.theclass['classdoc'], justify="left")
self.classdoclabel['font'] = " ".join(self.classdoclabel['font'].split(" ")[0:2])
self.classdoclabel.grid(row=row, column=0,columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
if self.theclass['initdoc']:
self.initdoclabel = Tkinter.Label(self, text = self.theclass['initdoc'], justify="left")
self.initdoclabel['font'] = " ".join(self.initdoclabel['font'].split(" ")[0:2])
self.initdoclabel.grid(row=row, column=0, columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
self.label = Tkinter.Label(self, text="ARGUMENTS:")
self.label.grid(row=row, column=0, columnspan=2,sticky=Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
# enumerate std args
self.args = []
for arg in self.theclass['args']['std']:
arglabel = Tkinter.Label(self, text=arg[0])
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
default=""
if len(arg)>=2:
default = arg[1]
svar.set(default)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (arg[0], svar, default) )
row+=1
# now do * and ** args
for argname in ["*","**"]:
if self.theclass['args'][argname]:
arglabel = Tkinter.Label(self, text=argname)
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
arglabel = None
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (argname, svar, "") )
row+=1
# self.rowconfigure(row, weight=1)
# self.grid()
def getDef(self):
return { "name" : self.theclass['class'],
"module" : self.theclass['module'],
"instantiation" : self.getInstantiation(),
"configuration" : self.getConfiguration()
}
def getConfiguration(self):
"""Return the instantiation string"""
argstr = ""
prefix = ""
SEQUENTIALARGS = []
TUPLEARGS = None
DICTARGS = None
for (argname, svar, default) in self.args:
unspecified = False
value = None
text = svar.get().strip()
default = default.strip()
if argname != "*" and argname != "**":
if default=="" or text != default:
if not text:
unspecified = True
value = text
SEQUENTIALARGS.append( [argname, unspecified,value, default ] )
else:
if text:
if argname == "*":
TUPLEARGS = text
if argname == "**":
DICTARGS = text
return { "args" : SEQUENTIALARGS,
"tupleargs" : TUPLEARGS ,
"dictargs" : DICTARGS,
"theclass" : self.theclass["theclass"], # FIXME: Is this a mistake, should we pass everything out?
}
def getInstantiation(self):
"""Return the instantiation string"""
argstr = ""
prefix = ""
for (argname, svar, default) in self.args:
text = svar.get().strip()
default = default.strip()
if argname != "*" and argname != "**":
if argname[0]=="[" and argname[-1]=="]":
if text:
argname=argname[1:-1]
argstr = argstr + prefix + argname + " = " + text
prefix=", "
elif (default=="" or text != default):
if not text:
text = "<<unspecified>>"
argstr = argstr + prefix + argname + " = " + text
prefix=", "
else:
if text:
argstr = argstr + prefix + text
prefix=", "
return argstr
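# Editor's illustrative note -- not part of the original module.  For a
# hypothetical component whose std args are [("size", "(10,10)"), ("colour", "")],
# leaving "size" at its default and typing "(255,0,0)" into "colour" makes
# getInstantiation() return:
#
#     'colour = (255,0,0)'
#
# i.e. arguments still equal to a non-empty default are omitted from the string.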
| apache-2.0 | 2,363,039,460,338,130,000 | 36.90566 | 115 | 0.51767 | false |
NoahFlowa/glowing-spoon | forms.py | 2 | 1139 | from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
class SignupForm(Form):
first_name = StringField('First name', validators=[DataRequired("Please enter your first name.")])
last_name = StringField('Last name', validators=[DataRequired("Please enter your last name.")])
email = StringField('Email', validators=[DataRequired("Please enter your email address."), Email("Please enter your email address.")])
password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
submit = SubmitField('Sign up')
class LoginForm(Form):
    email = StringField('Email', validators=[DataRequired("Please enter your email address."), Email("Please enter a valid email address.")])
password = PasswordField('Password', validators=[DataRequired("Please enter a password.")])
submit = SubmitField("Sign in")
class AddressForm(Form):
address = StringField('Address', validators=[DataRequired("Please enter an address.")])
submit = SubmitField("Search") | apache-2.0 | -1,769,809,363,986,679,800 | 59 | 159 | 0.75417 | false |
tangfeng1/flask | flask/helpers.py | 133 | 36499 | # -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the intended directory.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
"""Request contexts disappear when the response is started on the server.
This is done for efficiency reasons and to make it less likely to encounter
memory leaks with badly written WSGI middlewares. The downside is that if
you are using streamed responses, the generator cannot access request bound
information any more.
This function however can help you keep the context around for longer::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
@stream_with_context
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(generate())
Alternatively it can also be used around a specific generator::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(stream_with_context(generate()))
.. versionadded:: 0.9
"""
try:
gen = iter(generator_or_function)
except TypeError:
def decorator(*args, **kwargs):
gen = generator_or_function()
return stream_with_context(gen)
return update_wrapper(decorator, generator_or_function)
def generator():
ctx = _request_ctx_stack.top
if ctx is None:
raise RuntimeError('Attempted to stream with context but '
'there was no context in the first place to keep around.')
with ctx:
# Dummy sentinel. Has to be inside the context block or we're
# not actually keeping the context around.
yield None
# The try/finally is here so that if someone passes a WSGI level
# iterator in we're still running the cleanup logic. Generators
# don't need that because they are closed on their destruction
# automatically.
try:
for item in gen:
yield item
finally:
if hasattr(gen, 'close'):
gen.close()
# The trick is to start the generator. Then the code execution runs until
# the first dummy None is yielded at which point the context was already
# pushed. This item is discarded. Then when the iteration continues the
# real generator is executed.
wrapped_g = generator()
next(wrapped_g)
return wrapped_g
def make_response(*args):
"""Sometimes it is necessary to set additional headers in a view. Because
views do not have to return response objects but can return a value that
is converted into a response object by Flask itself, it becomes tricky to
add headers to it. This function can be called instead of using a return
and you will get a response object which you can use to attach headers.
    If your view looked like this and you want to add a new header::
def index():
return render_template('index.html', foo=42)
You can now do something like this::
def index():
response = make_response(render_template('index.html', foo=42))
response.headers['X-Parachutes'] = 'parachutes are cool'
return response
This function accepts the very same arguments you can return from a
view function. This for example creates a response with a 404 error
code::
response = make_response(render_template('not_found.html'), 404)
The other use case of this function is to force the return value of a
view function into a response which is helpful with view
decorators::
response = make_response(view_function())
response.headers['X-Parachutes'] = 'parachutes are cool'
Internally this function does the following things:
- if no arguments are passed, it creates a new response argument
- if one argument is passed, :meth:`flask.Flask.make_response`
is invoked with it.
- if more than one argument is passed, the arguments are passed
to the :meth:`flask.Flask.make_response` function as tuple.
.. versionadded:: 0.6
"""
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is ``None``, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
function results in a :exc:`~werkzeug.routing.BuildError` when the current
app does not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
it is not ``None``, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.url_build_error_handlers.append(external_url_handler)
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to ``True``, an absolute URL is generated. Server
address can be changed via ``SERVER_NAME`` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
behavior uses the same scheme as the current request, or
``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
request context is available. As of Werkzeug 0.10, this also can be set
to an empty string to build protocol-relative URLs.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
# If request specific information is available we have some extra
# features that support "relative" URLs.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
else:
# TODO: get rid of this deprecated functionality in 1.0
if '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint
elif endpoint.startswith('.'):
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the URLs external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL generation. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
url_adapter.url_scheme = scheme
try:
rv = url_adapter.build(endpoint, values, method=method,
force_external=external)
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor)
return rv
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named :file:`_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable of macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute)
def flash(message, category='message'):
"""Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category.
"""
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
# are always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=[]):
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to ``True``, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (``True`` gives a tuple, where ``False`` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to ``True`` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
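# Editor's illustrative example -- not part of Flask itself: pulling only the
# error messages, together with their categories, from Python code.
#
#     messages = get_flashed_messages(with_categories=True,
#                                     category_filter=['error'])
#     # -> e.g. [('error', u'Something went wrong')]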
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
attachment_filename=None, add_etags=True,
cache_timeout=None, conditional=False):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an ``X-Sendfile`` header. This however
requires support of the underlying webserver for ``X-Sendfile``.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
Please never pass filenames to this function from user sources;
you should use :func:`send_from_directory` instead.
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
:param filename_or_fp: the filename of the file to send in `latin-1`.
This is relative to the :attr:`~Flask.root_path`
if a relative path is specified.
Alternatively a file object might be provided in
which case ``X-Sendfile`` might not work and fall
back to the traditional method. Make sure that the
file pointer is positioned at the start of data to
send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to ``True`` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to ``False`` to disable attaching of etags.
:param conditional: set to ``True`` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When ``None``
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
"""
mtime = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
file = None
else:
from warnings import warn
file = filename_or_fp
filename = getattr(file, 'name', None)
# XXX: this behavior is now deprecated because it was unreliable.
# removed in Flask 1.0
if not attachment_filename and not mimetype \
and isinstance(filename, string_types):
warn(DeprecationWarning('The filename support for file objects '
'passed to send_file is now deprecated. Pass an '
                'attachment_filename if you want mimetypes to be guessed.'),
stacklevel=2)
if add_etags:
warn(DeprecationWarning('In future flask releases etags will no '
'longer be generated for file objects passed to the send_file '
'function because this behavior was unreliable. Pass '
'filenames instead if possible, otherwise attach an etag '
'yourself based on another value'), stacklevel=2)
if filename is not None:
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if mimetype is None and (filename or attachment_filename):
mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = Headers()
if as_attachment:
if attachment_filename is None:
if filename is None:
raise TypeError('filename unavailable, required for '
'sending as attachment')
attachment_filename = os.path.basename(filename)
headers.add('Content-Disposition', 'attachment',
filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers['X-Sendfile'] = filename
headers['Content-Length'] = os.path.getsize(filename)
data = None
else:
if file is None:
file = open(filename, 'rb')
mtime = os.path.getmtime(filename)
headers['Content-Length'] = os.path.getsize(filename)
data = wrap_file(request.environ, file)
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
# if we know the file modification date, we can store it as
# the time of the last modification.
if mtime is not None:
rv.last_modified = int(mtime)
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
try:
rv.set_etag('flask-%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode('utf-8') if isinstance(filename, text_type)
else filename
) & 0xffffffff
))
except OSError:
            warn('Access to %s failed, maybe it does not exist, so etags are '
                 'ignored in the headers' % filename, stacklevel=2)
if conditional:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
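# Editor's illustrative example -- not part of Flask itself.  A typical view
# that returns a file as a download; the route and path are made up.
#
#     @app.route('/reports/latest')
#     def latest_report():
#         return send_file('generated/report.pdf',
#                          mimetype='application/pdf',
#                          as_attachment=True,
#                          attachment_filename='report.pdf')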
def safe_join(directory, filename):
"""Safely join `directory` and `filename`.
Example usage::
@app.route('/wiki/<path:filename>')
def wiki_page(filename):
filename = safe_join(app.config['WIKI_FOLDER'], filename)
with open(filename, 'rb') as fd:
content = fd.read() # Read and process the file content...
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
:raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
would fall out of `directory`.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
raise NotFound()
if os.path.isabs(filename) or \
filename == '..' or \
filename.startswith('../'):
raise NotFound()
return os.path.join(directory, filename)
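# Editor's illustrative example -- not part of Flask itself: safe_join() joins
# plain relative names but turns traversal attempts into a 404.
#
#     safe_join('/var/www/uploads', 'photo.png')      # '/var/www/uploads/photo.png'
#     safe_join('/var/www/uploads', '../etc/passwd')  # raises NotFound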
def send_from_directory(directory, filename, **options):
"""Send a file from a given directory with :func:`send_file`. This
is a secure way to quickly expose static files from an upload folder
or something similar.
Example usage::
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
.. admonition:: Sending files and Performance
It is strongly recommended to activate either ``X-Sendfile`` support in
your webserver or (if no authentication happens) to tell the webserver
to serve files for the given path on its own without calling into the
web application for improved performance.
.. versionadded:: 0.5
:param directory: the directory where all the files are stored.
:param filename: the filename relative to that directory to
download.
:param options: optional keyword arguments that are directly
forwarded to :func:`send_file`.
"""
filename = safe_join(directory, filename)
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if not os.path.isfile(filename):
raise NotFound()
options.setdefault('conditional', True)
return send_file(filename, **options)
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
mod = sys.modules[import_name]
filepath = getattr(mod, '__file__', None)
# If we don't have a filepath it might be because we are a
# namespace package. In this case we pick the root path from the
# first module that is contained in our package.
if filepath is None:
raise RuntimeError('No root path can be found for the provided '
'module "%s". This can happen because the '
'module came from an import hook that does '
'not provide file name information or because '
'it\'s a namespace package. In this case '
'the root path needs to be explicitly '
'provided.' % import_name)
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
def _matching_loader_thinks_module_is_package(loader, mod_name):
"""Given the loader that loaded a module and the module this function
attempts to figure out if the given module is actually a package.
"""
# If the loader can tell us if something is a package, we can
# directly ask the loader.
if hasattr(loader, 'is_package'):
return loader.is_package(mod_name)
# importlib's namespace loaders do not have this functionality but
# all the modules it loads are packages, so we can take advantage of
# this information.
elif (loader.__class__.__module__ == '_frozen_importlib' and
loader.__class__.__name__ == 'NamespaceLoader'):
return True
# Otherwise we need to fail with an error that explains what went
# wrong.
raise AttributeError(
('%s.is_package() method is missing but is required by Flask of '
'PEP 302 import hooks. If you do not use import hooks and '
'you encounter this error please file a bug against Flask.') %
loader.__class__.__name__)
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
        # In case the root module is a package we need to chop off the
# rightmost part. This needs to go through a helper function
# because of python 3.3 namespace packages.
if _matching_loader_thinks_module_is_package(
loader, root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
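# Editor's illustrative example -- not part of Flask itself: caching an
# expensive attribute in a thread-safe way.  Class and helper names are
# hypothetical.
#
#     class Catalogue(object):
#         @locked_cached_property
#         def index(self):
#             return build_index()   # computed once; later reads hit the cache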
class _PackageBoundObject(object):
def __init__(self, import_name, template_folder=None, root_path=None):
#: The name of the package or module. Do not change this once
#: it was set by the constructor.
self.import_name = import_name
#: location of the templates. ``None`` if templates should not be
#: exposed.
self.template_folder = template_folder
if root_path is None:
root_path = get_root_path(self.import_name)
#: Where is the app root located?
self.root_path = root_path
self._static_folder = None
self._static_url_path = None
def _get_static_folder(self):
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
def _set_static_folder(self, value):
self._static_folder = value
static_folder = property(_get_static_folder, _set_static_folder, doc='''
The absolute path to the configured static folder.
''')
del _get_static_folder, _set_static_folder
def _get_static_url_path(self):
if self._static_url_path is not None:
return self._static_url_path
if self.static_folder is not None:
return '/' + os.path.basename(self.static_folder)
def _set_static_url_path(self, value):
self._static_url_path = value
static_url_path = property(_get_static_url_path, _set_static_url_path)
del _get_static_url_path, _set_static_url_path
@property
def has_static_folder(self):
"""This is ``True`` if the package bound object's container has a
folder for static files.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@locked_cached_property
def jinja_loader(self):
"""The Jinja loader for this package bound object.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def get_send_file_max_age(self, filename):
"""Provides default cache_timeout for the :func:`send_file` functions.
By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
the configuration of :data:`~flask.current_app`.
Static file functions such as :func:`send_from_directory` use this
function, and :func:`send_file` calls this function on
:data:`~flask.current_app` when the given cache_timeout is ``None``. If a
cache_timeout is given in :func:`send_file`, that timeout is used;
otherwise, this method is called.
This allows subclasses to change the behavior when sending files based
on the filename. For example, to set the cache timeout for .js files
to 60 seconds::
class MyFlask(flask.Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.js'):
return 60
return flask.Flask.get_send_file_max_age(self, name)
.. versionadded:: 0.9
"""
return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']
def send_static_file(self, filename):
"""Function used internally to send static files from the static
folder to the browser.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError('No static folder for this object')
# Ensure get_send_file_max_age is called in all cases.
# Here, we ensure get_send_file_max_age is called for Blueprints.
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.static_folder, filename,
cache_timeout=cache_timeout)
def open_resource(self, resource, mode='rb'):
"""Opens a resource from the application's resource folder. To see
how this works, consider the following folder structure::
/myapplication.py
/schema.sql
/static
/style.css
/templates
/layout.html
/index.html
If you want to open the :file:`schema.sql` file you would do the
following::
with app.open_resource('schema.sql') as f:
contents = f.read()
do_something_with(contents)
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
if mode not in ('r', 'rb'):
raise ValueError('Resources can only be opened for reading')
return open(os.path.join(self.root_path, resource), mode)
| bsd-3-clause | 6,769,140,311,341,379,000 | 39.554444 | 85 | 0.628264 | false |
AlphaX2/FotoShareN9 | 1.6.1/fotoshare/opt/FotoShareN9/plugins/flickr/libs/flickrapi/reportinghttp.py | 10 | 2712 | # -*- encoding: utf-8 -*-
'''HTTPHandler that supports a callback method for progress reports.
'''
import urllib2
import httplib
import logging
__all__ = ['urlopen']
logging.basicConfig()
LOG = logging.getLogger(__name__)
progress_callback = None
class ReportingSocket(object):
'''Wrapper around a socket. Gives progress report through a
callback function.
'''
min_chunksize = 10240
def __init__(self, socket):
self.socket = socket
def sendall(self, bits):
'''Sends all data, calling the callback function for every
sent chunk.
'''
LOG.debug("SENDING: %s..." % bits[0:30])
total = len(bits)
sent = 0
chunksize = max(self.min_chunksize, total // 100)
while len(bits) > 0:
send = bits[0:chunksize]
self.socket.sendall(send)
sent += len(send)
if progress_callback:
progress = float(sent) / total * 100
progress_callback(progress, sent == total)
bits = bits[chunksize:]
def makefile(self, mode, bufsize):
'''Returns a file-like object for the socket.'''
return self.socket.makefile(mode, bufsize)
def close(self):
'''Closes the socket.'''
return self.socket.close()
class ProgressHTTPConnection(httplib.HTTPConnection):
'''HTTPConnection that gives regular progress reports during
sending of data.
'''
def connect(self):
'''Connects to a HTTP server.'''
httplib.HTTPConnection.connect(self)
self.sock = ReportingSocket(self.sock)
class ProgressHTTPHandler(urllib2.HTTPHandler):
'''HTTPHandler that gives regular progress reports during sending
of data.
'''
def http_open(self, req):
return self.do_open(ProgressHTTPConnection, req)
def set_callback(method):
'''Sets the callback function to use for progress reports.'''
global progress_callback # IGNORE:W0603
if not hasattr(method, '__call__'):
raise ValueError('Callback method must be callable')
progress_callback = method
def urlopen(url_or_request, callback, body=None):
'''Opens an URL using the ProgressHTTPHandler.'''
set_callback(callback)
opener = urllib2.build_opener(ProgressHTTPHandler)
return opener.open(url_or_request, body)
if __name__ == '__main__':
def upload(progress, finished):
'''Upload progress demo'''
LOG.info("%3.0f - %s" % (progress, finished))
    conn = urlopen("http://www.flickr.com/", upload, 'x' * 10245)
data = conn.read()
LOG.info("Read data")
print data[:100].split('\n')[0]
| gpl-3.0 | 4,877,104,695,099,435,000 | 25.588235 | 69 | 0.610988 | false |
CyrilPeponnet/Archipel | ArchipelAgent/archipel-agent-vmparking/setup.py | 4 | 3362 | #
# setup.py
#
# Copyright (C) 2010 Antoine Mercadal <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
VERSION = '0.6.0'
AUTHOR = 'Antoine Mercadal'
MAIL = '[email protected]'
URL = 'http://archipelproject.org'
LICENSE = 'AGPL'
NAME = 'archipel-agent-vmparking'
SHORTDESCRIPTION = "Handle the virtual machine parking"
LONGDESCRIPTION = ""
ENTRY_POINTS = { 'archipel.plugin.hypervisor' : [
'factory=archipelagentvmparking:make_archipel_plugin'],
'archipel.plugin.virtualmachine' : [
'factory=archipelagentvmparking:make_archipel_plugin'],
'archipel.plugin' : [
'version=archipelagentvmparking:version']}
RPM_REQUIRED_DEPS = "archipel-core"
RPM_POST_INSTALL = "%post\narchipel-initinstall -m {0}\n".format(NAME)
## HACK FOR DEPS IN RPMS
from setuptools.command.bdist_rpm import bdist_rpm
def custom_make_spec_file(self):
spec = self._original_make_spec_file()
lineDescription = "%description"
spec.insert(spec.index(lineDescription) - 1, "requires: %s" % RPM_REQUIRED_DEPS)
spec.append(RPM_POST_INSTALL)
return spec
bdist_rpm._original_make_spec_file = bdist_rpm._make_spec_file
bdist_rpm._make_spec_file = custom_make_spec_file
## END OF HACK
setup(name=NAME,
version=VERSION,
description=SHORTDESCRIPTION,
long_description=LONGDESCRIPTION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: System :: Emulators',
'Topic :: System :: Operating System'],
keywords='archipel, virtualization, libvirt, orchestration',
author=AUTHOR,
author_email=MAIL,
url=URL,
license=LICENSE,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
"archipel-core>=0.6.0beta"
],
entry_points=ENTRY_POINTS
)
| agpl-3.0 | 8,921,930,473,874,930,000 | 40.506173 | 84 | 0.640988 | false |
pantaloons/4charm | libvpx/tools/intersect-diffs.py | 98 | 2364 | #!/usr/bin/env python
## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
"""Calculates the "intersection" of two unified diffs.
Given two diffs, A and B, it finds all hunks in B that had non-context lines
in A and prints them to stdout. This is useful to determine the hunks in B that
are relevant to A. The resulting file can be applied with patch(1) on top of A.
"""
__author__ = "[email protected]"
import sys
import diff
def FormatDiffHunks(hunks):
"""Re-serialize a list of DiffHunks."""
r = []
last_header = None
for hunk in hunks:
this_header = hunk.header[0:2]
if last_header != this_header:
r.extend(hunk.header)
last_header = this_header
else:
r.extend(hunk.header[2])
r.extend(hunk.lines)
r.append("\n")
return "".join(r)
def ZipHunks(rhs_hunks, lhs_hunks):
"""Join two hunk lists on filename."""
for rhs_hunk in rhs_hunks:
rhs_file = rhs_hunk.right.filename.split("/")[1:]
for lhs_hunk in lhs_hunks:
lhs_file = lhs_hunk.left.filename.split("/")[1:]
if lhs_file != rhs_file:
continue
yield (rhs_hunk, lhs_hunk)
def main():
old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))]
new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))]
out_hunks = []
# Join the right hand side of the older diff with the left hand side of the
# newer diff.
for old_hunk, new_hunk in ZipHunks(old_hunks, new_hunks):
if new_hunk in out_hunks:
continue
old_lines = old_hunk.right
new_lines = new_hunk.left
# Determine if this hunk overlaps any non-context line from the other
for i in old_lines.delta_line_nums:
if i in new_lines:
out_hunks.append(new_hunk)
break
if out_hunks:
print FormatDiffHunks(out_hunks)
sys.exit(1)
if __name__ == "__main__":
main()
| mit | 4,926,596,401,171,371,000 | 30.105263 | 79 | 0.619289 | false |
SatelliteQE/robottelo | tests/foreman/api/test_hostcollection.py | 1 | 14844 | """Unit tests for host collections.
:Requirement: Hostcollection
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: HostCollections
:Assignee: swadeley
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice
from random import randint
import pytest
from broker import VMBroker
from nailgun import entities
from requests.exceptions import HTTPError
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import parametrized
from robottelo.datafactory import valid_data_list
from robottelo.hosts import ContentHost
@pytest.fixture(scope='module')
def fake_hosts(module_org):
"""Create content hosts that can be shared by tests."""
hosts = [entities.Host(organization=module_org).create() for _ in range(2)]
return hosts
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_name(module_org, name):
"""Create host collections with different names.
:id: 8f2b9223-f5be-4cb1-8316-01ea747cae14
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate name.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(name=name, organization=module_org).create()
assert host_collection.name == name
@pytest.mark.tier1
def test_positive_list(module_org):
"""Create new host collection and then retrieve list of all existing
host collections
:id: 6ae32df2-b917-4830-8709-15fb272b76c1
:BZ: 1331875
:expectedresults: Returned list of host collections for the system
contains at least one collection
:CaseImportance: Critical
"""
entities.HostCollection(organization=module_org).create()
hc_list = entities.HostCollection().search()
assert len(hc_list) >= 1
@pytest.mark.tier1
def test_positive_list_for_organization():
"""Create host collection for specific organization. Retrieve list of
host collections for that organization
:id: 5f9de8ab-2c53-401b-add3-57d86c97563a
:expectedresults: The host collection was successfully created and
present in the list of collections for specific organization
:CaseImportance: Critical
"""
org = entities.Organization().create()
hc = entities.HostCollection(organization=org).create()
hc_list = entities.HostCollection(organization=org).search()
assert len(hc_list) == 1
assert hc_list[0].id == hc.id
@pytest.mark.parametrize('desc', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_description(module_org, desc):
"""Create host collections with different descriptions.
:id: 9d13392f-8d9d-4ff1-8909-4233e4691055
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate description.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(description=desc, organization=module_org).create()
assert host_collection.description == desc
@pytest.mark.tier1
def test_positive_create_with_limit(module_org):
"""Create host collections with different limits.
:id: 86d9387b-7036-4794-96fd-5a3472dd9160
:expectedresults: The host collection was successfully created and has
appropriate limit.
:CaseImportance: Critical
"""
for _ in range(5):
limit = randint(1, 30)
host_collection = entities.HostCollection(max_hosts=limit, organization=module_org).create()
assert host_collection.max_hosts == limit
@pytest.mark.parametrize("unlimited", [False, True])
@pytest.mark.tier1
def test_positive_create_with_unlimited_hosts(module_org, unlimited):
"""Create host collection with different values of 'unlimited hosts'
parameter.
:id: d385574e-5794-4442-b6cd-e5ded001d877
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate 'unlimited hosts' parameter value.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
max_hosts=None if unlimited else 1,
organization=module_org,
unlimited_hosts=unlimited,
).create()
assert host_collection.unlimited_hosts == unlimited
@pytest.mark.tier1
def test_positive_create_with_host(module_org, fake_hosts):
"""Create a host collection that contains a host.
:id: 9dc0ad72-58c2-4079-b1ca-2c4373472f0f
:expectedresults: The host collection can be read back, and it includes
one host.
:CaseImportance: Critical
:BZ: 1325989
"""
host_collection = entities.HostCollection(
host=[fake_hosts[0]], organization=module_org
).create()
assert len(host_collection.host) == 1
@pytest.mark.tier1
def test_positive_create_with_hosts(module_org, fake_hosts):
"""Create a host collection that contains hosts.
:id: bb8d2b42-9a8b-4c4f-ba0c-c56ae5a7eb1d
:expectedresults: The host collection can be read back, and it
references two hosts.
:CaseImportance: Critical
:BZ: 1325989
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
assert len(host_collection.host) == len(fake_hosts)
@pytest.mark.tier2
def test_positive_add_host(module_org, fake_hosts):
"""Add a host to host collection.
:id: da8bc901-7ac8-4029-bb62-af21aa4d3a88
:expectedresults: Host was added to the host collection.
:CaseLevel: Integration
    :BZ: 1325989
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.host_ids = [fake_hosts[0].id]
host_collection = host_collection.update(['host_ids'])
assert len(host_collection.host) == 1
@pytest.mark.upgrade
@pytest.mark.tier2
def test_positive_add_hosts(module_org, fake_hosts):
"""Add hosts to host collection.
:id: f76b4db1-ccd5-47ab-be15-8c7d91d03b22
:expectedresults: Hosts were added to the host collection.
:CaseLevel: Integration
:BZ: 1325989
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_ids = [str(host.id) for host in fake_hosts]
host_collection.host_ids = host_ids
host_collection = host_collection.update(['host_ids'])
assert len(host_collection.host) == len(fake_hosts)
@pytest.mark.tier1
def test_positive_read_host_ids(module_org, fake_hosts):
"""Read a host collection and look at the ``host_ids`` field.
:id: 444a1528-64c8-41b6-ba2b-6c49799d5980
:expectedresults: The ``host_ids`` field matches the host IDs passed in
when creating the host collection.
:CaseImportance: Critical
    :BZ: 1325989
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
assert frozenset(host.id for host in host_collection.host) == frozenset(
host.id for host in fake_hosts
)
@pytest.mark.parametrize('new_name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_name(module_org, new_name):
"""Check if host collection name can be updated
:id: b2dedb99-6dd7-41be-8aaa-74065c820ac6
:parametrized: yes
:expectedresults: Host collection name was successfully updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.name = new_name
assert host_collection.update().name == new_name
@pytest.mark.parametrize('new_desc', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_description(module_org, new_desc):
"""Check if host collection description can be updated
:id: f8e9bd1c-1525-4b5f-a07c-eb6b6e7aa628
:parametrized: yes
:expectedresults: Host collection description was updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.description = new_desc
assert host_collection.update().description == new_desc
@pytest.mark.tier1
def test_positive_update_limit(module_org):
"""Check if host collection limit can be updated
:id: 4eda7796-cd81-453b-9b72-4ef84b2c1d8c
:expectedresults: Host collection limit was updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
max_hosts=1, organization=module_org, unlimited_hosts=False
).create()
for limit in (1, 3, 5, 10, 20):
host_collection.max_hosts = limit
assert host_collection.update().max_hosts == limit
@pytest.mark.tier1
def test_positive_update_unlimited_hosts(module_org):
"""Check if host collection 'unlimited hosts' parameter can be updated
:id: 09a3973d-9832-4255-87bf-f9eaeab4aee8
:expectedresults: Host collection 'unlimited hosts' parameter was
updated
:CaseImportance: Critical
"""
random_unlimited = choice([True, False])
host_collection = entities.HostCollection(
max_hosts=1 if not random_unlimited else None,
organization=module_org,
unlimited_hosts=random_unlimited,
).create()
for unlimited in (not random_unlimited, random_unlimited):
host_collection.max_hosts = 1 if not unlimited else None
host_collection.unlimited_hosts = unlimited
host_collection = host_collection.update(['max_hosts', 'unlimited_hosts'])
assert host_collection.unlimited_hosts == unlimited
@pytest.mark.tier1
def test_positive_update_host(module_org, fake_hosts):
"""Update host collection's host.
:id: 23082854-abcf-4085-be9c-a5d155446acb
:expectedresults: The host collection was updated with a new host.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
host=[fake_hosts[0]], organization=module_org
).create()
host_collection.host_ids = [fake_hosts[1].id]
host_collection = host_collection.update(['host_ids'])
assert host_collection.host[0].id == fake_hosts[1].id
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_update_hosts(module_org, fake_hosts):
"""Update host collection's hosts.
:id: 0433b37d-ae16-456f-a51d-c7b800334861
:expectedresults: The host collection was updated with new hosts.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
new_hosts = [entities.Host(organization=module_org).create() for _ in range(2)]
host_ids = [str(host.id) for host in new_hosts]
host_collection.host_ids = host_ids
host_collection = host_collection.update(['host_ids'])
assert {host.id for host in host_collection.host} == {host.id for host in new_hosts}
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_delete(module_org):
"""Check if host collection can be deleted
:id: 13a16cd2-16ce-4966-8c03-5d821edf963b
:expectedresults: Host collection was successfully deleted
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.delete()
with pytest.raises(HTTPError):
host_collection.read()
@pytest.mark.parametrize('name', **parametrized(invalid_values_list()))
@pytest.mark.tier1
def test_negative_create_with_invalid_name(module_org, name):
"""Try to create host collections with different invalid names
:id: 38f67d04-a19d-4eab-a577-21b8d62c7389
:parametrized: yes
:expectedresults: The host collection was not created
:CaseImportance: Critical
"""
with pytest.raises(HTTPError):
entities.HostCollection(name=name, organization=module_org).create()
@pytest.mark.tier1
def test_positive_add_remove_subscription(module_org, module_ak_cv_lce):
"""Try to bulk add and remove a subscription to members of a host collection.
:id: c4ec5727-eb25-452e-a91f-87cafb16666b
:steps:
1. Create HC, add AK to HC
2. Create product so we can use it's subscription
3. Create some VMs and register them with AK so they are in HC
4. Add the subscription to the members of the Host Collection
5. Assert subscription is added
6. Bulk remove subscription
7. Assert it is removed
:expectedresults: subscription added to, and removed from, members of host collection
:CaseImportance: Critical
"""
    # Create a host collection and append it to the activation key so it becomes available
module_ak_cv_lce.host_collection.append(
entities.HostCollection(organization=module_org).create()
)
# Move HC from Add tab to List tab on AK view
module_ak_cv_lce = module_ak_cv_lce.update(['host_collection'])
# Create a product so we have a subscription to use
product = entities.Product(organization=module_org).create()
prod_name = product.name
product_subscription = entities.Subscription(organization=module_org).search(
query={'search': f'name={prod_name}'}
)[0]
# Create and register VMs as members of Host Collection
with VMBroker(nick='rhel7', host_classes={'host': ContentHost}, _count=2) as hosts:
for client in hosts:
client.install_katello_ca()
client.register_contenthost(module_org.label, module_ak_cv_lce.name)
# Read host_collection back from Satellite to get host_ids
host_collection = module_ak_cv_lce.host_collection[0].read()
host_ids = [host.id for host in host_collection.host]
# Add subscription
# Call nailgun to make the API PUT to members of Host Collection
entities.Host().bulk_add_subscriptions(
data={
"organization_id": module_org.id,
"included": {"ids": host_ids},
"subscriptions": [{"id": product_subscription.id, "quantity": 1}],
}
)
# GET the subscriptions from hosts and assert they are there
for host_id in host_ids:
req = entities.HostSubscription(host=host_id).subscriptions()
assert (
prod_name in req['results'][0]['product_name']
), 'Subscription not applied to HC members'
# Remove the subscription
# Call nailgun to make the API PUT to members of Host Collection
entities.Host().bulk_remove_subscriptions(
data={
"organization_id": module_org.id,
"included": {"ids": host_ids},
"subscriptions": [{"id": product_subscription.id, "quantity": 1}],
}
)
# GET the subscriptions from hosts and assert they are gone
for host_id in host_ids:
req = entities.HostSubscription(host=host_id).subscriptions()
assert not req['results'], 'Subscription not removed from HC members'
| gpl-3.0 | -7,841,114,450,710,041,000 | 31.060475 | 100 | 0.699542 | false |
druids/django-chamber | setup.py | 1 | 1138 | from setuptools import setup, find_packages
from chamber.version import get_version
setup(
name='django-chamber',
version=get_version(),
description='Utilities library meant as a complement to django-is-core.',
author='Lubos Matl, Oskar Hollmann',
author_email='[email protected], [email protected]',
url='http://github.com/druids/django-chamber',
packages=find_packages(include=['chamber']),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
],
install_requires=[
'Django>=2.2',
'Unidecode>=1.1.1',
'pyprind>=2.11.2',
'filemagic>=1.6',
],
extras_require={
'boto3storage': ['django-storages<2.0', 'boto3'],
},
)
| bsd-3-clause | -6,346,957,805,061,011,000 | 31.514286 | 89 | 0.615114 | false |
rgom/Pydev | plugins/org.python.pydev.jython/Lib/cmd.py | 145 | 15026 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
   and completion of command args is done by calling complete_foo() with
   arguments text, line, begidx, endidx. text is the string we are matching
   against; all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = 1
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
import sys
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
self.lastcmd = ''
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s\n'%line)
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
import readline
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# This method used to pull in base class attributes
# at a time dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, help.keys(),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError, ("list[i] not a string for i in %s" %
", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
| epl-1.0 | 2,622,969,642,988,453,400 | 36.193069 | 79 | 0.554239 | false |
Jhaefner/PressureDrop | master_example.py | 1 | 2607 | """
@author: Jonah Haefner and Lane Carasik
Title: master_example.py
The purpose of this script is to ensure the four functions included in this package are functioning properly and to serve as a usage example for the user.
It currently only provides checks for the inline geometry with the fluid at a Reynolds number of 22000.
The expected output is:
Zhukauskas: dP_1 = 21.94 kPa
Gaddis-Gnielinski: dP 2 = 25.67 kPa
Zhukauskas: Nu1 = 142.52
Gaddis-Gnielinski: Nu2 = 147.31
"""
import TORCHE as TE
# Geometric parameters
d = 0.0254 # Outside diameter of tube or cylinder (m)
a = 1.25 # Transverse pitch to diameter ratio
b = 1.25 # Longitudinal pitch to diameter ratio
geom = 'inline' # Tube geometry (inline or staggered)
N_rows = 10 # Number of tube rows
'''
# Fluid thermo-physical properties
rho = 1940 # Density of the working fluid - FLiBe salt (kg/m^3)
mu = 0.0056 # Dynamic viscosity of the working fluid - FLiBe salt (Pa-s)
Pr = 1 # Prandtl number of the working fluid
Pr_w = 1 # Prandtl number of the working fluid based on the wall film temperature
'''
# Fluid thermo-physical properties - H2O
rho = 998.6 # Density of the working fluid - water at 20 C (kg/m^3)
mu = 0.00100124 # Dynamic viscosity of the working fluid - water 20 C (Pa-s)
Pr = 6.99 # Prandtl number of the working fluid
Pr_w = 6.99 # Prandtl number of the working fluid based on the wall film temperature
# Flow behavior
vel = 0.5 # Free-stream velocity before interacting with the tube bank (m/s)
v_max = vel*(a/(a-1)) # Maximum velocity based on the minimum area between the tubes (m/s)
Re = rho*v_max*d/mu # Reynolds number of the flow based on the maximum velocity in the minimum area between tubes
# Expected Results
dP_Zu_Ex = 21.94 # Expected Zukauskas results for Pressure drop (kPa)
dP_GG_Ex = 25.67 # Expected Gaddis-Gnielinski results for Pressure drop (kPa)
Nu_Zu_Ex = 142.52 # Expected Zukauskas results for Nusselt Number
Nu_GG_Ex = 147.31 # Expected Gaddis-Gnielinski results for Nusselt Number
dP_1 = TE.dP_Zu(rho,a,b,geom,N_rows,vel,Re)
print('The Pressure Drop calculated by Zukauskas is',round(dP_1/1000,2),'kPa')
dP_2 = TE.dP_GG(rho,a,b,geom,N_rows,vel,Re,Return="")
print('The Pressure Drop calculated by Gaddis-Gnielinski is',round(dP_2/1000,2),'kPa')
Nu_1 = TE.HT_Zu(rho,Pr,Pr_w,a,b,d,geom,N_rows,vel,Re)
print('The Nusselt Number calculated by Zukauskas is', round(Nu_1,2))
Nu_2 = TE.HT_GG(rho,Pr,a,b,d,geom,N_rows,vel,Re)
print('The Nusselt Number calculated by Gnielinski is', round(Nu_2,2))
| mit | -4,553,694,799,183,054,000 | 43.736842 | 148 | 0.703874 | false |
krkhan/azure-linux-extensions | OSPatching/test/FakePatching3.py | 8 | 1623 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../patch')
from AbstractPatching import AbstractPatching
class FakePatching(AbstractPatching):
def __init__(self, hutil=None):
super(FakePatching,self).__init__(hutil)
self.pkg_query_cmd = 'dpkg-query -L'
self.gap_between_stage = 20
self.download_duration = 60
self.security_download_list = ['a', 'b', 'c', 'd', 'e']
self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']
def install(self):
"""
Install for dependencies.
"""
pass
def check(self, category):
"""
Check valid upgrades,
Return the package list to download & upgrade
"""
if category == 'important':
return 0, self.security_download_list
else:
return 0, self.all_download_list
def download_package(self, package):
return 0
def patch_package(self, package):
return 0
def check_reboot(self):
return False
| apache-2.0 | 7,190,143,803,900,370,000 | 27.982143 | 78 | 0.635243 | false |
hehongliang/tensorflow | tensorflow/contrib/specs/python/summaries_test.py | 25 | 3070 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(test.TestCase):
def testStructure(self):
with self.cached_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(
spec, input_shape=inputs_shape),
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,874,759,165,899,760,000 | 35.547619 | 80 | 0.661564 | false |
markYoungH/chromium.src | third_party/closure_linter/closure_linter/not_strict_test.py | 129 | 2318 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| bsd-3-clause | -1,590,271,656,067,317,000 | 30.324324 | 75 | 0.680759 | false |
gertingold/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py | 47 | 2514 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, cos, pi
from .go_benchmark import Benchmark
class YaoLiu04(Benchmark):
r"""
Yao-Liu 4 objective function.
This class defines the Yao-Liu function 4 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{YaoLiu04}}(x) = {max}_i \left\{ \left | x_i \right | ,
1 \leq i \leq n \right\}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 1201. Gavana code and documentation differ.
max(abs(x)) != abs(max(x))
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return abs(x).max()
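        # Illustrative check (not part of the benchmark API): at the global
        # optimum x = [0, 0] this returns 0.0, matching self.fglob.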
class YaoLiu09(Benchmark):
r"""
Yao-Liu 9 objective function.
This class defines the Yao-Liu [1]_ function 9 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{YaoLiu09}}(x) = \sum_{i=1}^n \left [ x_i^2
- 10 \cos(2 \pi x_i ) + 10 \right ]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    TODO Yao-Liu Fast Evolutionary programming is the original ref.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return sum(x ** 2.0 - 10.0 * cos(2 * pi * x) + 10)
| bsd-3-clause | -3,328,109,869,316,967,400 | 28.232558 | 84 | 0.57677 | false |
achals/servo | tests/wpt/web-platform-tests/tools/py/py/_path/local.py | 171 | 32118 | """
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
elif isinstance(path, common.PathBase):
self.strpath = path.strpath
elif isinstance(path, py.builtin._basestring):
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
else:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = self.strpath
s2 = getattr(other, "strpath", other)
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.strpath < getattr(other, "strpath", other)
def __gt__(self, other):
return self.strpath > getattr(other, "strpath", other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = getattr(other, "strpath", other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
                    self.chmod(448, rec=1) # octal 0700
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
                self.chmod(448) # octal 0700
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [getattr(arg, "strpath", arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False):
""" copy path to target."""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
def rename(self, target):
""" rename this path to target. """
target = getattr(target, "strpath", target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
                       # be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
        where the 'self' path points to the executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
bn = path.basename
if bn.startswith(prefix):
try:
return int(bn[len(prefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
def copymode(src, dest):
py.std.shutil.copymode(src, dest)
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
| mpl-2.0 | -6,434,963,838,657,129,000 | 34.255763 | 88 | 0.517654 | false |
carolinux/QGIS | scripts/mkuidefaults.py | 23 | 1400 | from PyQt4.QtCore import QCoreApplication, QSettings
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
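# For example (illustrative): list(chunks("abcdef", 4)) yields ["abcd", "ef"],
# i.e. the settings byte arrays below are emitted in at-most-n-sized slices.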
QCoreApplication.setOrganizationName( "QGIS" )
QCoreApplication.setOrganizationDomain( "qgis.org" )
QCoreApplication.setApplicationName( "QGIS2" )
s = QSettings()
ba = s.value("/UI/geometry").toByteArray()
f = open("src/app/ui_defaults.h", "w")
f.write( "#ifndef UI_DEFAULTS_H\n#define UI_DEFAULTS_H\n\nstatic const unsigned char defaultUIgeometry[] =\n{\n" )
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\nstatic const unsigned char defaultUIstate[] =\n{\n" )
ba = s.value("/UI/state").toByteArray()
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
ba = s.value("/Composer/geometry").toByteArray()
f.write( "};\n\nstatic const unsigned char defaultComposerUIgeometry[] =\n{\n" )
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\nstatic const unsigned char defaultComposerUIstate[] =\n{\n" )
ba = s.value("/ComposerUI/state").toByteArray()
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\n#endif // UI_DEFAULTS_H\n" )
f.close()
| gpl-2.0 | 7,778,649,341,955,544,000 | 30.111111 | 114 | 0.616429 | false |
pinterest/pinball | tests/pinball/master/master_handler_test.py | 6 | 3216 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for master handler."""
import sys
import unittest
from pinball.master.master_handler import MasterHandler
from pinball.master.thrift_lib.ttypes import ArchiveRequest
from pinball.master.thrift_lib.ttypes import GroupRequest
from pinball.master.thrift_lib.ttypes import ModifyRequest
from pinball.master.thrift_lib.ttypes import Query
from pinball.master.thrift_lib.ttypes import QueryAndOwnRequest
from pinball.master.thrift_lib.ttypes import QueryRequest
from pinball.master.thrift_lib.ttypes import Token
from tests.pinball.persistence.ephemeral_store import EphemeralStore
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class MasterHandlerTestCase(unittest.TestCase):
def _insert_token(self, handler):
request = ModifyRequest()
token = Token(name='/some_other_dir/some_token', data='some data')
request.updates = [token]
response = handler.modify(request)
self.assertEqual(1, len(response.updates))
return response.updates[0]
def test_archive(self):
handler = MasterHandler(EphemeralStore())
token = self._insert_token(handler)
request = ArchiveRequest()
request.tokens = [token]
handler.archive(request)
# The logic handling the request is tested thoroughly in
# transaction tests. Here we only make sure that the plumbing is in
# place.
def test_group(self):
request = GroupRequest()
request.namePrefix = '/'
handler = MasterHandler(EphemeralStore())
response = handler.group(request)
self.assertEqual(1, len(response.counts))
self.assertEqual(1, response.counts.values()[0])
def test_modify(self):
handler = MasterHandler(EphemeralStore())
self._insert_token(handler)
def test_query(self):
query = Query()
query.namePrefix = ''
query.maxTokens = 10
request = QueryRequest()
request.queries = [query]
handler = MasterHandler(EphemeralStore())
response = handler.query(request)
self.assertEqual(1, len(response.tokens))
def test_query_and_own(self):
query = Query()
query.namePrefix = ''
query.maxTokens = 10
request = QueryAndOwnRequest()
request.owner = 'some_owner'
request.expirationTime = sys.maxint
request.query = query
handler = MasterHandler(EphemeralStore())
response = handler.query_and_own(request)
self.assertEqual(0, len(response.tokens))
| apache-2.0 | 6,024,022,944,879,980,000 | 35.545455 | 76 | 0.689988 | false |
isandlaTech/cohorte-runtime | python/src/lib/python/unidecode/x057.py | 252 | 4631 | data = (
'Guo ', # 0x00
'Yin ', # 0x01
'Hun ', # 0x02
'Pu ', # 0x03
'Yu ', # 0x04
'Han ', # 0x05
'Yuan ', # 0x06
'Lun ', # 0x07
'Quan ', # 0x08
'Yu ', # 0x09
'Qing ', # 0x0a
'Guo ', # 0x0b
'Chuan ', # 0x0c
'Wei ', # 0x0d
'Yuan ', # 0x0e
'Quan ', # 0x0f
'Ku ', # 0x10
'Fu ', # 0x11
'Yuan ', # 0x12
'Yuan ', # 0x13
'E ', # 0x14
'Tu ', # 0x15
'Tu ', # 0x16
'Tu ', # 0x17
'Tuan ', # 0x18
'Lue ', # 0x19
'Hui ', # 0x1a
'Yi ', # 0x1b
'Yuan ', # 0x1c
'Luan ', # 0x1d
'Luan ', # 0x1e
'Tu ', # 0x1f
'Ya ', # 0x20
'Tu ', # 0x21
'Ting ', # 0x22
'Sheng ', # 0x23
'Pu ', # 0x24
'Lu ', # 0x25
'Iri ', # 0x26
'Ya ', # 0x27
'Zai ', # 0x28
'Wei ', # 0x29
'Ge ', # 0x2a
'Yu ', # 0x2b
'Wu ', # 0x2c
'Gui ', # 0x2d
'Pi ', # 0x2e
'Yi ', # 0x2f
'Di ', # 0x30
'Qian ', # 0x31
'Qian ', # 0x32
'Zhen ', # 0x33
'Zhuo ', # 0x34
'Dang ', # 0x35
'Qia ', # 0x36
'Akutsu ', # 0x37
'Yama ', # 0x38
'Kuang ', # 0x39
'Chang ', # 0x3a
'Qi ', # 0x3b
'Nie ', # 0x3c
'Mo ', # 0x3d
'Ji ', # 0x3e
'Jia ', # 0x3f
'Zhi ', # 0x40
'Zhi ', # 0x41
'Ban ', # 0x42
'Xun ', # 0x43
'Tou ', # 0x44
'Qin ', # 0x45
'Fen ', # 0x46
'Jun ', # 0x47
'Keng ', # 0x48
'Tun ', # 0x49
'Fang ', # 0x4a
'Fen ', # 0x4b
'Ben ', # 0x4c
'Tan ', # 0x4d
'Kan ', # 0x4e
'Pi ', # 0x4f
'Zuo ', # 0x50
'Keng ', # 0x51
'Bi ', # 0x52
'Xing ', # 0x53
'Di ', # 0x54
'Jing ', # 0x55
'Ji ', # 0x56
'Kuai ', # 0x57
'Di ', # 0x58
'Jing ', # 0x59
'Jian ', # 0x5a
'Tan ', # 0x5b
'Li ', # 0x5c
'Ba ', # 0x5d
'Wu ', # 0x5e
'Fen ', # 0x5f
'Zhui ', # 0x60
'Po ', # 0x61
'Pan ', # 0x62
'Tang ', # 0x63
'Kun ', # 0x64
'Qu ', # 0x65
'Tan ', # 0x66
'Zhi ', # 0x67
'Tuo ', # 0x68
'Gan ', # 0x69
'Ping ', # 0x6a
'Dian ', # 0x6b
'Gua ', # 0x6c
'Ni ', # 0x6d
'Tai ', # 0x6e
'Pi ', # 0x6f
'Jiong ', # 0x70
'Yang ', # 0x71
'Fo ', # 0x72
'Ao ', # 0x73
'Liu ', # 0x74
'Qiu ', # 0x75
'Mu ', # 0x76
'Ke ', # 0x77
'Gou ', # 0x78
'Xue ', # 0x79
'Ba ', # 0x7a
'Chi ', # 0x7b
'Che ', # 0x7c
'Ling ', # 0x7d
'Zhu ', # 0x7e
'Fu ', # 0x7f
'Hu ', # 0x80
'Zhi ', # 0x81
'Chui ', # 0x82
'La ', # 0x83
'Long ', # 0x84
'Long ', # 0x85
'Lu ', # 0x86
'Ao ', # 0x87
'Tay ', # 0x88
'Pao ', # 0x89
'[?] ', # 0x8a
'Xing ', # 0x8b
'Dong ', # 0x8c
'Ji ', # 0x8d
'Ke ', # 0x8e
'Lu ', # 0x8f
'Ci ', # 0x90
'Chi ', # 0x91
'Lei ', # 0x92
'Gai ', # 0x93
'Yin ', # 0x94
'Hou ', # 0x95
'Dui ', # 0x96
'Zhao ', # 0x97
'Fu ', # 0x98
'Guang ', # 0x99
'Yao ', # 0x9a
'Duo ', # 0x9b
'Duo ', # 0x9c
'Gui ', # 0x9d
'Cha ', # 0x9e
'Yang ', # 0x9f
'Yin ', # 0xa0
'Fa ', # 0xa1
'Gou ', # 0xa2
'Yuan ', # 0xa3
'Die ', # 0xa4
'Xie ', # 0xa5
'Ken ', # 0xa6
'Jiong ', # 0xa7
'Shou ', # 0xa8
'E ', # 0xa9
'Ha ', # 0xaa
'Dian ', # 0xab
'Hong ', # 0xac
'Wu ', # 0xad
'Kua ', # 0xae
'[?] ', # 0xaf
'Tao ', # 0xb0
'Dang ', # 0xb1
'Kai ', # 0xb2
'Gake ', # 0xb3
'Nao ', # 0xb4
'An ', # 0xb5
'Xing ', # 0xb6
'Xian ', # 0xb7
'Huan ', # 0xb8
'Bang ', # 0xb9
'Pei ', # 0xba
'Ba ', # 0xbb
'Yi ', # 0xbc
'Yin ', # 0xbd
'Han ', # 0xbe
'Xu ', # 0xbf
'Chui ', # 0xc0
'Cen ', # 0xc1
'Geng ', # 0xc2
'Ai ', # 0xc3
'Peng ', # 0xc4
'Fang ', # 0xc5
'Que ', # 0xc6
'Yong ', # 0xc7
'Xun ', # 0xc8
'Jia ', # 0xc9
'Di ', # 0xca
'Mai ', # 0xcb
'Lang ', # 0xcc
'Xuan ', # 0xcd
'Cheng ', # 0xce
'Yan ', # 0xcf
'Jin ', # 0xd0
'Zhe ', # 0xd1
'Lei ', # 0xd2
'Lie ', # 0xd3
'Bu ', # 0xd4
'Cheng ', # 0xd5
'Gomi ', # 0xd6
'Bu ', # 0xd7
'Shi ', # 0xd8
'Xun ', # 0xd9
'Guo ', # 0xda
'Jiong ', # 0xdb
'Ye ', # 0xdc
'Nian ', # 0xdd
'Di ', # 0xde
'Yu ', # 0xdf
'Bu ', # 0xe0
'Ya ', # 0xe1
'Juan ', # 0xe2
'Sui ', # 0xe3
'Pi ', # 0xe4
'Cheng ', # 0xe5
'Wan ', # 0xe6
'Ju ', # 0xe7
'Lun ', # 0xe8
'Zheng ', # 0xe9
'Kong ', # 0xea
'Chong ', # 0xeb
'Dong ', # 0xec
'Dai ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Cai ', # 0xf0
'Shu ', # 0xf1
'Beng ', # 0xf2
'Kan ', # 0xf3
'Zhi ', # 0xf4
'Duo ', # 0xf5
'Yi ', # 0xf6
'Zhi ', # 0xf7
'Yi ', # 0xf8
'Pei ', # 0xf9
'Ji ', # 0xfa
'Zhun ', # 0xfb
'Qi ', # 0xfc
'Sao ', # 0xfd
'Ju ', # 0xfe
'Ni ', # 0xff
)
| apache-2.0 | 6,737,367,888,080,055,000 | 16.949612 | 20 | 0.389117 | false |
akashsinghal/Speech-Memorization-App | Python_Backend/env/lib/python3.6/site-packages/pip/utils/packaging.py | 343 | 2080 | from __future__ import absolute_import
from email.parser import FeedParser
import logging
import sys
from pip._vendor.packaging import specifiers
from pip._vendor.packaging import version
from pip._vendor import pkg_resources
from pip import exceptions
logger = logging.getLogger(__name__)
def check_requires_python(requires_python):
"""
    Check if the python version in use matches the `requires_python` specifier.
    Returns `True` if the version of python in use matches the requirement.
    Returns `False` if the version of python in use does not match the
    requirement.
    Raises an InvalidSpecifier if `requires_python` has an invalid format.
"""
if requires_python is None:
# The package provides no information
return True
requires_python_specifier = specifiers.SpecifierSet(requires_python)
# We only use major.minor.micro
python_version = version.parse('.'.join(map(str, sys.version_info[:3])))
return python_version in requires_python_specifier
def get_metadata(dist):
if (isinstance(dist, pkg_resources.DistInfoDistribution) and
dist.has_metadata('METADATA')):
return dist.get_metadata('METADATA')
elif dist.has_metadata('PKG-INFO'):
return dist.get_metadata('PKG-INFO')
def check_dist_requires_python(dist):
metadata = get_metadata(dist)
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
requires_python = pkg_info_dict.get('Requires-Python')
try:
if not check_requires_python(requires_python):
raise exceptions.UnsupportedPythonVersion(
"%s requires Python '%s' but the running Python is %s" % (
dist.project_name,
requires_python,
'.'.join(map(str, sys.version_info[:3])),)
)
except specifiers.InvalidSpecifier as e:
logger.warning(
"Package %s has an invalid Requires-Python entry %s - %s" % (
dist.project_name, requires_python, e))
return
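# A minimal usage sketch (illustrative only -- the specifier strings below are
# made-up examples, not values taken from this codebase):
#
#     from pip.utils.packaging import check_requires_python
#
#     check_requires_python(None)           # True: no constraint declared
#     check_requires_python(">=2.6")        # True or False, depending on interpreter
#     check_requires_python("not a spec")   # raises specifiers.InvalidSpecifier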
| apache-2.0 | 219,264,717,010,636,830 | 32.015873 | 77 | 0.666827 | false |
nickanderson/ansible | lib/ansible/inventory/ini.py | 25 | 7628 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._add_allgroup_children()
self._parse_group_variables()
return self.groups
@staticmethod
def _parse_value(v):
if "#" not in v:
try:
return ast.literal_eval(v)
# Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return v
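    # Illustrative behaviour of _parse_value (hypothetical calls, not part of
    # the parser itself):
    #
    #     InventoryParser._parse_value("2345")   # -> 2345 (int)
    #     InventoryParser._parse_value("True")   # -> True (bool)
    #     InventoryParser._parse_value("web01")  # -> "web01" (left as a string)
    #     InventoryParser._parse_value("a#b")    # -> "a#b" (never literal_eval'd)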
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _add_allgroup_children(self):
for group in self.groups.values():
if group.depth == 0 and group.name != 'all':
self.groups['all'].add_child_group(group)
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
line = utils.before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
elif line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line)
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
                # Two cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
                # 1. A hostname that contains just a port
if hostname.count(":") > 1:
                    # Possibly an IPv6 address, or maybe a host line with multiple ranges
# IPv6 with Port XXX:XXX::XXX.port
# FQDN foo.example.com
if hostname.count(".") == 1:
(hostname, port) = hostname.rsplit(".", 1)
elif ("[" in hostname and
"]" in hostname and
":" in hostname and
(hostname.rindex("]") < hostname.rindex(":")) or
("]" not in hostname and ":" in hostname)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=", 1)
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if "=" not in line:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
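# Minimal usage sketch (illustrative only; the inventory path and group name
# are assumptions, not values from this repository):
#
#     parser = InventoryParser(filename="/etc/ansible/hosts")
#     webservers = parser.groups.get("webservers")
#     if webservers is not None:
#         print [host.name for host in webservers.get_hosts()]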
| gpl-3.0 | 2,175,336,850,531,327,700 | 36.392157 | 102 | 0.510881 | false |
matteoalessiocarrara/HTML-Facebook-API | src/lib/fbwrapper/src/lib/bot_virtualbrowser/src/lib/human/src/requests2.py | 6 | 2370 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2015 - 2016 Matteo Alessio Carrara <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Estensione della libreria requests """
import logging
import os
import requests
import version
# Logging system configuration
logger = logging.getLogger(version.lib_name)
logger.addHandler(logging.NullHandler())
class Session(requests.Session):
"""Versione modificata di requests.Session"""
def __init__(self):
super(Session, self).__init__()
self.__set_owner_pid()
def __set_owner_pid(self):
"""Imposta il pid del processo creatore, ovvero quello attuale"""
self.__owner_pid = os.getpid()
logger.debug("Owner pid: %s", self.__owner_pid)
def get_owner_pid(self):
"""Restituisce il pid del processo creatore"""
return self.__owner_pid
def get2(self, url, **kwargs):
"""
		Modified version of get
		* Checks that this object is not shared between multiple processes
		* Raises an HTTPError exception when necessary
		* Prints debug information
"""
if os.getpid() != self.owner_pid:
			# PULL THE PLUG, PULL THE PLUUUG
			w = "It looks like this requests.Session object is being used by multiple processes. This is discouraged and could cause problems"
logger.warning(w)
if (url[:8] == "https://") and (os.getpid() != self.owner_pid):
logger.info("Casini in arrivo... io ti avevo avvertito, auguri :)")
ret = self.get(url, **kwargs)
try:
ret.raise_for_status()
except requests.HTTPError as e:
logger.error("url %s: %s ", url, e.message)
logger.debug("<!-- ret.text -->\n%s", ret.text)
raise
return ret
owner_pid = property(get_owner_pid)
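# Minimal usage sketch (illustrative only; the URL is a placeholder):
#
#     session = Session()
#     response = session.get2("http://example.com/")  # logs and re-raises on HTTP errors
#     print response.status_code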
| gpl-2.0 | -9,117,452,735,417,555,000 | 28.962025 | 131 | 0.705957 | false |
UTSA-ICS/keystone-kerberos | keystone/credential/backends/sql.py | 15 | 3846 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import sql
from keystone import credential
from keystone import exception
class CredentialModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'credential'
attributes = ['id', 'user_id', 'project_id', 'blob', 'type']
id = sql.Column(sql.String(64), primary_key=True)
user_id = sql.Column(sql.String(64),
nullable=False)
project_id = sql.Column(sql.String(64))
blob = sql.Column(sql.JsonBlob(), nullable=False)
type = sql.Column(sql.String(255), nullable=False)
extra = sql.Column(sql.JsonBlob())
class Credential(credential.Driver):
# credential crud
@sql.handle_conflicts(conflict_type='credential')
def create_credential(self, credential_id, credential):
session = sql.get_session()
with session.begin():
ref = CredentialModel.from_dict(credential)
session.add(ref)
return ref.to_dict()
@sql.truncated
def list_credentials(self, hints):
session = sql.get_session()
credentials = session.query(CredentialModel)
credentials = sql.filter_limit_query(CredentialModel,
credentials, hints)
return [s.to_dict() for s in credentials]
def list_credentials_for_user(self, user_id):
session = sql.get_session()
query = session.query(CredentialModel)
refs = query.filter_by(user_id=user_id).all()
return [ref.to_dict() for ref in refs]
def _get_credential(self, session, credential_id):
ref = session.query(CredentialModel).get(credential_id)
if ref is None:
raise exception.CredentialNotFound(credential_id=credential_id)
return ref
def get_credential(self, credential_id):
session = sql.get_session()
return self._get_credential(session, credential_id).to_dict()
@sql.handle_conflicts(conflict_type='credential')
def update_credential(self, credential_id, credential):
session = sql.get_session()
with session.begin():
ref = self._get_credential(session, credential_id)
old_dict = ref.to_dict()
for k in credential:
old_dict[k] = credential[k]
new_credential = CredentialModel.from_dict(old_dict)
for attr in CredentialModel.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_credential, attr))
ref.extra = new_credential.extra
return ref.to_dict()
def delete_credential(self, credential_id):
session = sql.get_session()
with session.begin():
ref = self._get_credential(session, credential_id)
session.delete(ref)
def delete_credentials_for_project(self, project_id):
session = sql.get_session()
with session.begin():
query = session.query(CredentialModel)
query = query.filter_by(project_id=project_id)
query.delete()
def delete_credentials_for_user(self, user_id):
session = sql.get_session()
with session.begin():
query = session.query(CredentialModel)
query = query.filter_by(user_id=user_id)
query.delete()
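# Rough usage sketch (illustrative only; it assumes an already-configured
# keystone SQL session, and the ids and blob payload are made up):
#
#     driver = Credential()
#     driver.create_credential(
#         'cred-1234',
#         {'id': 'cred-1234', 'user_id': 'user-1', 'project_id': 'proj-1',
#          'blob': '{"access": "key", "secret": "secret"}', 'type': 'ec2'})
#     assert driver.get_credential('cred-1234')['type'] == 'ec2'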
| apache-2.0 | 3,235,210,525,702,219,300 | 35.980769 | 75 | 0.639626 | false |
topic2k/EventGhost | _build/builder/__init__.py | 1 | 7152 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import os
import sys
import tempfile
import threading
from os.path import abspath, dirname, exists, join
# Local imports
import builder
from builder import VirtualEnv
from builder.Logging import LogToFile
from builder.Utils import (
GetGitHubConfig, GetVersion, Is64bitInterpreter, IsCIBuild
)
logger = logging.getLogger()
class Task(object):
value = None
visible = True
enabled = True
activated = True
def __init__(self, buildSetup):
self.buildSetup = buildSetup
def Setup(self):
pass
def DoTask(self):
raise NotImplementedError
@classmethod
def GetId(cls):
return cls.__module__ + "." + cls.__name__
def Print(self, *args):
logger.log(22, " ".join(args))
class Builder(object):
def __init__(self):
if not VirtualEnv.Running() and VirtualEnv.Exists():
VirtualEnv.Activate()
global buildSetup
Task.buildSetup = self
buildSetup = self
self.pyVersionStr = "%d%d" % sys.version_info[:2]
self.buildDir = abspath(join(dirname(__file__), ".."))
self.sourceDir = abspath(join(self.buildDir, ".."))
self.libraryName = "lib%s" % self.pyVersionStr
self.libraryDir = join(self.sourceDir, self.libraryName)
self.dataDir = join(self.buildDir, "data")
self.docsDir = join(self.dataDir, "docs")
self.pyVersionDir = join(self.dataDir, "Python%s" % self.pyVersionStr)
self.outputDir = join(self.buildDir, "output")
self.websiteDir = join(self.outputDir, "website")
if Is64bitInterpreter():
print(
"ERROR: Sorry, EventGhost can't be built with the 64-bit "
"version of Python!"
)
sys.exit(1)
elif not exists(self.pyVersionDir):
print(
"ERROR: Sorry, EventGhost can't be built with Python %d.%d!"
% sys.version_info[:2]
)
sys.exit(1)
sys.path.append(self.sourceDir)
sys.path.append(join(self.libraryDir, "site-packages"))
self.args = self.ParseArgs()
self.showGui = not (
self.args.build or
self.args.check or
self.args.package or
self.args.release or
self.args.sync
)
if os.environ.get(
"APPVEYOR_REPO_COMMIT_MESSAGE", ""
).upper().startswith("VERBOSE:"):
self.args.verbose = True
os.chdir(self.buildDir)
if not exists(self.outputDir):
os.mkdir(self.outputDir)
LogToFile(join(self.outputDir, "Build.log"), self.args.verbose)
from CheckDependencies import CheckDependencies
if not CheckDependencies(self):
sys.exit(1)
try:
self.gitConfig = GetGitHubConfig()
except Exception as e:
msg = (
"WARNING: To change version or release to GitHub, you must:\n"
" $ git config --global github.user <your github username>\n"
" $ git config --global github.token <your github token>\n"
"To create a token, go to: https://github.com/settings/tokens\n"
)
if type(e) is ValueError:
msg = "WARNING: Specified `github.token` is invalid!\n" + msg
if not IsCIBuild():
token = ""
print msg
else:
token = os.environ["GITHUB_TOKEN"]
self.gitConfig = {
"all_repos": {
"EventGhost/EventGhost": {
"all_branches": ["master"],
"def_branch": "master",
"name": "EventGhost",
},
},
"branch": "master",
"repo": "EventGhost",
"repo_full": "EventGhost/EventGhost",
"token": token,
"user": "EventGhost",
}
self.appVersion = None
self.appVersionInfo = None
self.tmpDir = tempfile.mkdtemp()
self.appName = self.name
def ParseArgs(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", "--build",
action="store_true",
help="build imports, lib%s, and interpreters" % self.pyVersionStr,
)
parser.add_argument(
"-c", "--check",
action="store_true",
help="check source code for issues",
)
parser.add_argument(
"-m", "--make-env",
action="store_true",
help="auto-install dependencies into a virtualenv",
)
parser.add_argument(
"-p", "--package",
action="store_true",
help="build changelog, docs, and setup.exe",
)
parser.add_argument(
"-r", "--release",
action="store_true",
help="release to github and web if credentials available",
)
parser.add_argument(
"-s", "--sync",
action="store_true",
help="build and synchronize website",
)
parser.add_argument(
"-d", "--docs",
action="store_true",
help="build and synchronize usr and dev docs",
)
parser.add_argument(
"-u", "--url",
dest="websiteUrl",
default='',
type=str,
help="sftp url for doc synchronizing",
)
parser.add_argument(
"-vv", "--verbose",
action="store_true",
help="give a more verbose output",
)
parser.add_argument(
"-v", "--version",
action="store",
help="package as the specified version",
)
return parser.parse_args()
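    # Hypothetical invocations exercising the flags above (illustrative only;
    # the entry-script name is an assumption):
    #
    #     python Build.py --build --package
    #     python Build.py --check -vv
    #     python Build.py --release --version 0.5.0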
def Start(self):
from Tasks import TASKS
self.tasks = [task(self) for task in TASKS]
from Config import Config
self.config = Config(self, join(self.outputDir, "Build.ini"))
for task in self.tasks:
task.Setup()
(self.appVersion, self.appVersionInfo) = GetVersion(self)
if self.showGui:
import Gui
Gui.Main(self)
else:
builder.Tasks.Main(self)
| gpl-2.0 | -2,435,615,217,317,113,000 | 30.641593 | 80 | 0.549154 | false |
wolverineav/neutron | neutron/tests/unit/agent/common/test_ovs_lib.py | 3 | 39120 | # Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.common import exceptions
from neutron.plugins.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as p_const
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
# some test data for get_vif_port_to_ofport_map that exhibited bug 1444269
OVSLIST_WITH_UNSET_PORT = (
'{"data":[["patch-tun",["map",[]],1],["tap2ab72a72-44",["map",[["attached-'
'mac","fa:16:3e:b0:f8:38"],["iface-id","2ab72a72-4407-4ef3-806a-b2172f3e4d'
'c7"],["iface-status","active"]]],2],["tap6b108774-15",["map",[["attached-'
'mac","fa:16:3e:02:f5:91"],["iface-id","6b108774-1559-45e9-a7c3-b714f11722'
'cf"],["iface-status","active"]]],["set",[]]]],"headings":["name","externa'
'l_ids","ofport"]}')
class OFCTLParamListMatcher(object):
def _parse(self, params):
actions_pos = params.find('actions')
return set(params[:actions_pos].split(',')), params[actions_pos:]
def __init__(self, params):
self.expected = self._parse(params)
def __eq__(self, other):
return self.expected == self._parse(other)
def __str__(self):
return 'ovs-ofctl parameters: %s, "%s"' % self.expected
__repr__ = __str__
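# Illustrative behaviour of the matcher above (not part of the test suite): it
# compares everything before "actions" as a set, so reordered parameters still
# compare equal.
#
#     m = OFCTLParamListMatcher("priority=1,in_port=1,actions=drop")
#     assert m == "in_port=1,priority=1,actions=drop"
#     assert not (m == "in_port=2,priority=1,actions=drop")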
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.br = ovs_lib.OVSBridge(self.BR_NAME)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
@property
def TO(self):
return "--timeout=%s" % self.br.vsctl_timeout
def _vsctl_args(self, *args):
cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--']
cmd += args
return cmd
def _vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
return mock.call(cmd, run_as_root=True, log_fail_as_error=False)
def _verify_vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
self.execute.assert_called_once_with(cmd, run_as_root=True,
log_fail_as_error=False)
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = collections.OrderedDict([
('cookie', 1234),
('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = collections.OrderedDict([
('cookie', 1254),
('priority', 1),
('actions', 'normal')])
flow_dict_3 = collections.OrderedDict([
('cookie', 1257),
('priority', 2),
('actions', 'drop')])
flow_dict_4 = collections.OrderedDict([
('cookie', 1274),
('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = collections.OrderedDict([
('cookie', 1284),
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = collections.OrderedDict([
('cookie', 1754),
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = collections.OrderedDict([
('cookie', 1256),
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1234,"
"priority=2,dl_src=ca:fe:de:ad:be:ef,"
"actions=strip_vlan,output:0")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1254,"
"priority=1,actions=normal")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1257,"
"priority=2,actions=drop")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1274,"
"priority=2,in_port=%s,actions=drop" % ofport
)),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1284,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal" %
(vid, ofport, lsw_id))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1754,"
"priority=3,"
"tun_id=%s,actions=mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1256,"
"priority=4,nw_src=%s,arp,actions=drop"
% cidr)),
]
self.execute.assert_has_calls(expected_calls)
def _ofctl_args(self, cmd, *args):
cmd = ['ovs-ofctl', cmd]
cmd += args
return cmd
def _ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return mock.call(cmd, run_as_root=True, **kwargs)
def _verify_ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return self.execute.assert_called_once_with(cmd, run_as_root=True,
**kwargs)
def test_add_flow_timeout_set(self):
flow_dict = collections.OrderedDict([
('cookie', 1234),
('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=1000,idle_timeout=2000,"
"priority=1,cookie=1234,actions=normal")
def test_add_flow_default_priority(self):
flow_dict = collections.OrderedDict([('actions', 'normal'),
('cookie', 1234)])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=0,idle_timeout=0,priority=1,"
"cookie=1234,actions=normal")
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.br.vsctl_timeout = 0 # Don't waste precious time retrying
self.execute.return_value = self._encode_ovs_json(
['ofport'], [[ofport]])
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport(6, 6)
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_for_invalid(self):
self._test_get_port_ofport(ovs_lib.INVALID_OFPORT,
ovs_lib.INVALID_OFPORT)
def test_default_datapath(self):
# verify kernel datapath is default
expected = p_const.OVS_DATAPATH_SYSTEM
self.assertEqual(expected, self.br.datapath_type)
def test_non_default_datapath(self):
expected = p_const.OVS_DATAPATH_NETDEV
self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected)
self.assertEqual(expected, self.br.datapath_type)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="in_port=" + ofport),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="tun_id=%s" % lsw_id),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="dl_vlan=%s" % vid),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertIsNone(retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_run_ofctl_retry_on_socket_error(self):
err = RuntimeError('failed to connect to socket')
self.execute.side_effect = [err] * 5
with mock.patch('time.sleep') as sleep:
self.br.run_ofctl('add-flows', [])
self.assertEqual(5, sleep.call_count)
self.assertEqual(6, self.execute.call_count)
# a regular exception fails right away
self.execute.side_effect = RuntimeError('garbage')
self.execute.reset_mock()
with mock.patch('time.sleep') as sleep:
self.br.run_ofctl('add-flows', [])
self.assertEqual(0, sleep.call_count)
self.assertEqual(1, self.execute.call_count)
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
command = ["--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = False
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_csum_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = True
tunnel_csum = True
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow",
"options:csum=true"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment, tunnel_csum),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = 6
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
id_field = 'xs-vif-uuid' if is_xen else 'iface-id'
external_ids = {"attached-mac": mac, id_field: vif_id}
self.br.get_ports_attributes = mock.Mock(return_value=[{
'name': pname, 'ofport': ofport, 'external_ids': external_ids}])
self.br.get_xapi_iface_id = mock.Mock(return_value=vif_id)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.br.get_ports_attributes.assert_called_once_with(
'Interface',
columns=['name', 'external_ids', 'ofport'],
if_exists=True)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids', 'ofport']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# Non-vif port on this bridge:
['bogus', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", 'tap99', 'tun22'),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_port_to_ofport_map(self):
self.execute.return_value = OVSLIST_WITH_UNSET_PORT
results = self.br.get_vif_port_to_ofport_map()
expected = {'2ab72a72-4407-4ef3-806a-b2172f3e4dc7': 2, 'patch-tun': 1}
self.assertEqual(expected, results)
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", "tap99"), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME),
             '\n'.join((iface for iface, tag in data))),
(self._vsctl_mock("--columns=name,tag", "list", "Port"),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self._verify_vsctl_mock("clear", "Port", pname, "tag")
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
if exp_timeout:
self.br.vsctl_timeout = exp_timeout
self.execute.return_value = 'br-int'
self.assertEqual(self.br.get_bridge_for_iface(iface), br)
self._verify_vsctl_mock("iface-to-br", iface)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
self.execute.side_effect = Exception
self.assertIsNone(self.br.get_bridge_for_iface(iface))
self._verify_vsctl_mock("iface-to-br", iface)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_bridges_not_default_timeout(self):
bridges = ['br-int', 'br-ex']
self.br.vsctl_timeout = 5
        self.execute.return_value = 'br-int\nbr-ex\n'
self.assertEqual(self.br.get_bridges(), bridges)
self._verify_vsctl_mock("list-br")
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def test_get_vifs_by_ids(self):
db_list_res = [
{'name': 'qvo1', 'ofport': 1,
'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}},
{'name': 'qvo2', 'ofport': 2,
'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}},
{'name': 'qvo4', 'ofport': -1,
'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}},
]
self.br.get_ports_attributes = mock.Mock(return_value=db_list_res)
self.br.ovsdb = mock.Mock()
self.br.ovsdb.list_ports.return_value.execute.return_value = [
'qvo1', 'qvo2', 'qvo4']
by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4'])
# pid3 isn't on bridge and pid4 doesn't have a valid ofport
self.assertIsNone(by_id['pid3'])
self.assertIsNone(by_id['pid4'])
self.assertEqual('pid1', by_id['pid1'].vif_id)
self.assertEqual('qvo1', by_id['pid1'].port_name)
self.assertEqual(1, by_id['pid1'].ofport)
self.assertEqual('pid2', by_id['pid2'].vif_id)
self.assertEqual('qvo2', by_id['pid2'].port_name)
self.assertEqual(2, by_id['pid2'].ofport)
self.br.get_ports_attributes.assert_has_calls(
[mock.call('Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)])
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None,
extra_calls_and_values=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("--columns=external_ids,name,ofport", "find",
"Interface",
'external_ids:iface-id=%s' % iface_id,
'external_ids:attached-mac!=""'),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
# Only the last information list in 'data' is used, so if more
# than one vif is described in data, the rest must be declared
# in the argument 'expected_calls_and_values'.
if extra_calls_and_values:
expected_calls_and_values.extend(extra_calls_and_values)
expected_calls_and_values.append(
(self._vsctl_mock("iface-to-br",
data[-1][headings.index('name')]), br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _assert_vif_port(self, vif_port, ofport=None, mac=None):
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port, "Got %s" % vif_port)
return
self.assertEqual('tap99id', vif_port.vif_id)
self.assertEqual(mac, vif_port.vif_mac)
self.assertEqual('tap99', vif_port.port_name)
self.assertEqual(ofport, vif_port.ofport)
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", mac]]
data = [[["map", external_ids], "tap99",
ofport if ofport else ["set", []]]]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
self._assert_vif_port(vif_port, ofport, mac)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def test_get_vif_by_port_id_multiple_vifs(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", "de:ad:be:ef:13:37"]]
data = [[["map", external_ids], "dummytap", 1],
[["map", external_ids], "tap99", 1337]]
extra_calls_and_values = [
(self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")]
vif_port = self._test_get_vif_port_by_id(
'tap99id', data, extra_calls_and_values=extra_calls_and_values)
self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37")
class TestDeferredOVSBridge(base.BaseTestCase):
def setUp(self):
super(TestDeferredOVSBridge, self).setUp()
self.br = mock.Mock()
self.mocked_do_action_flows = mock.patch.object(
self.br, 'do_action_flows').start()
self.add_flow_dict1 = dict(in_port=11, actions='drop')
self.add_flow_dict2 = dict(in_port=12, actions='drop')
self.mod_flow_dict1 = dict(in_port=21, actions='drop')
self.mod_flow_dict2 = dict(in_port=22, actions='drop')
self.del_flow_dict1 = dict(in_port=31)
self.del_flow_dict2 = dict(in_port=32)
def test_right_allowed_passthroughs(self):
expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
self.assertEqual(expected_passthroughs,
ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
def _verify_mock_call(self, expected_calls):
self.mocked_do_action_flows.assert_has_calls(expected_calls)
self.assertEqual(len(expected_calls),
len(self.mocked_do_action_flows.mock_calls))
def test_apply_on_exit(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
self._verify_mock_call(expected_calls)
def test_apply_on_exit_with_errors(self):
try:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
raise Exception()
except Exception:
self._verify_mock_call([])
else:
self.fail('Exception would be reraised')
def test_apply(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
deferred_br.apply_flows()
self._verify_mock_call(expected_calls)
self._verify_mock_call(expected_calls)
def test_apply_order(self):
expected_calls = [
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
]
order = 'del', 'mod', 'add'
with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_apply_full_ordered(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('add', [self.add_flow_dict2]),
mock.call('mod', [self.mod_flow_dict2]),
]
with ovs_lib.DeferredOVSBridge(self.br,
full_ordered=True) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_getattr_unallowed_attr(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertEqual(self.br.add_port, deferred_br.add_port)
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
def test_default_cookie(self):
self.br = ovs_lib.OVSBridge("br-tun")
uuid_stamp1 = self.br.default_cookie
self.assertEqual(uuid_stamp1, self.br.default_cookie)
def test_cookie_passed_to_addmod(self):
self.br = ovs_lib.OVSBridge("br-tun")
stamp = str(self.br.default_cookie)
expected_calls = [
mock.call('add-flows', ['-'],
'hard_timeout=0,idle_timeout=0,priority=1,'
'cookie=' + stamp + ',actions=drop'),
mock.call('mod-flows', ['-'],
'cookie=' + stamp + ',actions=drop')
]
with mock.patch.object(self.br, 'run_ofctl') as f:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(actions='drop')
deferred_br.mod_flow(actions='drop')
f.assert_has_calls(expected_calls)
| apache-2.0 | 1,059,533,844,783,685,100 | 41.155172 | 79 | 0.547393 | false |
Ken69267/config-stuff | .vim/eclim/autoload/eclim/python/rope/base/oi/objectinfo.py | 115 | 8767 | import warnings
from rope.base import exceptions, resourceobserver
from rope.base.oi import objectdb, memorydb, transform
class ObjectInfoManager(object):
"""Stores object information
It uses an instance of `objectdb.ObjectDB` for storing
information.
"""
def __init__(self, project):
self.project = project
self.to_textual = transform.PyObjectToTextual(project)
self.to_pyobject = transform.TextualToPyObject(project)
self.doi_to_pyobject = transform.DOITextualToPyObject(project)
self._init_objectdb()
if project.prefs.get('validate_objectdb', False):
self._init_validation()
def _init_objectdb(self):
dbtype = self.project.get_prefs().get('objectdb_type', None)
persist = None
if dbtype is not None:
warnings.warn(
'"objectdb_type" project config is deprecated;\n'
'Use "save_objectdb" instead in your project '
'config file.\n(".ropeproject/config.py" by default)\n',
DeprecationWarning)
if dbtype != 'memory' and self.project.ropefolder is not None:
persist = True
self.validation = TextualValidation(self.to_pyobject)
db = memorydb.MemoryDB(self.project, persist=persist)
self.objectdb = objectdb.ObjectDB(db, self.validation)
def _init_validation(self):
self.objectdb.validate_files()
observer = resourceobserver.ResourceObserver(
changed=self._resource_changed, moved=self._resource_moved,
removed=self._resource_moved)
files = []
for path in self.objectdb.get_files():
resource = self.to_pyobject.path_to_resource(path)
if resource is not None and resource.project == self.project:
files.append(resource)
self.observer = resourceobserver.FilteredResourceObserver(observer,
files)
self.objectdb.add_file_list_observer(_FileListObserver(self))
self.project.add_observer(self.observer)
def _resource_changed(self, resource):
try:
self.objectdb.validate_file(
self.to_textual.resource_to_path(resource))
except exceptions.ModuleSyntaxError:
pass
def _resource_moved(self, resource, new_resource=None):
self.observer.remove_resource(resource)
if new_resource is not None:
old = self.to_textual.resource_to_path(resource)
new = self.to_textual.resource_to_path(new_resource)
self.objectdb.file_moved(old, new)
self.observer.add_resource(new_resource)
def get_returned(self, pyobject, args):
result = self.get_exact_returned(pyobject, args)
if result is not None:
return result
path, key = self._get_scope(pyobject)
if path is None:
return None
for call_info in self.objectdb.get_callinfos(path, key):
returned = call_info.get_returned()
if returned and returned[0] not in ('unknown', 'none'):
result = returned
break
if result is None:
result = returned
if result is not None:
return self.to_pyobject(result)
def get_exact_returned(self, pyobject, args):
path, key = self._get_scope(pyobject)
if path is not None:
returned = self.objectdb.get_returned(
path, key, self._args_to_textual(pyobject, args))
if returned is not None:
return self.to_pyobject(returned)
def _args_to_textual(self, pyfunction, args):
parameters = list(pyfunction.get_param_names(special_args=False))
arguments = args.get_arguments(parameters)[:len(parameters)]
textual_args = tuple([self.to_textual(arg)
for arg in arguments])
return textual_args
def get_parameter_objects(self, pyobject):
path, key = self._get_scope(pyobject)
if path is None:
return None
arg_count = len(pyobject.get_param_names(special_args=False))
unknowns = arg_count
parameters = [None] * arg_count
for call_info in self.objectdb.get_callinfos(path, key):
args = call_info.get_parameters()
for index, arg in enumerate(args[:arg_count]):
old = parameters[index]
if self.validation.is_more_valid(arg, old):
parameters[index] = arg
if self.validation.is_value_valid(arg):
unknowns -= 1
if unknowns == 0:
break
if unknowns < arg_count:
return [self.to_pyobject(parameter)
for parameter in parameters]
def get_passed_objects(self, pyfunction, parameter_index):
path, key = self._get_scope(pyfunction)
if path is None:
return []
result = []
for call_info in self.objectdb.get_callinfos(path, key):
args = call_info.get_parameters()
if len(args) > parameter_index:
parameter = self.to_pyobject(args[parameter_index])
if parameter is not None:
result.append(parameter)
return result
def doa_data_received(self, data):
def doi_to_normal(textual):
pyobject = self.doi_to_pyobject(textual)
return self.to_textual(pyobject)
function = doi_to_normal(data[0])
args = tuple([doi_to_normal(textual) for textual in data[1]])
returned = doi_to_normal(data[2])
if function[0] == 'defined' and len(function) == 3:
self._save_data(function, args, returned)
def function_called(self, pyfunction, params, returned=None):
function_text = self.to_textual(pyfunction)
params_text = tuple([self.to_textual(param)
for param in params])
returned_text = ('unknown',)
if returned is not None:
returned_text = self.to_textual(returned)
self._save_data(function_text, params_text, returned_text)
def save_per_name(self, scope, name, data):
path, key = self._get_scope(scope.pyobject)
if path is not None:
self.objectdb.add_pername(path, key, name, self.to_textual(data))
def get_per_name(self, scope, name):
path, key = self._get_scope(scope.pyobject)
if path is not None:
result = self.objectdb.get_pername(path, key, name)
if result is not None:
return self.to_pyobject(result)
def _save_data(self, function, args, returned=('unknown',)):
self.objectdb.add_callinfo(function[1], function[2], args, returned)
def _get_scope(self, pyobject):
resource = pyobject.get_module().get_resource()
if resource is None:
return None, None
textual = self.to_textual(pyobject)
if textual[0] == 'defined':
path = textual[1]
if len(textual) == 3:
key = textual[2]
else:
key = ''
return path, key
return None, None
def sync(self):
self.objectdb.sync()
def __str__(self):
return str(self.objectdb)
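# Rough usage sketch (illustrative only; assumes an existing rope project at a
# hypothetical path):
#
#     from rope.base.project import Project
#
#     project = Project('/tmp/sample_project')
#     info = ObjectInfoManager(project)
#     info.sync()  # flush collected object information to the objectdb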
class TextualValidation(object):
def __init__(self, to_pyobject):
self.to_pyobject = to_pyobject
def is_value_valid(self, value):
# ???: Should none and unknown be considered valid?
if value is None or value[0] in ('none', 'unknown'):
return False
return self.to_pyobject(value) is not None
def is_more_valid(self, new, old):
if old is None:
return True
return new[0] not in ('unknown', 'none')
def is_file_valid(self, path):
return self.to_pyobject.path_to_resource(path) is not None
def is_scope_valid(self, path, key):
if key == '':
textual = ('defined', path)
else:
textual = ('defined', path, key)
return self.to_pyobject(textual) is not None
class _FileListObserver(object):
def __init__(self, object_info):
self.object_info = object_info
self.observer = self.object_info.observer
self.to_pyobject = self.object_info.to_pyobject
def removed(self, path):
resource = self.to_pyobject.path_to_resource(path)
if resource is not None:
self.observer.remove_resource(resource)
def added(self, path):
resource = self.to_pyobject.path_to_resource(path)
if resource is not None:
self.observer.add_resource(resource)
| mit | -5,783,436,503,925,363,000 | 36.788793 | 77 | 0.58994 | false |
AntidoteLabs/Antidote-DM | Antidotes DM/youtube_dl/extractor/footyroom.py | 13 | 1647 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class FootyRoomIE(InfoExtractor):
_VALID_URL = r'http://footyroom\.com/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
'info_dict': {
'id': 'schalke-04-0-2-real-madrid-2015-02',
'title': 'Schalke 04 0 – 2 Real Madrid',
},
'playlist_count': 3,
'skip': 'Video for this match is not available',
}, {
'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
'info_dict': {
'id': 'georgia-0-2-germany-2015-03',
'title': 'Georgia 0 – 2 Germany',
},
'playlist_count': 1,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._parse_json(
self._search_regex(
r'VideoSelector\.load\((\[.+?\])\);', webpage, 'video selector'),
playlist_id)
playlist_title = self._og_search_title(webpage)
entries = []
for video in playlist:
payload = video.get('payload')
if not payload:
continue
playwire_url = self._search_regex(
r'data-config="([^"]+)"', payload,
'playwire url', default=None)
if playwire_url:
entries.append(self.url_result(self._proto_relative_url(
playwire_url, 'http:'), 'Playwire'))
return self.playlist_result(entries, playlist_id, playlist_title)
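# In normal use this extractor is reached through youtube-dl's extraction
# machinery rather than instantiated directly, e.g. from the command line
# (illustrative only):
#
#     youtube-dl http://footyroom.com/georgia-0-2-germany-2015-03/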
| gpl-2.0 | -7,329,513,082,234,937,000 | 31.86 | 81 | 0.530736 | false |
FeMTTU/femus | external/jsoncpp/jsoncpp-src-0.5.0/test/rununittests.py | 249 | 2507 | import sys
import os
import os.path
import subprocess
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
def __init__( self, test_exe_path, use_valgrind=False ):
self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
self.use_valgrind = use_valgrind
def run( self, options ):
if self.use_valgrind:
cmd = VALGRIND_CMD.split()
else:
cmd = []
cmd.extend( [self.test_exe_path, '--test-auto'] + options )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
return False, stdout
return True, stdout
def runAllTests( exe_path, use_valgrind=False ):
test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
status, test_names = test_proxy.run( ['--list-tests'] )
if not status:
print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
return 1
test_names = [name.strip() for name in test_names.strip().split('\n')]
failures = []
for name in test_names:
print 'TESTING %s:' % name,
succeed, result = test_proxy.run( ['--test', name] )
if succeed:
print 'OK'
else:
failures.append( (name, result) )
print 'FAILED'
failed_count = len(failures)
pass_count = len(test_names) - failed_count
if failed_count:
print
for name, result in failures:
print result
print '%d/%d tests passed (%d failure(s))' % (
pass_count, len(test_names), failed_count)
return 1
else:
print 'All %d tests passed' % len(test_names)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 1:
parser.error( 'Must provide the path to the test_lib_json executable.' )
sys.exit( 1 )
exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
sys.exit( exit_code )
if __name__ == '__main__':
main()
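# Usage sketch (paths are hypothetical; run from a shell):
#   python rununittests.py path/to/test_lib_json
#   python rununittests.py --valgrind path/to/test_lib_json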
| lgpl-2.1 | 5,270,087,245,698,930,000 | 33.342466 | 91 | 0.603111 | false |
xutian/virt-test | virttest/libvirt_xml/nwfilter_protocols/ah_ipv6.py | 26 | 5826 | """
ah-ipv6 protocol support class(es)
http://libvirt.org/formatnwfilter.html#nwfelemsRulesProtoMiscv6
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.nwfilter_protocols import base
class Ah_ipv6(base.TypedDeviceBase):
"""
Create new Ah_ipv6 xml instances
Properties:
attrs: libvirt_xml.nwfilter_protocols.Ah_ipv6.Attr instance
"""
__slots__ = ('attrs',)
def __init__(self, type_name='file', virsh_instance=base.base.virsh):
accessors.XMLElementNest('attrs', self, parent_xpath='/',
tag_name='ah_ipv6', subclass=self.Attr,
subclass_dargs={
'virsh_instance': virsh_instance})
super(Ah_ipv6, self).__init__(protocol_tag='ah-ipv6',
type_name=type_name,
virsh_instance=virsh_instance)
def new_attr(self, **dargs):
"""
Return a new Attr instance and set properties from dargs
:param dargs: dict of attributes
:return: new Attr instance
"""
new_one = self.Attr(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
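# Hedged usage sketch for new_attr(); the attribute values below are
# illustrative assumptions, not values required by libvirt:
#   ah = Ah_ipv6()
#   ah.attrs = ah.new_attr(srcipaddr='2001:db8::1', comment='example rule')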
def get_attr(self):
"""
Return ah-ipv6 attribute dict
:return: None if no ah-ipv6 in xml, dict of ah-ipv6's attributes.
"""
try:
ah_node = self.xmltreefile.reroot('/ah-ipv6')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
node = ah_node.getroot()
ah_attr = dict(node.items())
return ah_attr
class Attr(base.base.LibvirtXMLBase):
"""
Ah_ipv6 attribute XML class
Properties:
srcmacaddr: string, MAC address of sender
srcmacmask: string, Mask applied to MAC address of sender
dstmacaddr: string, MAC address of destination
dstmacmask: string, Mask applied to MAC address of destination
srcipaddr: string, Source IP address
srcipmask: string, Mask applied to source IP address
dstipaddr: string, Destination IP address
dstipmask: string, Mask applied to destination IP address
srcipfrom: string, Start of range of source IP address
srcipto: string, End of range of source IP address
dstipfrom: string, Start of range of destination IP address
dstipto: string, End of range of destination IP address
comment: string, text with max. 256 characters
state: string, comma separated list of NEW,ESTABLISHED,RELATED,INVALID or NONE
ipset: The name of an IPSet managed outside of libvirt
ipsetflags: flags for the IPSet; requires ipset attribute
"""
__slots__ = ('srcmacaddr', 'srcmacmask', 'dstmacaddr', 'dstmacmask',
'srcipaddr', 'srcipmask', 'dstipaddr', 'dstipmask',
'srcipfrom', 'srcipto', 'dstipfrom', 'dstipto',
'dscp', 'comment', 'state', 'ipset', 'ipsetflags')
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute('srcmacaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcmacaddr')
accessors.XMLAttribute('srcmacmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcmacmask')
accessors.XMLAttribute('dstmacaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstmacaddr')
accessors.XMLAttribute('dstmacmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstmacmask')
accessors.XMLAttribute('srcipaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipaddr')
accessors.XMLAttribute('srcipmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipmask')
accessors.XMLAttribute('dstipaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipaddr')
accessors.XMLAttribute('dstipmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipmask')
accessors.XMLAttribute('srcipfrom', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipfrom')
accessors.XMLAttribute('srcipto', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipto')
accessors.XMLAttribute('dstipfrom', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipfrom')
accessors.XMLAttribute('dstipto', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipto')
accessors.XMLAttribute('dscp', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dscp')
accessors.XMLAttribute('comment', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='comment')
accessors.XMLAttribute('state', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='state')
accessors.XMLAttribute('ipset', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='ipset')
accessors.XMLAttribute('ipsetflags', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='ipsetflags')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<ah-ipv6/>'
| gpl-2.0 | 6,297,954,871,406,577,000 | 44.874016 | 86 | 0.555784 | false |
olafhauk/mne-python | mne/datasets/__init__.py | 6 | 1103 | """Functions for fetching remote datasets.
See :ref:`datasets` for more information.
"""
from . import fieldtrip_cmc
from . import brainstorm
from . import visual_92_categories
from . import kiloword
from . import eegbci
from . import hf_sef
from . import misc
from . import mtrf
from . import sample
from . import somato
from . import multimodal
from . import fnirs_motor
from . import opm
from . import spm_face
from . import testing
from . import _fake
from . import phantom_4dbti
from . import sleep_physionet
from . import limo
from . import refmeg_noise
from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation,
fetch_aparc_sub_parcellation)
from ._fsaverage.base import fetch_fsaverage
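# Hedged usage note (the call below is standard MNE API, not defined in this
# module):
#   from mne.datasets import sample
#   data_path = sample.data_path()  # downloads the sample dataset if missing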
__all__ = [
'_download_all_example_data', '_fake', 'brainstorm', 'eegbci',
'fetch_aparc_sub_parcellation', 'fetch_fsaverage',
'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword',
'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample',
'sleep_physionet', 'somato', 'spm_face', 'testing', 'visual_92_categories',
'limo',
]
| bsd-3-clause | 710,153,336,659,270,500 | 28.810811 | 79 | 0.703536 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KButtonGroup.py | 1 | 1093 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KButtonGroup(__PyQt4_QtGui.QGroupBox):
# no doc
def changed(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def clicked(self, *args, **kwargs): # real signature unknown
pass
def id(self, *args, **kwargs): # real signature unknown
pass
def pressed(self, *args, **kwargs): # real signature unknown
pass
def released(self, *args, **kwargs): # real signature unknown
pass
def selected(self, *args, **kwargs): # real signature unknown
pass
def setSelected(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
| gpl-2.0 | -3,835,212,938,206,663,000 | 24.418605 | 82 | 0.654163 | false |
Intel-tensorflow/tensorflow | tensorflow/python/ops/ctc_ops.py | 6 | 57164 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CTC (Connectionist Temporal Classification) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.python.eager import context
from tensorflow.python.eager import function as function_eager
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_ctc_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_DEFUN_API_NAME_ATTRIBUTE = "api_implements"
_DEFUN_DEVICE_ATTRIBUTE = "api_preferred_device"
_CPU_DEVICE_NAME = "CPU"
_GPU_DEVICE_NAME = "GPU"
def _get_context_device_type():
"""Parse the current context and return the device type, eg CPU/GPU."""
current_device = context.context().device_name
if current_device is None:
return None
return device.DeviceSpec.from_string(current_device).device_type
def _generate_defun_backend(unique_api_name, preferred_device, func):
function_attributes = {
_DEFUN_API_NAME_ATTRIBUTE: unique_api_name,
_DEFUN_DEVICE_ATTRIBUTE: preferred_device,
}
return function_eager.defun_with_attributes(
func=func, attributes=function_attributes, autograph=False)
# pylint: disable=protected-access, invalid-name
@tf_export(v1=["nn.ctc_loss"])
@dispatch.add_dispatch_support
def ctc_loss(labels,
inputs=None,
sequence_length=None,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False,
time_major=True,
logits=None):
"""Computes the CTC (Connectionist Temporal Classification) Loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Input requirements:
```
sequence_length(b) <= time for all b
max(labels.indices(labels.indices[:, 1] == b, 2))
<= sequence_length(b) for all b.
```
Notes:
This op performs the softmax operation for you, so inputs should
be e.g. linear projections of outputs by an LSTM.
The `inputs` Tensor's innermost dimension size, `num_classes`, represents
`num_labels + 1` classes, where num_labels is the number of true labels, and
the largest value `(num_classes - 1)` is reserved for the blank label.
For example, for a vocabulary containing 3 labels `[a, b, c]`,
`num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`.
Regarding the arguments `preprocess_collapse_repeated` and
`ctc_merge_repeated`:
If `preprocess_collapse_repeated` is True, then a preprocessing step runs
before loss calculation, wherein repeated labels passed to the loss
are merged into single labels. This is useful if the training labels come
from, e.g., forced alignments and therefore have unnecessary repetitions.
If `ctc_merge_repeated` is set False, then deep within the CTC calculation,
repeated non-blank labels will not be merged and are interpreted
as individual labels. This is a simplified (non-standard) version of CTC.
Here is a table of the (roughly) expected first order behavior:
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True`
Classical CTC behavior: Outputs true repeated classes with blanks in
between, and can also output repeated classes with no blanks in
between that need to be collapsed by the decoder.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False`
Never learns to output repeated classes, as they are collapsed
in the input labels before training.
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False`
Outputs repeated classes with blanks in between, but generally does not
require the decoder to collapse/merge repeated classes.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True`
Untested. Very likely will not learn to output repeated classes.
The `ignore_longer_outputs_than_inputs` option allows to specify the behavior
of the CTCLoss when dealing with sequences that have longer outputs than
inputs. If true, the CTCLoss will simply return zero gradient for those
items, otherwise an InvalidArgument error is returned, stopping training.
Args:
labels: An `int32` `SparseTensor`.
`labels.indices[i, :] == [b, t]` means `labels.values[i]` stores the id
for (batch b, time t). `labels.values[i]` must take on values in `[0,
num_labels)`. See `core/ops/ctc_ops.cc` for more details.
inputs: 3-D `float` `Tensor`.
If time_major == False, this will be a `Tensor` shaped: `[batch_size,
max_time, num_classes]`.
If time_major == True (default), this will be a `Tensor` shaped:
`[max_time, batch_size, num_classes]`. The logits.
sequence_length: 1-D `int32` vector, size `[batch_size]`. The sequence
lengths.
preprocess_collapse_repeated: Boolean. Default: False. If True, repeated
labels are collapsed prior to the CTC calculation.
ctc_merge_repeated: Boolean. Default: True.
ignore_longer_outputs_than_inputs: Boolean. Default: False. If True,
sequences with longer outputs than inputs will be ignored.
time_major: The shape format of the `inputs` Tensors. If True, these
`Tensors` must be shaped `[max_time, batch_size, num_classes]`. If False,
these `Tensors` must be shaped `[batch_size, max_time, num_classes]`.
Using `time_major = True` (default) is a bit more efficient because it
avoids transposes at the beginning of the ctc_loss calculation. However,
most TensorFlow data is batch-major, so this function also accepts
inputs in batch-major form.
logits: Alias for inputs.
Returns:
A 1-D `float` `Tensor`, size `[batch]`, containing the negative log
probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
return _ctc_loss_impl(
labels,
inputs,
sequence_length,
preprocess_collapse_repeated,
ctc_merge_repeated,
ignore_longer_outputs_than_inputs,
time_major,
logits,
use_cudnn=False)
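# A minimal usage sketch for the v1 op above (not part of the public module);
# the shapes, values and variable names below are assumptions for illustration.
def _example_ctc_loss_v1():  # illustrative only, never called by this module
  import tensorflow.compat.v1 as tf  # assumes a TF 1.x-compatible environment
  logits = tf.random.normal([50, 2, 28])  # [max_time, batch_size, num_classes]
  labels = tf.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]], values=[1, 2, 3], dense_shape=[2, 2])
  seq_len = tf.constant([50, 50], dtype=tf.int32)
  return tf.nn.ctc_loss(labels, inputs=logits, sequence_length=seq_len)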
def _ctc_loss_impl(labels,
inputs=None,
sequence_length=None,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False,
time_major=True,
logits=None,
use_cudnn=False):
# Helper function of ctc_loss with one additional param:
# use_cudnn: A bool to enable cuDNN CTC loss operation. If true, the blank
# index has to be 0.
# The second, third, etc output tensors contain the gradients. We use it in
# _CTCLossGrad() below.
if not isinstance(labels, sparse_tensor.SparseTensor):
raise TypeError("Expected labels (first argument) to be a SparseTensor")
# For internal calculations, we transpose to [time, batch, num_classes]
inputs = deprecation.deprecated_argument_lookup("logits", logits, "inputs",
inputs)
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N)
# gen_ctc_ops.ctc_loss_v2 differs from gen_ctc_ops.ctc_loss. v2 assumes the
# blank index to be 0, but v1 views it as the last index.
if use_cudnn:
ctc_loss_func = gen_ctc_ops.ctc_loss_v2
else:
ctc_loss_func = gen_ctc_ops.ctc_loss
loss, _ = ctc_loss_func(
inputs,
labels.indices,
labels.values,
sequence_length,
preprocess_collapse_repeated=preprocess_collapse_repeated,
ctc_merge_repeated=ctc_merge_repeated,
ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
return loss
# pylint: disable=unused-argument
def _CTCLossGradImpl(op, grad_loss, _):
# Outputs are: loss, grad
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1],
message="Currently there is no way to take the second "
" derivative of ctc_loss due to the fused implementation's interaction "
" with tf.gradients()")
# Return gradient for inputs and None for
# labels_indices, labels_values and sequence_length
return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLoss")
def _CTCLossGrad(op, grad_loss, _):
"""The derivative provided by CTC Loss.
Args:
op: the CTCLoss op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss gradient.
"""
return _CTCLossGradImpl(op, grad_loss, _)
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLossV2")
def _CTCLossV2Grad(op, grad_loss, _):
"""The derivative provided by CTC Loss V2.
Args:
op: the CTCLossV2 op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss V2 gradient.
"""
return _CTCLossGradImpl(op, grad_loss, _)
@tf_export("nn.ctc_greedy_decoder")
@dispatch.add_dispatch_support
def ctc_greedy_decoder(inputs,
sequence_length,
merge_repeated=True,
blank_index=None):
"""Performs greedy decoding on the logits given in input (best path).
Given a tensor as `inputs`, the `blank_index` parameter defines the class
index of the blank symbol.
For example:
If `blank_index` is equal to 1:
>>> inf = float("inf")
>>> logits = tf.constant([[[ 0., -inf, -inf],
... [ -2.3, -inf, -0.1]],
... [[ -inf, -0.5, -inf],
... [ -inf, -inf, -0.1]],
... [[ -inf, -inf, -inf],
... [ -0.1, -inf, -2.3]]])
>>> seq_lens = tf.constant([2, 3])
>>> outputs = tf.nn.ctc_greedy_decoder(
... logits,
... seq_lens,
... blank_index=1)
Notes:
- Regardless of the value of `merge_repeated`, if an index of a
given time and batch corresponds to the `blank_index`, no new
element is emitted.
- Default `blank_index` is `(num_classes - 1)`, unless overridden.
If `merge_repeated` is `True`, merge repeated classes in output.
This means that if consecutive logits' maximum indices are the same,
only the first of these is emitted. The sequence `A B B * B * B` (where '*'
is the blank label) becomes
* `A B B B` if `merge_repeated=True`.
* `A B B B B` if `merge_repeated=False`.
Args:
inputs: 3-D `float` `Tensor` sized `[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
merge_repeated: Boolean. Default: True.
blank_index: (Optional). Default: `num_classes - 1`. Define the class index
to use for the blank label. Negative values will start from num_classes,
ie, -1 will reproduce the ctc_greedy_decoder behavior of using
num_classes - 1 for the blank symbol, which corresponds to the default.
Returns:
A tuple `(decoded, neg_sum_logits)` where
decoded: A single-element list. `decoded[0]`
is an `SparseTensor` containing the decoded outputs s.t.:
`decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`.
The rows store: `[batch, time]`.
`decoded.values`: Values vector, size `(total_decoded_outputs)`.
The vector stores the decoded classes.
`decoded.dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length]`
neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the
sequence found, the negative of the sum of the greatest logit at each
timeframe.
"""
outputs = gen_ctc_ops.ctc_greedy_decoder(
inputs,
sequence_length,
merge_repeated=merge_repeated,
blank_index=blank_index)
(decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs
return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val,
decoded_shape)], log_probabilities)
@tf_export(v1=["nn.ctc_beam_search_decoder"])
@dispatch.add_dispatch_support
def ctc_beam_search_decoder(inputs,
sequence_length,
beam_width=100,
top_paths=1,
merge_repeated=True):
"""Performs beam search decoding on the logits given in input.
**Note** The `ctc_greedy_decoder` is a special case of the
`ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but
that decoder is faster for this special case).
If `merge_repeated` is `True`, merge repeated classes in the output beams.
This means that if consecutive entries in a beam are the same,
only the first of these is emitted. That is, when the sequence is
`A B B * B * B` (where '*' is the blank label), the return value is:
* `A B` if `merge_repeated = True`.
* `A B B B` if `merge_repeated = False`.
Args:
inputs: 3-D `float` `Tensor`, size `[max_time x batch_size x num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
merge_repeated: Boolean. Default: True.
Returns:
A tuple `(decoded, log_probabilities)` where
decoded: A list of length top_paths, where `decoded[j]`
is a `SparseTensor` containing the decoded outputs:
`decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)`
The rows store: [batch, time].
`decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`.
The vector stores the decoded classes for beam j.
`decoded[j].dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A `float` matrix `(batch_size x top_paths)` containing
sequence log-probabilities.
"""
decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
gen_ctc_ops.ctc_beam_search_decoder(
inputs,
sequence_length,
beam_width=beam_width,
top_paths=top_paths,
merge_repeated=merge_repeated))
return ([
sparse_tensor.SparseTensor(ix, val, shape)
for (ix, val, shape) in zip(decoded_ixs, decoded_vals, decoded_shapes)
], log_probabilities)
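# A short sketch of consuming the decoder output above; shapes are illustrative
# assumptions, and tf.sparse.to_dense is public TF API, not something defined
# in this file.
def _example_ctc_beam_search_decode():  # illustrative only
  import tensorflow.compat.v1 as tf
  logits = tf.random.normal([50, 2, 28])  # [max_time, batch_size, num_classes]
  seq_len = tf.constant([50, 50], dtype=tf.int32)
  decoded, log_probs = tf.nn.ctc_beam_search_decoder(
      logits, seq_len, beam_width=10, top_paths=1)
  # Each decoded[j] is a SparseTensor; densify the best path for inspection.
  return tf.sparse.to_dense(decoded[0], default_value=-1), log_probs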
@tf_export("nn.ctc_beam_search_decoder", v1=["nn.ctc_beam_search_decoder_v2"])
@dispatch.add_dispatch_support
def ctc_beam_search_decoder_v2(inputs,
sequence_length,
beam_width=100,
top_paths=1):
"""Performs beam search decoding on the logits given in input.
**Note** The `ctc_greedy_decoder` is a special case of the
`ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but
that decoder is faster for this special case).
Args:
inputs: 3-D `float` `Tensor`, size `[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
Returns:
A tuple `(decoded, log_probabilities)` where
decoded: A list of length top_paths, where `decoded[j]`
is a `SparseTensor` containing the decoded outputs:
`decoded[j].indices`: Indices matrix `[total_decoded_outputs[j], 2]`;
The rows store: `[batch, time]`.
`decoded[j].values`: Values vector, size `[total_decoded_outputs[j]]`.
The vector stores the decoded classes for beam `j`.
`decoded[j].dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A `float` matrix `[batch_size, top_paths]` containing
sequence log-probabilities.
"""
# Note, merge_repeated is an invalid optimization that is removed from the
# public API: it returns low probability paths.
return ctc_beam_search_decoder(
inputs,
sequence_length=sequence_length,
beam_width=beam_width,
top_paths=top_paths,
merge_repeated=False)
ops.NotDifferentiable("CTCGreedyDecoder")
ops.NotDifferentiable("CTCBeamSearchDecoder")
def _ctc_state_trans(label_seq):
"""Compute CTC alignment model transition matrix.
Args:
label_seq: tensor of shape [batch_size, max_seq_length]
Returns:
tensor of shape [batch_size, states, states] with a state transition matrix
computed for each sequence of the batch.
"""
with ops.name_scope("ctc_state_trans"):
label_seq = ops.convert_to_tensor(label_seq, name="label_seq")
batch_size = _get_dim(label_seq, 0)
num_labels = _get_dim(label_seq, 1)
num_label_states = num_labels + 1
num_states = 2 * num_label_states
label_states = math_ops.range(num_label_states)
blank_states = label_states + num_label_states
# Start state to first label.
start_to_label = [[1, 0]]
# Blank to label transitions.
blank_to_label = array_ops.stack([label_states[1:], blank_states[:-1]], 1)
# Label to blank transitions.
label_to_blank = array_ops.stack([blank_states, label_states], 1)
# Scatter transitions that don't depend on sequence.
indices = array_ops.concat([start_to_label, blank_to_label, label_to_blank],
0)
values = array_ops.ones([_get_dim(indices, 0)])
trans = array_ops.scatter_nd(
indices, values, shape=[num_states, num_states])
trans += linalg_ops.eye(num_states) # Self-loops.
# Label to label transitions. Disallow transitions between repeated labels
# with no blank state in between.
batch_idx = array_ops.zeros_like(label_states[2:])
indices = array_ops.stack([batch_idx, label_states[2:], label_states[1:-1]],
1)
indices = array_ops.tile(
array_ops.expand_dims(indices, 0), [batch_size, 1, 1])
batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0]
indices += array_ops.expand_dims(batch_idx, 1)
repeats = math_ops.equal(label_seq[:, :-1], label_seq[:, 1:])
values = 1.0 - math_ops.cast(repeats, dtypes.float32)
batched_shape = [batch_size, num_states, num_states]
label_to_label = array_ops.scatter_nd(indices, values, batched_shape)
return array_ops.expand_dims(trans, 0) + label_to_label
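# A descriptive note on the layout produced above (added commentary, not code
# from the original): for a label sequence of length L there are L + 1 label
# states (index 0 is the start state, 1..L the labels) followed by L + 1 blank
# states, i.e. 2 * (L + 1) states in total. The scatters wire start -> first
# label, blank -> following label, label -> its own blank, self-loops, and
# label -> label transitions except between identical consecutive labels.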
def ctc_state_log_probs(seq_lengths, max_seq_length):
"""Computes CTC alignment initial and final state log probabilities.
Create the initial/final state values directly as log values to avoid
having to take a float64 log on tpu (which does not exist).
Args:
seq_lengths: int tensor of shape [batch_size], seq lengths in the batch.
max_seq_length: int, max sequence length possible.
Returns:
initial_state_log_probs, final_state_log_probs
"""
batch_size = _get_dim(seq_lengths, 0)
num_label_states = max_seq_length + 1
num_duration_states = 2
num_states = num_duration_states * num_label_states
log_0 = math_ops.cast(
math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32)
initial_state_log_probs = array_ops.one_hot(
indices=array_ops.zeros([batch_size], dtype=dtypes.int32),
depth=num_states,
on_value=0.0,
off_value=log_0,
axis=1)
label_final_state_mask = array_ops.one_hot(
seq_lengths, depth=num_label_states, axis=0)
duration_final_state_mask = array_ops.ones(
[num_duration_states, 1, batch_size])
final_state_mask = duration_final_state_mask * label_final_state_mask
final_state_log_probs = (1.0 - final_state_mask) * log_0
final_state_log_probs = array_ops.reshape(final_state_log_probs,
[num_states, batch_size])
return initial_state_log_probs, array_ops.transpose(final_state_log_probs)
def _ilabel_to_state(labels, num_labels, ilabel_log_probs):
"""Project ilabel log probs to state log probs."""
num_label_states = _get_dim(labels, 1)
blank = ilabel_log_probs[:, :, :1]
blank = array_ops.tile(blank, [1, 1, num_label_states + 1])
one_hot = array_ops.one_hot(labels, depth=num_labels)
one_hot = array_ops.expand_dims(one_hot, axis=0)
ilabel_log_probs = array_ops.expand_dims(ilabel_log_probs, axis=2)
state_log_probs = math_ops.reduce_sum(ilabel_log_probs * one_hot, axis=3)
state_log_probs = array_ops.concat([state_log_probs, blank], axis=2)
return array_ops.pad(
state_log_probs, [[0, 0], [0, 0], [1, 0]],
constant_values=math_ops.log(0.0))
def _state_to_olabel(labels, num_labels, states):
"""Sum state log probs to ilabel log probs."""
num_label_states = _get_dim(labels, 1) + 1
label_states = states[:, :, 1:num_label_states]
blank_states = states[:, :, num_label_states:]
one_hot = array_ops.one_hot(
labels - 1,
depth=(num_labels - 1),
on_value=0.0,
off_value=math_ops.log(0.0))
one_hot = array_ops.expand_dims(one_hot, axis=0)
label_states = array_ops.expand_dims(label_states, axis=3)
label_olabels = math_ops.reduce_logsumexp(label_states + one_hot, axis=2)
blank_olabels = math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True)
return array_ops.concat([blank_olabels, label_olabels], axis=-1)
# pylint: disable=redefined-outer-name
def _state_to_olabel_unique(labels, num_labels, states, unique):
"""Sum state log probs to ilabel log probs using unique label indices."""
num_label_states = _get_dim(labels, 1) + 1
label_states = states[:, :, 1:num_label_states]
blank_states = states[:, :, num_label_states:]
unique_y, unique_idx = unique
mul_reduce = _sum_states(unique_idx, label_states)
num_frames = states.shape[0]
batch_size = states.shape[1]
num_states = num_label_states - 1
batch_state_major = array_ops.transpose(mul_reduce, perm=[1, 2, 0])
batch_state_major = array_ops.reshape(batch_state_major,
[batch_size * num_states, num_frames])
batch_offset = math_ops.range(batch_size, dtype=unique_y.dtype) * num_labels
indices = unique_y + array_ops.expand_dims(batch_offset, axis=-1)
indices = array_ops.reshape(indices, [-1, 1])
scatter = array_ops.scatter_nd(
indices=indices,
updates=batch_state_major,
shape=[batch_size * num_labels, num_frames])
scatter = array_ops.reshape(scatter, [batch_size, num_labels, num_frames])
mask = array_ops.ones_like(batch_state_major, dtype=dtypes.bool)
mask = array_ops.scatter_nd(
indices=indices,
updates=mask,
shape=[batch_size * num_labels, num_frames])
mask = array_ops.reshape(mask, [batch_size, num_labels, num_frames])
scatter = array_ops.where(
mask, scatter,
array_ops.fill(array_ops.shape(scatter), math_ops.log(0.0)))
label_olabels = array_ops.transpose(scatter, [2, 0, 1])
label_olabels = label_olabels[:, :, 1:]
blank_olabels = math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True)
return array_ops.concat([blank_olabels, label_olabels], axis=-1)
def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None):
"""Computes the CTC loss and gradients.
Most users will want fwd_bwd.ctc_loss
This function returns the computed gradient, it does not have a gradient
of its own defined.
Args:
logits: tensor of shape [frames, batch_size, num_labels]
labels: tensor of shape [batch_size, max_label_seq_length]
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
unique: (optional) unique label indices as computed by unique(labels) If
supplied, enables an implementation that is faster and more memory
efficient on TPU.
Returns:
loss: tensor of shape [batch_size]
gradient: tensor of shape [frames, batch_size, num_labels]
"""
num_labels = _get_dim(logits, 2)
max_label_seq_length = _get_dim(labels, 1)
ilabel_log_probs = nn_ops.log_softmax(logits)
state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs)
state_trans_probs = _ctc_state_trans(labels)
initial_state_log_probs, final_state_log_probs = ctc_state_log_probs(
label_length, max_label_seq_length)
fwd_bwd_log_probs, log_likelihood = _forward_backward_log(
state_trans_log_probs=math_ops.log(state_trans_probs),
initial_state_log_probs=initial_state_log_probs,
final_state_log_probs=final_state_log_probs,
observed_log_probs=state_log_probs,
sequence_length=logit_length)
if unique:
olabel_log_probs = _state_to_olabel_unique(labels, num_labels,
fwd_bwd_log_probs, unique)
else:
olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs)
grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs)
# Applies the sequence mask for the gradient. It is enough to apply the mask
# only to ilabel_log_probs because olabel_log_probs already considers the
# mask. However, applying it to the gradient as well is safe and clean.
max_logit_length = _get_dim(logits, 0)
logit_mask = array_ops.sequence_mask(logit_length, max_logit_length,
dtypes.float32)
logit_mask = array_ops.transpose(logit_mask, perm=[1, 0])
logit_mask = array_ops.expand_dims(logit_mask, axis=2)
grad *= logit_mask
loss = -log_likelihood
return loss, grad
def _ctc_loss_grad(op, grad_loss, _):
grad = op.outputs[1]
grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * grad]
grad += [None] * (len(op.inputs) - len(grad))
return grad
def _ctc_loss_op_standard(labels, logits, logit_length, logits_time_major,
blank_index):
part_before = logits[:, :, :blank_index]
part_after = logits[:, :, blank_index + 1:]
part_blank = logits[:, :, blank_index:blank_index + 1]
logits = array_ops.concat([part_before, part_after, part_blank], axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values,
labels.values - 1), labels.dense_shape)
return _ctc_loss_impl(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major,
use_cudnn=False)
def _ctc_loss_op_cudnn(labels, logits, logit_length, logits_time_major,
blank_index):
part_before = logits[:, :, :blank_index]
part_after = logits[:, :, blank_index + 1:]
part_blank = logits[:, :, blank_index:blank_index + 1]
logits = array_ops.concat([part_blank, part_before, part_after], axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values + 1,
labels.values), labels.dense_shape)
return _ctc_loss_impl(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major,
use_cudnn=True)
def _ctc_loss_shape(op):
return [op.inputs[2].get_shape(), op.inputs[0].get_shape()]
# pylint: disable=protected-access, invalid-name
@tf_export(v1=["nn.ctc_loss_v2"])
@dispatch.add_dispatch_support
def ctc_loss_v2(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=None,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Notes:
- Same as the "Classic CTC" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss
setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True
- Labels may be supplied as either a dense, zero-padded tensor with a
vector of label sequence lengths OR as a SparseTensor.
- On TPU and GPU: Only dense padded labels are supported.
- On CPU: Caller may use SparseTensor or dense padded labels but calling with
a SparseTensor will be significantly faster.
- Default blank label is 0 rather than num_classes - 1, unless overridden by
blank_index.
Args:
labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size], None if labels is SparseTensor
Length of reference label sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by
ctc_unique_labels(labels). If supplied, enable a faster, memory efficient
implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if blank_index is None:
raise ValueError(
"blank_index must be given when using SparseTensor labels.")
if blank_index < 0:
blank_index += _get_dim(logits, 2)
if blank_index != _get_dim(logits, 2) - 1:
logits = array_ops.concat([
logits[:, :, :blank_index],
logits[:, :, blank_index + 1:],
logits[:, :, blank_index:blank_index + 1],
],
axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values,
labels.values - 1), labels.dense_shape)
return ctc_loss(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major)
if blank_index is None:
blank_index = 0
return ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_length,
logit_length=logit_length,
logits_time_major=logits_time_major,
unique=unique,
blank_index=blank_index,
name=name)
@tf_export("nn.ctc_loss", v1=[])
@dispatch.add_dispatch_support
def ctc_loss_v3(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=None,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Notes:
- Same as the "Classic CTC" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss
setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True
- Labels may be supplied as either a dense, zero-padded tensor with a
vector of label sequence lengths OR as a SparseTensor.
- On TPU and GPU: Only dense padded labels are supported.
- On CPU: Caller may use SparseTensor or dense padded labels but calling with
a SparseTensor will be significantly faster.
- Default blank label is 0 rather than num_classes - 1, unless overridden by
blank_index.
Args:
labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size], None if labels is SparseTensor
Length of reference label sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by
ctc_unique_labels(labels). If supplied, enable a faster, memory efficient
implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if blank_index is None:
raise ValueError(
"blank_index must be given when using SparseTensor labels.")
if blank_index < 0:
blank_index += _get_dim(logits, 2)
params = {
"labels": labels,
"logits": logits,
"logit_length": logit_length,
"logits_time_major": logits_time_major,
"blank_index": blank_index
}
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME or
(device_type is None and context.num_gpus() > 0)))
# Under eager context, check the device placement and prefer the GPU implementation when a GPU can be used.
if can_use_gpu:
res = _ctc_loss_op_cudnn(**params)
else:
res = _ctc_loss_op_standard(**params)
else:
api_name = "ctc_loss_" + str(uuid.uuid4())
ctc_loss_op_standard = _generate_defun_backend(api_name, _CPU_DEVICE_NAME,
_ctc_loss_op_standard)
ctc_loss_op_cudnn = _generate_defun_backend(api_name, _GPU_DEVICE_NAME,
_ctc_loss_op_cudnn)
res = ctc_loss_op_standard(**params)
function_eager.register(ctc_loss_op_cudnn, **params)
return res
if blank_index is None:
blank_index = 0
return ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_length,
logit_length=logit_length,
logits_time_major=logits_time_major,
unique=unique,
blank_index=blank_index,
name=name)
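# A minimal sketch of the dense-label path above (not part of the module); all
# shapes and label values are assumptions chosen for illustration, with the
# documented default blank_index=0.
def _example_ctc_loss_v3():  # illustrative only
  import tensorflow as tf  # assumes a TF 2.x-style API is available
  frames, batch_size, num_labels = 50, 2, 28
  logits = tf.random.normal([frames, batch_size, num_labels])
  labels = tf.constant([[1, 2, 3, 0], [4, 5, 0, 0]], dtype=tf.int32)
  return tf.nn.ctc_loss(
      labels=labels,
      logits=logits,
      label_length=tf.constant([3, 2], dtype=tf.int32),
      logit_length=tf.constant([frames, frames], dtype=tf.int32),
      logits_time_major=True,
      blank_index=0)  # shape [batch_size], negative log probabilities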
def ctc_loss_dense(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=0,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006),
using the batched forward backward algorithm described in (Sim et al., 2017).
Notes:
Significant differences from tf.compat.v1.nn.ctc_loss:
Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only):
For batched operations, GPU and TPU are significantly faster than using
ctc_loss on CPU.
This implementation also runs on CPU, but significantly slower than ctc_loss.
Blank label is 0 rather than num_classes - 1, unless overridden by blank_index.
Logits and labels are dense arrays with padding rather than SparseTensor.
The only mode supported is the same as:
preprocess_collapse_repeated=False, ctc_merge_repeated=True
To collapse labels, the caller can preprocess label sequence first.
The dense implementation supports CPU, GPU and TPU. A fast path is
provided that significantly improves memory use for large vocabularies if the
caller preprocesses label sequences to get unique label indices on the CPU
(eg. in the data input pipeline) using ctc_ops.unique and supplies them via
the optional "unique" kwarg. This is especially useful for TPU and GPU but
also works if used on CPU.
Args:
labels: tensor of shape [batch_size, max_label_seq_length]
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by unique(labels). If
supplied, enable a faster, memory efficient implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
Improving the efficiency of forward-backward algorithm using batched
computation in TensorFlow:
[Sim et al., 2017](https://ieeexplore.ieee.org/document/8268944)
([pdf](http://bacchiani.net/resume/papers/ASRU2017.pdf))
"""
with ops.name_scope(name, "ctc_loss_dense",
[logits, labels, label_length, logit_length]):
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
label_length = ops.convert_to_tensor(label_length, name="label_length")
logit_length = ops.convert_to_tensor(logit_length, name="logit_length")
if not logits_time_major:
logits = array_ops.transpose(logits, perm=[1, 0, 2])
if blank_index != 0:
if blank_index < 0:
blank_index += _get_dim(logits, 2)
logits = array_ops.concat([
logits[:, :, blank_index:blank_index + 1],
logits[:, :, :blank_index],
logits[:, :, blank_index + 1:],
],
axis=2)
labels = array_ops.where(labels < blank_index, labels + 1, labels)
args = [logits, labels, label_length, logit_length]
if unique:
unique_y, unique_idx = unique
if blank_index != 0:
unique_y = array_ops.where(unique_y < blank_index, unique_y + 1,
unique_y)
label_mask_len = math_ops.reduce_max(unique_idx, axis=1) + 1
max_label_length = _get_dim(unique_y, 1)
label_mask = array_ops.sequence_mask(label_mask_len, max_label_length)
unique_y = array_ops.where(label_mask, unique_y,
array_ops.zeros_like(unique_y))
args.extend([unique_y, unique_idx])
@custom_gradient.custom_gradient
def compute_ctc_loss(logits_t, labels_t, label_length_t, logit_length_t,
*unique_t):
"""Compute CTC loss."""
logits_t.set_shape(logits.shape)
labels_t.set_shape(labels.shape)
label_length_t.set_shape(label_length.shape)
logit_length_t.set_shape(logit_length.shape)
kwargs = dict(
logits=logits_t,
labels=labels_t,
label_length=label_length_t,
logit_length=logit_length_t)
if unique_t:
kwargs["unique"] = unique_t
result = ctc_loss_and_grad(**kwargs)
def grad(grad_loss):
grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * result[1]]
grad += [None] * (len(args) - len(grad))
return grad
return result[0], grad
return compute_ctc_loss(*args)
@tf_export("nn.collapse_repeated")
@dispatch.add_dispatch_support
def collapse_repeated(labels, seq_length, name=None):
"""Merge repeated labels into single labels.
Args:
labels: Tensor of shape [batch, max value in seq_length]
seq_length: Tensor of shape [batch], sequence length of each batch element.
name: A name for this `Op`. Defaults to "collapse_repeated_labels".
Returns:
A tuple `(collapsed_labels, new_seq_length)` where
collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
labels collapsed and padded to max_seq_length, eg:
`[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`
new_seq_length: int tensor of shape [batch] with new sequence lengths.
"""
with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]):
labels = ops.convert_to_tensor(labels, name="labels")
seq_length = ops.convert_to_tensor(seq_length, name="seq_length")
# Mask labels that don't equal previous label.
label_mask = array_ops.concat([
array_ops.ones_like(labels[:, :1], dtypes.bool),
math_ops.not_equal(labels[:, 1:], labels[:, :-1])
],
axis=1)
# Filter labels that aren't in the original sequence.
maxlen = _get_dim(labels, 1)
seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
label_mask = math_ops.logical_and(label_mask, seq_mask)
# Count masks for new sequence lengths.
new_seq_len = math_ops.reduce_sum(
math_ops.cast(label_mask, dtypes.int32), axis=1)
# Mask indexes based on sequence length mask.
new_maxlen = math_ops.reduce_max(new_seq_len)
idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)
# Flatten everything and mask out labels to keep and sparse indices.
flat_labels = array_ops.reshape(labels, [-1])
flat_label_mask = array_ops.reshape(label_mask, [-1])
flat_idx_mask = array_ops.reshape(idx_mask, [-1])
idx = math_ops.range(_get_dim(flat_idx_mask, 0))
# Scatter to flat shape.
flat = array_ops.scatter_nd(
indices=array_ops.expand_dims(
array_ops.boolean_mask(idx, flat_idx_mask), axis=1),
updates=array_ops.boolean_mask(flat_labels, flat_label_mask),
shape=array_ops.shape(flat_idx_mask))
# Reshape back to square batch.
batch_size = _get_dim(labels, 0)
new_shape = [batch_size, new_maxlen]
return (array_ops.reshape(flat, new_shape),
math_ops.cast(new_seq_len, seq_length.dtype))
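# A runnable restatement of the docstring example above; the values are the
# same illustrative ones, written as user code would call the public API.
def _example_collapse_repeated():  # illustrative only
  import tensorflow as tf
  labels = tf.constant([[1, 1, 2, 2, 1], [1, 2, 3, 4, 5]])
  seq_length = tf.constant([5, 5])
  collapsed, new_len = tf.nn.collapse_repeated(labels, seq_length)
  # collapsed == [[1, 2, 1, 0, 0], [1, 2, 3, 4, 5]], new_len == [3, 5]
  return collapsed, new_len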
def dense_labels_to_sparse(dense, length):
"""Convert dense labels with sequence lengths to sparse tensor.
Args:
dense: tensor of shape [batch, max_length]
length: int tensor of shape [batch] The length of each sequence in dense.
Returns:
tf.sparse.SparseTensor with values only for the valid elements of sequences.
"""
flat_values = array_ops.reshape(dense, [-1])
flat_indices = math_ops.range(
array_ops.shape(flat_values, out_type=dtypes.int64)[0])
mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1])
flat_mask = array_ops.reshape(mask, [-1])
indices = array_ops.expand_dims(
array_ops.boolean_mask(flat_indices, flat_mask), 1)
values = array_ops.boolean_mask(flat_values, flat_mask)
sparse = sparse_tensor.SparseTensor(
indices=indices,
values=math_ops.cast(values, dtypes.int32),
dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64))
reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense))
max_length = math_ops.reduce_max(length)
return sparse_tensor.SparseTensor(
indices=reshaped.indices,
values=reshaped.values,
dense_shape=[
math_ops.cast(reshaped.dense_shape[0], dtypes.int64),
math_ops.cast(max_length, dtypes.int64)
])
@tf_export("nn.ctc_unique_labels")
@dispatch.add_dispatch_support
def ctc_unique_labels(labels, name=None):
"""Get unique labels and indices for batched labels for `tf.nn.ctc_loss`.
For use with `tf.nn.ctc_loss` optional argument `unique`: This op can be
used to preprocess labels in the input pipeline for better speed/memory use
when computing the ctc loss on TPU.
Example:
ctc_unique_labels([[3, 4, 4, 3]]) ->
unique labels padded with 0: [[3, 4, 0, 0]]
indices of original labels in unique: [0, 1, 1, 0]
Args:
labels: tensor of shape [batch_size, max_label_length] padded with 0.
name: A name for this `Op`. Defaults to "ctc_unique_labels".
Returns:
tuple of
- unique labels, tensor of shape `[batch_size, max_label_length]`
- indices into unique labels, shape `[batch_size, max_label_length]`
"""
with ops.name_scope(name, "ctc_unique_labels", [labels]):
labels = ops.convert_to_tensor(labels, name="labels")
def _unique(x):
u = array_ops.unique(x)
y = array_ops.pad(u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])
y = math_ops.cast(y, dtypes.int64)
return [y, u.idx]
return map_fn.map_fn(_unique, labels, dtype=[dtypes.int64, dtypes.int32])
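# A sketch of wiring the unique indices into the dense loss path; the shapes
# and label values are illustrative assumptions only.
def _example_ctc_loss_with_unique():  # illustrative only
  import tensorflow as tf
  labels = tf.constant([[3, 4, 4, 3]], dtype=tf.int32)
  logits = tf.random.normal([10, 1, 6])  # [frames, batch, num_labels]
  unique = tf.nn.ctc_unique_labels(labels)
  return tf.nn.ctc_loss(
      labels=labels,
      logits=logits,
      label_length=tf.constant([4], dtype=tf.int32),
      logit_length=tf.constant([10], dtype=tf.int32),
      blank_index=0,
      unique=unique)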
def _sum_states(idx, states):
"""Take logsumexp for each unique state out of all label states.
Args:
idx: tensor of shape [batch, label_length] For each sequence, indices into a
set of unique labels as computed by calling unique.
states: tensor of shape [frames, batch, label_length] Log probabilities for
each label state.
Returns:
tensor of shape [frames, batch_size, label_length], log probabilities summed
for each unique label of the sequence.
"""
with ops.name_scope("sum_states"):
idx = ops.convert_to_tensor(idx, name="idx")
num_states = _get_dim(states, 2)
states = array_ops.expand_dims(states, axis=2)
one_hot = array_ops.one_hot(
idx,
depth=num_states,
on_value=0.0,
off_value=math_ops.log(0.0),
axis=1)
return math_ops.reduce_logsumexp(states + one_hot, axis=-1)
def _forward_backward_log(state_trans_log_probs, initial_state_log_probs,
final_state_log_probs, observed_log_probs,
sequence_length):
"""Forward-backward algorithm computed in log domain.
Args:
state_trans_log_probs: tensor of shape [states, states] or if different
transition matrix per batch [batch_size, states, states]
initial_state_log_probs: tensor of shape [batch_size, states]
final_state_log_probs: tensor of shape [batch_size, states]
observed_log_probs: tensor of shape [frames, batch_size, states]
sequence_length: tensor of shape [batch_size]
Returns:
forward backward log probabilities: tensor of shape [frames, batch, states]
log_likelihood: tensor of shape [batch_size]
Raises:
ValueError: If state_trans_log_probs has unknown or incorrect rank.
"""
if state_trans_log_probs.shape.ndims == 2:
perm = [1, 0]
elif state_trans_log_probs.shape.ndims == 3:
perm = [0, 2, 1]
else:
raise ValueError(
"state_trans_log_probs rank must be known and == 2 or 3, is: %s" %
state_trans_log_probs.shape.ndims)
bwd_state_trans_log_probs = array_ops.transpose(state_trans_log_probs, perm)
batch_size = _get_dim(observed_log_probs, 1)
def _forward(state_log_prob, obs_log_prob):
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1) # Broadcast.
state_log_prob += state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
state_log_prob += obs_log_prob
log_prob_sum = math_ops.reduce_logsumexp(
state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
return state_log_prob
fwd = _scan(
_forward, observed_log_probs, initial_state_log_probs, inclusive=True)
def _backward(accs, elems):
"""Calculate log probs and cumulative sum masked for sequence length."""
state_log_prob, cum_log_sum = accs
obs_log_prob, mask = elems
state_log_prob += obs_log_prob
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1) # Broadcast.
state_log_prob += bwd_state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
log_prob_sum = math_ops.reduce_logsumexp(
state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
cum_log_sum += array_ops.squeeze(log_prob_sum) * mask
batched_mask = array_ops.expand_dims(mask, axis=1)
out = state_log_prob * batched_mask
out += final_state_log_probs * (1.0 - batched_mask)
return out, cum_log_sum
zero_log_sum = array_ops.zeros([batch_size])
maxlen = _get_dim(observed_log_probs, 0)
mask = array_ops.sequence_mask(sequence_length, maxlen, dtypes.float32)
mask = array_ops.transpose(mask, perm=[1, 0])
bwd, cum_log_sum = _scan(
_backward, (observed_log_probs, mask),
(final_state_log_probs, zero_log_sum),
reverse=True,
inclusive=True)
fwd_bwd_log_probs = fwd[1:] + bwd[1:]
fwd_bwd_log_probs_sum = math_ops.reduce_logsumexp(
fwd_bwd_log_probs, axis=2, keepdims=True)
fwd_bwd_log_probs -= fwd_bwd_log_probs_sum
fwd_bwd_log_probs += math_ops.log(array_ops.expand_dims(mask, axis=2))
log_likelihood = bwd[0, :, 0] + cum_log_sum[0]
return fwd_bwd_log_probs, log_likelihood
# TODO(tombagby): This is currently faster for the ctc implementation than using
# functional_ops.scan, but could be replaced by that or something similar if
# things change.
def _scan(fn, elems, initial, reverse=False, inclusive=False, final_only=False):
"""Repeatedly applies callable `fn` to a sequence of elements.
Implemented by functional_ops.While, tpu friendly, no gradient.
This is similar to functional_ops.scan but significantly faster on tpu/gpu
for the forward backward use case.
Examples:
scan(lambda a, e: a + e, [1.0, 2.0, 3.0], 1.0) => [2.0, 4.0, 7.0]
Multiple accumulators:
scan(lambda a, e: (a[0] + e, a[1] * e), [1.0, 2.0, 3.0], (0.0, 1.0))
Multiple inputs:
scan(lambda a, e: a + (e[0] * e[1]), (elems1, elems2), 0.0)
Args:
fn: callable, fn(accumulators, element) return new accumulator values. The
(possibly nested) sequence of accumulators is the same as `initial` and
the return value must have the same structure.
elems: A (possibly nested) tensor which will be unpacked along the first
dimension. The resulting slices will be the second argument to fn. The
first dimension of all nested input tensors must be the same.
initial: A tensor or (possibly nested) sequence of tensors with initial
values for the accumulators.
reverse: (optional) True enables scan and output elems in reverse order.
inclusive: (optional) True includes the initial accumulator values in the
output. Length of output will be len(elem sequence) + 1. Not meaningful if
final_only is True.
final_only: (optional) When True, return only the final accumulated values,
not the concatenation of accumulated values for each input.
Returns:
A (possibly nested) sequence of tensors with the results of applying fn
to tensors unpacked from elems and previous accumulator values.
"""
flat_elems = [ops.convert_to_tensor(x) for x in nest.flatten(elems)]
num_elems = array_ops.shape(flat_elems[0])[0]
pack_elems = lambda x: nest.pack_sequence_as(structure=elems, flat_sequence=x)
flat_initial = [ops.convert_to_tensor(x) for x in nest.flatten(initial)]
pack = lambda x: nest.pack_sequence_as(structure=initial, flat_sequence=x)
accum_dtypes = [x.dtype for x in flat_initial]
num_accums = len(flat_initial)
# Types for counter, [outputs], [accumulators] loop arguments.
if final_only:
loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes
else:
loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes + accum_dtypes
# TODO(tombagby): Update to tfe.defun
def cond(i, num_elems, *args):
del args
return i >= 0 if reverse else i < num_elems
# The loop *args are [output tensors] + [accumulator tensors] which must
# be paired. Each output corresponds to one accumulator.
def body(i, num_elems, *args):
"""Loop body."""
i.set_shape([])
if final_only:
accum = args
else:
out, accum = args[:num_accums], args[num_accums:]
slices = [array_ops.gather(e, i) for e in flat_elems]
accum = fn(pack(accum), pack_elems(slices))
flat_accum = nest.flatten(accum)
if final_only:
new_out = []
else:
update_i = i + 1 if inclusive and not reverse else i
new_out = [
inplace_ops.alias_inplace_update(x, update_i, y)
for x, y in zip(out, flat_accum)
]
i = i - 1 if reverse else i + 1
return [i, num_elems] + new_out + flat_accum
init_i = (
array_ops.shape(flat_elems[0])[0] -
1 if reverse else constant_op.constant(0, dtype=dtypes.int32))
outputs = []
if not final_only:
num_outputs = array_ops.shape(flat_elems[0])[0] + (1 if inclusive else 0)
for initial_accum in flat_initial:
out_shape = array_ops.concat(
[[num_outputs], array_ops.shape(initial_accum)], 0)
out = inplace_ops.empty(out_shape, dtype=initial_accum.dtype, init=True)
if inclusive:
out = inplace_ops.alias_inplace_add(out, init_i + (1 if reverse else 0),
initial_accum)
outputs.append(out)
loop_in = [init_i, num_elems] + outputs + flat_initial
hostmem = [
i for i, x in enumerate(loop_in)
if x.dtype.base_dtype in (dtypes.int32, dtypes.int64)
]
if context.executing_eagerly():
loop_results = loop_in
while cond(*loop_results):
loop_results = body(*loop_results)
else:
# TODO(tombagby): Update to while_v2.
cond = function.Defun(*loop_dtypes)(cond)
body = function.Defun(*loop_dtypes)(body)
loop_results = functional_ops.While(loop_in, cond, body, hostmem=hostmem)
out = loop_results[2:num_accums + 2]
return pack(out)
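# Illustrative sketch (not part of the TensorFlow implementation): a minimal pure-Python
# version of the semantics documented in the _scan docstring above, for the simplest case
# of a single accumulator, a single input sequence, forward order, and no
# `inclusive`/`final_only` handling.
def _scan_reference_sketch(fn, elems, initial):
  """Illustrative only: cumulative application of `fn`, e.g.
  _scan_reference_sketch(lambda a, e: a + e, [1.0, 2.0, 3.0], 1.0) -> [2.0, 4.0, 7.0]."""
  outputs = []
  accum = initial
  for elem in elems:
    accum = fn(accum, elem)  # new accumulator from previous accumulator and element
    outputs.append(accum)
  return outputs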
def _get_dim(tensor, i):
"""Get value of tensor shape[i] preferring static value if available."""
return tensor_shape.dimension_value(
tensor.shape[i]) or array_ops.shape(tensor)[i]
| apache-2.0 | -1,484,289,627,133,780,500 | 37.993179 | 80 | 0.662322 | false |
schmidtc/pysal | pysal/spreg/diagnostics.py | 6 | 35451 | """
Diagnostics for regression estimations.
"""
__author__ = "Luc Anselin [email protected], Nicholas Malizia [email protected] "
import pysal
from pysal.common import *
import scipy.sparse as SP
from math import sqrt
from utils import spmultiply, sphstack, spmin, spmax
__all__ = [
"f_stat", "t_stat", "r2", "ar2", "se_betas", "log_likelihood", "akaike", "schwarz",
"condition_index", "jarque_bera", "breusch_pagan", "white", "koenker_bassett", "vif", "likratiotest"]
def f_stat(reg):
"""
Calculates the f-statistic and associated p-value of the
regression. [Greene2003]_
(For two stage least squares see f_stat_tsls)
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
fs_result : tuple
includes value of F statistic and associated p-value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the F-statistic for the regression.
>>> testresult = diagnostics.f_stat(reg)
Print the results tuple, including the statistic and its significance.
>>> print("%12.12f"%testresult[0],"%12.12f"%testresult[1])
('28.385629224695', '0.000000009341')
"""
k = reg.k # (scalar) number of ind. vars (includes constant)
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
predy = reg.predy # (array) vector of predicted values (n x 1)
mean_y = reg.mean_y # (scalar) mean of dependent observations
Q = utu
U = np.sum((predy - mean_y) ** 2)
fStat = (U / (k - 1)) / (Q / (n - k))
pValue = stats.f.sf(fStat, k - 1, n - k)
fs_result = (fStat, pValue)
return fs_result
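# Illustrative sketch (not part of pysal): the F computation performed above, written
# directly from the docstring quantities; the argument names here are hypothetical and
# simply mirror the attributes read from the regression object.
def _f_stat_sketch(y, predy, utu, k):
    """Illustrative only: F = (ESS / (k - 1)) / (RSS / (n - k))."""
    import numpy as np
    from scipy import stats as _stats
    n = y.shape[0]
    ess = float(np.sum((predy - y.mean()) ** 2))  # explained sum of squares (U above)
    rss = float(utu)                              # residual sum of squares (Q above)
    f = (ess / (k - 1)) / (rss / (n - k))
    return f, _stats.f.sf(f, k - 1, n - k)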
def t_stat(reg, z_stat=False):
"""
Calculates the t-statistics (or z-statistics) and associated
p-values. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
z_stat : boolean
If True run z-stat instead of t-stat
Returns
-------
ts_result : list of tuples
each tuple includes value of t statistic (or z
statistic) and associated p-value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate t-statistics for the regression coefficients.
>>> testresult = diagnostics.t_stat(reg)
Print the tuples that contain the t-statistics and their significances.
>>> print("%12.12f"%testresult[0][0], "%12.12f"%testresult[0][1], "%12.12f"%testresult[1][0], "%12.12f"%testresult[1][1], "%12.12f"%testresult[2][0], "%12.12f"%testresult[2][1])
('14.490373143689', '0.000000000000', '-4.780496191297', '0.000018289595', '-2.654408642718', '0.010874504910')
"""
k = reg.k # (scalar) number of ind. vars (includes constant)
n = reg.n # (scalar) number of observations
vm = reg.vm # (array) coefficients of variance matrix (k x k)
betas = reg.betas # (array) coefficients of the regressors (1 x k)
variance = vm.diagonal()
tStat = betas[range(0, len(vm))].reshape(len(vm),) / np.sqrt(variance)
ts_result = []
for t in tStat:
if z_stat:
ts_result.append((t, stats.norm.sf(abs(t)) * 2))
else:
ts_result.append((t, stats.t.sf(abs(t), n - k) * 2))
return ts_result
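# Illustrative note: each tuple above is (t_j, p_j) with t_j = beta_j / sqrt(vm[j, j]);
# the p-value uses a Student-t distribution with n - k degrees of freedom, or the
# standard normal when z_stat=True (the asymptotic case).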
def r2(reg):
"""
Calculates the R^2 value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
r2_result : float
value of the coefficient of determination for the
regression
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the R^2 value for the regression.
>>> testresult = diagnostics.r2(reg)
Print the result.
>>> print("%1.8f"%testresult)
0.55240404
"""
y = reg.y # (array) vector of dep observations (n x 1)
mean_y = reg.mean_y # (scalar) mean of dep observations
utu = reg.utu # (scalar) residual sum of squares
ss_tot = ((y - mean_y) ** 2).sum(0)
r2 = 1 - utu / ss_tot
r2_result = r2[0]
return r2_result
def ar2(reg):
"""
Calculates the adjusted R^2 value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
ar2_result : float
value of R^2 adjusted for the number of explanatory
variables.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the adjusted R^2 value for the regression.
>>> testresult = diagnostics.ar2(reg)
Print the result.
>>> print("%1.8f"%testresult)
0.53294335
"""
k = reg.k # (scalar) number of ind. variables (includes constant)
n = reg.n # (scalar) number of observations
ar2_result = 1 - (1 - r2(reg)) * (n - 1) / (n - k)
return ar2_result
def se_betas(reg):
"""
Calculates the standard error of the regression coefficients. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
se_result : array
includes standard errors of each coefficient (1 x k)
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the standard errors of the regression coefficients.
>>> testresult = diagnostics.se_betas(reg)
Print the vector of standard errors.
>>> testresult
array([ 4.73548613, 0.33413076, 0.10319868])
"""
vm = reg.vm # (array) coefficients of variance matrix (k x k)
variance = vm.diagonal()
se_result = np.sqrt(variance)
return se_result
def log_likelihood(reg):
"""
Calculates the log-likelihood value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
ll_result : float
value for the log-likelihood of the regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the log-likelihood for the regression.
>>> testresult = diagnostics.log_likelihood(reg)
Print the result.
>>> testresult
-187.3772388121491
"""
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
ll_result = -0.5 * \
(n * (np.log(2 * math.pi)) + n * np.log(utu / n) + (utu / (utu / n)))
return ll_result
def akaike(reg):
"""
Calculates the Akaike Information Criterion. [Akaike1974]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
aic_result : scalar
value for Akaike Information Criterion of the
regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Akaike Information Criterion (AIC).
>>> testresult = diagnostics.akaike(reg)
Print the result.
>>> testresult
380.7544776242982
"""
k = reg.k # (scalar) number of explanatory vars (including constant)
try: # ML estimation, logll already exists
# spatial coefficient included in k
aic_result = 2.0 * k - 2.0 * reg.logll
except AttributeError: # OLS case
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
aic_result = 2 * k + n * (np.log((2 * np.pi * utu) / n) + 1)
return aic_result
def schwarz(reg):
"""
Calculates the Schwarz Information Criterion. [Schwarz1978]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
bic_result : scalar
value for Schwarz (Bayesian) Information Criterion of
the regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Schwarz Information Criterion.
>>> testresult = diagnostics.schwarz(reg)
Print the results.
>>> testresult
386.42993851863008
"""
n = reg.n # (scalar) number of observations
k = reg.k # (scalar) number of ind. variables (including constant)
try: # ML case logll already computed
# spatial coeff included in k
sc_result = k * np.log(n) - 2.0 * reg.logll
except AttributeError: # OLS case
utu = reg.utu # (scalar) residual sum of squares
sc_result = k * np.log(n) + n * (np.log((2 * np.pi * utu) / n) + 1)
return sc_result
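# Illustrative note: with L the maximized log-likelihood, k the number of parameters and
# n the number of observations, the two criteria computed above reduce to
#   AIC = 2*k - 2*L        (akaike)
#   BIC = k*log(n) - 2*L   (schwarz)
# and the OLS branches simply substitute the closed-form Gaussian log-likelihood for L.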
def condition_index(reg):
"""
    Calculates the multicollinearity condition index according to Belsley,
    Kuh and Welsch (1980) [Belsley1980]_.
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
ci_result : float
scalar value for the multicollinearity condition
index.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the condition index to check for multicollinearity.
>>> testresult = diagnostics.condition_index(reg)
Print the result.
>>> print("%1.3f"%testresult)
6.542
"""
if hasattr(reg, 'xtx'):
xtx = reg.xtx # (array) k x k projection matrix (includes constant)
elif hasattr(reg, 'hth'):
xtx = reg.hth # (array) k x k projection matrix (includes constant)
diag = np.diagonal(xtx)
scale = xtx / diag
eigval = np.linalg.eigvals(scale)
max_eigval = max(eigval)
min_eigval = min(eigval)
ci_result = sqrt(max_eigval / min_eigval)
return ci_result
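# Illustrative note: the index above is sqrt(lambda_max / lambda_min) of the
# cross-product matrix X'X (or H'H) after scaling each column by its diagonal element;
# values above roughly 30 are conventionally read as a sign of serious multicollinearity
# (the same threshold white() applies below before skipping its computation).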
def jarque_bera(reg):
"""
Jarque-Bera test for normality in the residuals. [Jarque1980]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
jb_result : dictionary
contains the statistic (jb) for the Jarque-Bera test
and the associated p-value (p-value)
df : integer
degrees of freedom for the test (always 2)
jb : float
value of the test statistic
pvalue : float
p-value associated with the statistic (chi^2
distributed with 2 df)
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Jarque-Bera test for normality of residuals.
>>> testresult = diagnostics.jarque_bera(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['jb'])
1.836
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.3994
"""
n = reg.n # (scalar) number of observations
u = reg.u # (array) residuals from regression
u2 = u ** 2
u3 = u ** 3
u4 = u ** 4
mu2 = np.mean(u2)
mu3 = np.mean(u3)
mu4 = np.mean(u4)
S = mu3 / (mu2 ** (1.5)) # skewness measure
K = (mu4 / (mu2 ** 2)) # kurtosis measure
jb = n * (((S ** 2) / 6) + ((K - 3) ** 2) / 24)
pvalue = stats.chisqprob(jb, 2)
jb_result = {"df": 2, "jb": jb, 'pvalue': pvalue}
return jb_result
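# Illustrative sketch (not part of pysal): the statistic assembled above,
# JB = n * (S**2 / 6 + (K - 3)**2 / 24), asymptotically chi^2 with 2 df under normality
# of the residuals.
def _jarque_bera_sketch(u):
    """Illustrative only: `u` is any 1-d array of regression residuals."""
    import numpy as np
    from scipy import stats as _stats
    u = np.asarray(u, dtype=float).ravel()
    n = u.shape[0]
    mu2 = np.mean(u ** 2)
    mu3 = np.mean(u ** 3)
    mu4 = np.mean(u ** 4)
    S = mu3 / mu2 ** 1.5          # skewness measure
    K = mu4 / mu2 ** 2            # kurtosis measure
    jb = n * (S ** 2 / 6.0 + (K - 3) ** 2 / 24.0)
    return jb, _stats.chi2.sf(jb, 2)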
def breusch_pagan(reg, z=None):
"""
Calculates the Breusch-Pagan test statistic to check for
heteroscedasticity. [Breusch1979]_
Parameters
----------
reg : regression object
output instance from a regression model
z : array
optional input for specifying an alternative set of
variables (Z) to explain the observed variance. By
default this is a matrix of the squared explanatory
variables (X**2) with a constant added to the first
column if not already present. In the default case,
the explanatory variables are squared to eliminate
negative values.
Returns
-------
bp_result : dictionary
contains the statistic (bp) for the test and the
associated p-value (p-value)
bp : float
scalar value for the Breusch-Pagan test statistic
df : integer
degrees of freedom associated with the test (k)
pvalue : float
p-value associated with the statistic (chi^2
distributed with k df)
Notes
-----
x attribute in the reg object must have a constant term included. This is
standard for spreg.OLS so no testing done to confirm constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Breusch-Pagan test for heteroscedasticity.
>>> testresult = diagnostics.breusch_pagan(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['bp'])
7.900
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0193
"""
e2 = reg.u ** 2
e = reg.u
n = reg.n
k = reg.k
ete = reg.utu
den = ete / n
g = e2 / den - 1.0
    if z is None:
x = reg.x
#constant = constant_check(x)
# if constant == False:
# z = np.hstack((np.ones((n,1)),x))**2
# else:
# z = x**2
z = spmultiply(x, x)
else:
#constant = constant_check(z)
# if constant == False:
# z = np.hstack((np.ones((n,1)),z))
pass
n, p = z.shape
# Check to identify any duplicate columns in Z
omitcolumn = []
for i in range(p):
current = z[:, i]
for j in range(p):
check = z[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed (done in reverse to
# prevent renumbering)
omitcolumn.sort()
omitcolumn.reverse()
for c in omitcolumn:
z = np.delete(z, c, 1)
n, p = z.shape
df = p - 1
# Now that the variables are prepared, we calculate the statistic
zt = np.transpose(z)
gt = np.transpose(g)
gtz = np.dot(gt, z)
ztg = np.dot(zt, g)
ztz = np.dot(zt, z)
ztzi = la.inv(ztz)
part1 = np.dot(gtz, ztzi)
part2 = np.dot(part1, ztg)
bp_array = 0.5 * part2
bp = bp_array[0, 0]
pvalue = stats.chisqprob(bp, df)
bp_result = {'df': df, 'bp': bp, 'pvalue': pvalue}
return bp_result
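# Illustrative note: the statistic above is the Lagrange-multiplier form
#   BP = 0.5 * g'Z (Z'Z)^-1 Z'g,   with g = e**2 / (e'e / n) - 1,
# i.e. half the explained sum of squares from regressing the scaled squared residuals on
# Z; koenker_bassett() below reuses the same algebra with a studentized scaling (1 / v),
# while white() instead uses n * R**2 from an auxiliary regression of e**2 on the
# regressors, their squares and cross-products.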
def white(reg):
"""
Calculates the White test to check for heteroscedasticity. [White1980]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
white_result : dictionary
contains the statistic (white), degrees of freedom
(df) and the associated p-value (pvalue) for the
White test.
white : float
scalar value for the White test statistic.
df : integer
degrees of freedom associated with the test
pvalue : float
p-value associated with the statistic (chi^2
distributed with k df)
Notes
-----
x attribute in the reg object must have a constant term included. This is
standard for spreg.OLS so no testing done to confirm constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the White test for heteroscedasticity.
>>> testresult = diagnostics.white(reg)
Print the degrees of freedom for the test.
>>> print testresult['df']
5
Print the test statistic.
>>> print("%1.3f"%testresult['wh'])
19.946
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0013
"""
e = reg.u ** 2
k = int(reg.k)
n = int(reg.n)
y = reg.y
X = reg.x
#constant = constant_check(X)
# Check for constant, if none add one, see Greene 2003, pg. 222
# if constant == False:
# X = np.hstack((np.ones((n,1)),X))
# Check for multicollinearity in the X matrix
ci = condition_index(reg)
if ci > 30:
white_result = "Not computed due to multicollinearity."
return white_result
# Compute cross-products and squares of the regression variables
if type(X).__name__ == 'ndarray':
A = np.zeros((n, (k * (k + 1)) // 2))
elif type(X).__name__ == 'csc_matrix' or type(X).__name__ == 'csr_matrix':
# this is probably inefficient
A = SP.lil_matrix((n, (k * (k + 1)) // 2))
else:
raise Exception, "unknown X type, %s" % type(X).__name__
counter = 0
for i in range(k):
for j in range(i, k):
v = spmultiply(X[:, i], X[:, j], False)
A[:, counter] = v
counter += 1
# Append the original variables
A = sphstack(X, A) # note: this also converts a LIL to CSR
n, k = A.shape
# Check to identify any duplicate or constant columns in A
omitcolumn = []
for i in range(k):
current = A[:, i]
# remove all constant terms (will add a constant back later)
if spmax(current) == spmin(current):
omitcolumn.append(i)
pass
# do not allow duplicates
for j in range(k):
check = A[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed
if type(A).__name__ == 'ndarray':
A = np.delete(A, omitcolumn, 1)
elif type(A).__name__ == 'csc_matrix' or type(A).__name__ == 'csr_matrix':
# this is probably inefficient
keepcolumn = range(k)
for i in omitcolumn:
keepcolumn.remove(i)
A = A[:, keepcolumn]
else:
raise Exception, "unknown A type, %s" % type(X).__name__
A = sphstack(np.ones((A.shape[0], 1)), A) # add a constant back in
n, k = A.shape
# Conduct the auxiliary regression and calculate the statistic
import ols as OLS
aux_reg = OLS.BaseOLS(e, A)
aux_r2 = r2(aux_reg)
wh = aux_r2 * n
df = k - 1
pvalue = stats.chisqprob(wh, df)
white_result = {'df': df, 'wh': wh, 'pvalue': pvalue}
return white_result
def koenker_bassett(reg, z=None):
"""
Calculates the Koenker-Bassett test statistic to check for
heteroscedasticity. [Koenker1982]_ [Greene2003]_
Parameters
----------
reg : regression output
output from an instance of a regression class
z : array
optional input for specifying an alternative set of
variables (Z) to explain the observed variance. By
default this is a matrix of the squared explanatory
variables (X**2) with a constant added to the first
column if not already present. In the default case,
the explanatory variables are squared to eliminate
negative values.
Returns
-------
kb_result : dictionary
contains the statistic (kb), degrees of freedom (df)
and the associated p-value (pvalue) for the test.
kb : float
scalar value for the Koenker-Bassett test statistic.
df : integer
degrees of freedom associated with the test
pvalue : float
p-value associated with the statistic (chi^2
distributed)
Notes
-----
x attribute in the reg object must have a constant term included. This is
standard for spreg.OLS so no testing done to confirm constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Koenker-Bassett test for heteroscedasticity.
>>> testresult = diagnostics.koenker_bassett(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['kb'])
5.694
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0580
"""
# The notation here matches that of Greene (2003).
u = reg.u ** 2
e = reg.u
n = reg.n
k = reg.k
x = reg.x
ete = reg.utu
#constant = constant_check(x)
ubar = ete / n
ubari = ubar * np.ones((n, 1))
g = u - ubari
v = (1.0 / n) * np.sum((u - ubar) ** 2)
    if z is None:
x = reg.x
#constant = constant_check(x)
# if constant == False:
# z = np.hstack((np.ones((n,1)),x))**2
# else:
# z = x**2
z = spmultiply(x, x)
else:
#constant = constant_check(z)
# if constant == False:
# z = np.hstack((np.ones((n,1)),z))
pass
n, p = z.shape
# Check to identify any duplicate columns in Z
omitcolumn = []
for i in range(p):
current = z[:, i]
for j in range(p):
check = z[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed (done in reverse to
# prevent renumbering)
omitcolumn.sort()
omitcolumn.reverse()
for c in omitcolumn:
z = np.delete(z, c, 1)
n, p = z.shape
df = p - 1
# Conduct the auxiliary regression.
zt = np.transpose(z)
gt = np.transpose(g)
gtz = np.dot(gt, z)
ztg = np.dot(zt, g)
ztz = np.dot(zt, z)
ztzi = la.inv(ztz)
part1 = np.dot(gtz, ztzi)
part2 = np.dot(part1, ztg)
kb_array = (1.0 / v) * part2
kb = kb_array[0, 0]
pvalue = stats.chisqprob(kb, df)
kb_result = {'kb': kb, 'df': df, 'pvalue': pvalue}
return kb_result
def vif(reg):
"""
Calculates the variance inflation factor for each independent variable.
For the ease of indexing the results, the constant is currently
included. This should be omitted when reporting the results to the
output text. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
vif_result : list of tuples
each tuple includes the vif and the tolerance, the
order of the variables corresponds to their order in
the reg.x matrix
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the variance inflation factor (VIF).
>>> testresult = diagnostics.vif(reg)
Select the tuple for the income variable.
>>> incvif = testresult[1]
Print the VIF for income.
>>> print("%12.12f"%incvif[0])
1.333117497189
Print the tolerance for income.
>>> print("%12.12f"%incvif[1])
0.750121427487
Repeat for the home value variable.
>>> hovalvif = testresult[2]
>>> print("%12.12f"%hovalvif[0])
1.333117497189
>>> print("%12.12f"%hovalvif[1])
0.750121427487
"""
X = reg.x
n, k = X.shape
vif_result = []
for j in range(k):
Z = X.copy()
Z = np.delete(Z, j, 1)
y = X[:, j]
import ols as OLS
aux = OLS.BaseOLS(y, Z)
mean_y = aux.mean_y
utu = aux.utu
ss_tot = sum((y - mean_y) ** 2)
if ss_tot == 0:
resj = pysal.MISSINGVALUE
else:
r2aux = 1 - utu / ss_tot
tolj = 1 - r2aux
vifj = 1 / tolj
resj = (vifj, tolj)
vif_result.append(resj)
return vif_result
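# Illustrative note: for each regressor x_j the loop above fits the auxiliary regression
# of x_j on all remaining columns of X and reports
#   tolerance_j = 1 - R_j**2,   VIF_j = 1 / tolerance_j,
# so, for example, a VIF of 10 corresponds to an auxiliary R**2 of 0.9.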
def constant_check(array):
"""
    Checks whether a numpy array includes a constant column (a column whose values are all identical).
Parameters
----------
array : array
an array of variables to be inspected
Returns
-------
constant : boolean
true signifies the presence of a constant
Example
-------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
>>> reg = OLS(y,X)
>>> diagnostics.constant_check(reg.x)
True
"""
n, k = array.shape
constant = False
for j in range(k):
variable = array[:, j]
varmin = variable.min()
varmax = variable.max()
if varmin == varmax:
constant = True
break
return constant
def likratiotest(reg0, reg1):
"""
Likelihood ratio test statistic [Greene2003]_
Parameters
----------
reg0 : regression object for constrained model (H0)
reg1 : regression object for unconstrained model (H1)
Returns
-------
likratio : dictionary
contains the statistic (likr), the degrees of
freedom (df) and the p-value (pvalue)
likr : float
likelihood ratio statistic
df : integer
degrees of freedom
p-value : float
p-value
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> import scipy.stats as stats
>>> import pysal.spreg.ml_lag as lag
Use the baltim sample data set
>>> db = ps.open(ps.examples.get_path("baltim.dbf"),'r')
>>> y_name = "PRICE"
>>> y = np.array(db.by_col(y_name)).T
>>> y.shape = (len(y),1)
>>> x_names = ["NROOM","NBATH","PATIO","FIREPL","AC","GAR","AGE","LOTSZ","SQFT"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> ww = ps.open(ps.examples.get_path("baltim_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w.transform = 'r'
OLS regression
>>> ols1 = ps.spreg.OLS(y,x)
ML Lag regression
>>> mllag1 = lag.ML_Lag(y,x,w)
>>> lr = likratiotest(ols1,mllag1)
>>> print "Likelihood Ratio Test: {0:.4f} df: {1} p-value: {2:.4f}".format(lr["likr"],lr["df"],lr["p-value"])
Likelihood Ratio Test: 44.5721 df: 1 p-value: 0.0000
"""
likratio = {}
try:
likr = 2.0 * (reg1.logll - reg0.logll)
except AttributeError:
raise Exception, "Missing or improper log-likelihoods in regression objects"
if likr < 0.0: # always enforces positive likelihood ratio
likr = -likr
pvalue = stats.chisqprob(likr, 1)
likratio = {"likr": likr, "df": 1, "p-value": pvalue}
return likratio
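# Illustrative note: the statistic above is LR = 2 * (logL_H1 - logL_H0), compared
# against a chi^2 distribution whose degrees of freedom equal the number of restrictions
# -- hard-coded to 1 here, matching the single spatial parameter that separates the
# constrained (OLS) and unconstrained (ML lag/error) models in the docstring example.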
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| bsd-3-clause | -1,719,425,724,766,710,800 | 25.396873 | 181 | 0.546416 | false |
olologin/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause | -297,516,978,173,786,430 | 26.525 | 75 | 0.620345 | false |
qvit/django-color-captcha | color_captcha/utils.py | 1 | 1095 | # -*- coding: utf-8 -*-
class IncorrectCaptchaColorsFormatError(Exception):
message = "Incorrect 'CAPTCHA_COLORS' setting format (must be iterable of two-string-value tuples)"
def __str__(self):
return self.message
class TooFewCaptchaColorsError(Exception):
message = "Please specify al least two colors in 'CAPTCHA_COLORS' setting"
def __str__(self):
return self.message
def check_colors(COLORS):
def check_color_option(color_option):
try:
if not (len(color_option) == 2 and
isinstance(color_option[0], basestring) and
isinstance(color_option[1], basestring)):
raise IncorrectCaptchaColorsFormatError()
except IndexError:
raise IncorrectCaptchaColorsFormatError()
try:
iter(COLORS)
except TypeError:
raise IncorrectCaptchaColorsFormatError()
else:
if len(COLORS) < 2:
raise TooFewCaptchaColorsError()
else:
for color_option in COLORS:
check_color_option(color_option)
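# Illustrative note: a setting that passes the checks above would be any iterable of
# two-string tuples with at least two entries, e.g.
#   CAPTCHA_COLORS = (('red', 'Red'), ('green', 'Green'), ('blue', 'Blue'))
# (reading the two strings as a value and a display label is an assumption here; the
# checks only enforce their number and type).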
| mit | 5,993,354,313,832,439,000 | 27.815789 | 103 | 0.624658 | false |
salabim/salabim | test/test_componentgenerator.py | 1 | 4345 | import salabim as sim
import pytest
class X(sim.Component):
def setup(self, color='red'):
self.color = color
self.enter(components)
class Vehicle(sim.Component):
def setup(self):
self.enter(components)
class Car(Vehicle):
pass
class Bus(Vehicle):
pass
class Truck(Vehicle):
pass
def exp(X, run_time=None, *args, **kwargs):
global components
env = sim.Environment()
components = sim.Queue()
sim.ComponentGenerator(X, *args, **kwargs)
env.run(run_time)
return components
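# Illustrative note: exp() builds a fresh simulation environment, forwards every extra
# keyword (iat, at, till, number, force_at, force_till, ...) straight to
# sim.ComponentGenerator, runs the model, and returns the queue that the generated
# components entered, so the assertions below inspect enter_time() relative to those
# generator arguments.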
def test_iat():
components = exp(X, iat=sim.Uniform(0, 2), at=500, till=1000, force_at=True)
assert len(components) == pytest.approx(500, rel=1e-2)
assert components[0].enter_time(components) == 500
assert 998 <= components[-1].enter_time(components) <= 1000
with pytest.raises(ValueError):
components = exp(X, iat=sim.Uniform(0, 2), at=500, till=1000, force_at=True, force_till=True)
components = exp(X, iat=sim.Uniform(0, 2), till=1000, force_at=True)
assert len(components) == pytest.approx(1000, rel=1e-2)
assert components[-1].enter_time(components) <= 1000
components = exp(X, iat=20,at=10, till=111,force_at=True)
assert len(components) == 6
assert components[0].enter_time(components) == 10
assert components[-1].enter_time(components) == 110
components = exp(X, iat=20,at=10, till=111)
assert len(components) == 5
assert components[-1].enter_time(components) == 110
components = exp(X, iat=20,at=10,number=5,force_at=True)
assert len(components) == 5
assert components[0].enter_time(components) == 10
assert components[-1].enter_time(components) == 90
components = exp(X, iat=20,at=10,number=5)
assert len(components) == 5
assert components[0].enter_time(components) == 30
assert components[-1].enter_time(components) == 110
components = exp(X, run_time=110, iat=20, at=10)
assert len(components) == 4
assert components[0].enter_time(components) == 30
assert components[-1].enter_time(components) == 90
def test_spread():
components = exp(X, at=100, till=200, number=10)
assert len(components) == 10
assert components[0].enter_time(components) > 100
assert components[-1].enter_time(components) < 200
components = exp(X, at=100, till=200, number=10, force_at=True)
assert len(components) == 10
assert components[0].enter_time(components) == 100
assert components[-1].enter_time(components) < 200
components = exp(X, at=100, till=200, number=10, force_till=True)
assert len(components) == 10
assert components[0].enter_time(components) > 100
assert components[-1].enter_time(components) == 200
components = exp(X, at=100, till=200, number=10, force_at=True, force_till=True)
assert len(components) == 10
assert components[0].enter_time(components) ==100
assert components[-1].enter_time(components) == 200
components = exp(X, at=100, till=200, number=1, force_till=True)
assert len(components) == 1
assert components[0].enter_time(components) == 200
components = exp(X, at=100, till=200, number=1, force_at=True)
assert len(components) == 1
assert components[0].enter_time(components) == 100
with pytest.raises(ValueError):
components = exp(X, at=100, till=200, number=1, force_at=True, force_till=True)
components = exp(X, at=100, till=200, number=0, force_till=True)
assert len(components) == 0
def test_propagate():
components = exp(X, number=1, iat=1)
assert components[0].color == 'red'
assert components[0].name() == 'x.0'
components = exp(X, number=1, iat=1, color='blue', name='my name,')
assert components[0].color == 'blue'
assert components[0].name() == 'my name.1'
def test_dis():
components = exp(sim.Pdf((Car, Bus, Truck), (50, 30, 20)), iat=1, number=1000)
names = sim.Monitor()
for component in components:
names.tally(component.name().split('.')[0])
# names.print_histogram(values=True, sort_on_weight=True)
if __name__ == "__main__":
pytest.main(["-vv", "-s", __file__])
| mit | -4,639,386,344,932,780,000 | 33.76 | 101 | 0.625547 | false |
imatge-upc/unsupervised-2017-cvprw | autoencoder_train.py | 1 | 7127 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='1'
from os import listdir
import sys
import time
import tools.ops
import subprocess
import numpy as np
import tensorflow as tf
import scipy.misc as sm
from models.autoencoder_net import *
from tools.utilities import *
from tools.ops import *
from random import randint
flags = tf.app.flags
flags.DEFINE_integer('batch_size', 10, 'Batch size.')
flags.DEFINE_integer('num_epochs', 2000, 'Number of epochs.') # ~13 min per epoch
flags.DEFINE_integer('num_gpus', 4, 'Number of GPUs.')
flags.DEFINE_integer('seq_length', 16, 'Length of each video clip.')
flags.DEFINE_integer('height', 128, 'Height of video frame.')
flags.DEFINE_integer('width', 128, 'Width of video frame.')
flags.DEFINE_integer('channel', 3, 'Number of channels for each frame.')
flags.DEFINE_integer('num_sample', 10060, 'Number of samples in this dataset.')
FLAGS = flags.FLAGS
prefix = 'autoencoder'
model_save_dir = './ckpt/' + prefix
logs_save_dir = './logs/' + prefix
pred_save_dir = './output/' + prefix
loss_save_dir = './loss'
train_list_path = './dataset/trainlist.txt'
dataset_path = './dataset/UCF-101-tf-records'
evaluation_job = './jobs/autoencoder_val'
use_pretrained_model = True
save_predictions = True
def run_training():
# Create model directory
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
model_filename = "./mfb_ae_ucf24.model"
# Consturct computational graph
tower_grads = []
tower_losses, tower_rec_losses, tower_wd_losses = [], [], []
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
starter_learning_rate = 1e-4
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000000, 0.8, staircase=True)
opt = tf.train.AdamOptimizer(learning_rate)
# Create a session for running Ops on the Graph.
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = None
train_list_file = open(train_list_path, 'r')
train_list = train_list_file.read().splitlines()
for i, line in enumerate(train_list):
train_list[i] = os.path.join(dataset_path, train_list[i])
assert(len(train_list) % FLAGS.num_gpus == 0)
num_for_each_gpu = len(train_list) // FLAGS.num_gpus
clips_list = []
with sess.as_default():
for i in range(FLAGS.num_gpus):
clips, _, _ = input_pipeline(train_list[i*num_for_each_gpu:(i+1)*num_for_each_gpu], \
FLAGS.batch_size, num_epochs=FLAGS.num_epochs, is_training=True)
clips_list.append(clips)
autoencoder_list = []
with tf.variable_scope('vars') as var_scope:
for gpu_index in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % (gpu_index)):
with tf.name_scope('%s_%d' % ('tower', gpu_index)) as scope:
# construct model
autoencoder = autoencoder_net(clips_list[gpu_index], FLAGS.height, FLAGS.width, FLAGS.seq_length, \
FLAGS.channel, FLAGS.batch_size)
autoencoder_list.append(autoencoder)
loss, rec_loss, wd_loss = tower_loss(scope, autoencoder, clips_list[gpu_index])
var_scope.reuse_variables()
vars_to_optimize = tf.trainable_variables()
grads = opt.compute_gradients(loss, var_list=vars_to_optimize)
tower_grads.append(grads)
tower_losses.append(loss)
tower_rec_losses.append(rec_loss)
tower_wd_losses.append(wd_loss)
# concatenate the losses of all towers
loss_op = tf.reduce_mean(tower_losses)
rec_loss_op = tf.reduce_mean(tower_rec_losses)
wd_loss_op = tf.reduce_mean(tower_wd_losses)
tf.summary.scalar('loss', loss_op)
tf.summary.scalar('rec_loss', rec_loss_op)
tf.summary.scalar('wd_loss', wd_loss_op)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
grads = average_gradients(tower_grads)
with tf.control_dependencies(update_ops):
train_op = opt.apply_gradients(grads, global_step=global_step)
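    # Illustrative note: this is the usual synchronous multi-tower pattern -- each GPU
    # builds the same autoencoder graph on its own input shard inside a shared variable
    # scope, average_gradients() averages the per-tower gradients, and a single
    # apply_gradients() step (gated on the collected update ops) updates the shared
    # weights once per global step.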
# saver for saving checkpoints
saver = tf.train.Saver(max_to_keep=10)
init = tf.initialize_all_variables()
sess.run(init)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
if use_pretrained_model:
print('[*] Loading checkpoint ...')
model = tf.train.latest_checkpoint(model_save_dir)
if model is not None:
saver.restore(sess, model)
print('[*] Loading success: %s!'%model)
else:
print('[*] Loading failed ...')
# Create summary writer
merged = tf.summary.merge_all()
if not os.path.exists(logs_save_dir):
os.makedirs(logs_save_dir)
sum_writer = tf.summary.FileWriter(logs_save_dir, sess.graph)
# Create prediction output folder
if not os.path.exists(pred_save_dir):
os.makedirs(pred_save_dir)
# Create loss output folder
if not os.path.exists(loss_save_dir):
os.makedirs(loss_save_dir)
loss_file = open(os.path.join(loss_save_dir, prefix+'.txt'), 'w')
total_steps = (FLAGS.num_sample / (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs
# start queue runner
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
gpu_idx = 0
try:
with sess.as_default():
print('\n\n\n*********** start training ***********\n\n\n')
while not coord.should_stop():
# Run training steps or whatever
start_time = time.time()
sess.run(train_op)
duration = time.time() - start_time
step = global_step.eval()
if step == 1 or step % 10 == 0: # evaluate loss
loss, rec_loss, wd_loss, lr = sess.run([loss_op, rec_loss_op, wd_loss_op, learning_rate])
line = 'step %d/%d, loss=%.8f, rec=%.8f, lwd=%.8f, dur=%.3f, lr=%.8f' \
%(step, total_steps, loss, rec_loss, wd_loss, duration, lr)
print(line)
loss_file.write(line + '\n')
loss_file.flush()
if step == 1 or step % 10 == 0: # save summary
summary = summary_str = sess.run(merged)
sum_writer.add_summary(summary, step)
if step % 100 == 0 and save_predictions: # save current predictions
clips = clips_list[gpu_idx]
autoencoder = autoencoder_list[gpu_idx]
gt_vid, rec_vid = sess.run([clips[0], autoencoder.rec_vid[0]])
gt_vid, rec_vid = (gt_vid+1)/2*255.0, (rec_vid+1)/2*255.0
rec_img = gen_pred_vid(rec_vid)
gt_img = gen_pred_vid(gt_vid)
save_img = np.concatenate((rec_img, gt_img))
sm.imsave(os.path.join(pred_save_dir, '%07d.jpg'%step), save_img)
gpu_idx += 1
if gpu_idx == FLAGS.num_gpus:
gpu_idx = 0
if step % 500 == 0: # save checkpoint
saver.save(sess, os.path.join(model_save_dir, model_filename), global_step=global_step)
if step % 500 == 0:
pass
# launch a new script for validation (please modify it for your own script)
#subprocess.check_output(['python', evaluation_job])
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
| mit | -5,307,854,796,434,053,000 | 30.816964 | 104 | 0.664515 | false |
pichillilorenzo/JavaScriptEnhancements | src/libs/__init__.py | 1 | 1423 | from . import global_vars
from .javascript_enhancements_settings import javaScriptEnhancements
from . import util
from .node import NodeJS
from .npm import NPM
from .flow import main as flow
from .flow.flow_cli import FlowCLI
from .flow.flow_ide_server import FlowIDEServer, flow_ide_clients, JavascriptEnhancementsStartFlowIDEServerEventListener
from .animation_loader import AnimationLoader
from .repeated_timer import RepeatedTimer
from .hook import Hook
from .terminal import Terminal
from .popup_manager import popup_manager
from .socket import SocketClient
from .socket import SocketServer
from .folder_explorer import FolderExplorer
from .window_view import window_view_manager, WindowView, JavascriptEnhancementsWindowViewKeypressCommand,JavascriptEnhancementsWindowViewEventListener
from .execute_on_terminal import JavascriptEnhancementsExecuteOnTerminalCommand
__all__ = [
"global_vars",
"javaScriptEnhancements",
"util",
"NodeJS",
"NPM",
"AnimationLoader",
"RepeatedTimer",
"Hook",
"Terminal",
"popup_manager",
"SocketClient",
"SocketServer",
"FolderExplorer",
"window_view_manager",
"WindowView",
"JavascriptEnhancementsWindowViewKeypressCommand",
"JavascriptEnhancementsWindowViewEventListener",
"JavascriptEnhancementsExecuteOnTerminalCommand",
"flow",
"FlowCLI",
"FlowIDEServer",
"flow_ide_clients",
"JavascriptEnhancementsStartFlowIDEServerEventListener"
]
| mit | 8,745,076,949,346,265,000 | 31.340909 | 151 | 0.804638 | false |
ESOedX/edx-platform | lms/djangoapps/commerce/tests/test_signals.py | 1 | 13844 | # coding=UTF-8
"""
Tests for signal handling in commerce djangoapp.
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ddt
import httpretty
import mock
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.keys import CourseKey
from requests import Timeout
from six.moves.urllib.parse import urljoin # pylint: disable=import-error
from course_modes.models import CourseMode
from student.signals import REFUND_ORDER
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from ..models import CommerceConfiguration
from ..utils import _generate_refund_notification_body, _send_refund_notification, create_zendesk_ticket
from . import JSON
from .mocks import mock_create_refund, mock_process_refund
ZENDESK_URL = 'http://zendesk.example.com/'
ZENDESK_USER = '[email protected]'
ZENDESK_API_KEY = 'abc123'
@ddt.ddt
@override_settings(ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=ZENDESK_USER, ZENDESK_API_KEY=ZENDESK_API_KEY)
class TestRefundSignal(TestCase):
"""
Exercises logic triggered by the REFUND_ORDER signal.
"""
def setUp(self):
super(TestRefundSignal, self).setUp()
# Ensure the E-Commerce service user exists
UserFactory(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME, is_staff=True)
self.requester = UserFactory(username="test-requester")
self.student = UserFactory(
username="test-student",
email="[email protected]",
)
self.course_enrollment = CourseEnrollmentFactory(
user=self.student,
course_id=CourseKey.from_string('course-v1:org+course+run'),
mode=CourseMode.VERIFIED,
)
self.course_enrollment.refundable = mock.Mock(return_value=True)
self.config = CommerceConfiguration.current()
self.config.enable_automatic_refund_approval = True
self.config.save()
def send_signal(self):
"""
DRY helper: emit the REFUND_ORDER signal, as is done in
common.djangoapps.student.models after a successful unenrollment.
"""
REFUND_ORDER.send(sender=None, course_enrollment=self.course_enrollment)
@override_settings(
ECOMMERCE_PUBLIC_URL_ROOT=None,
ECOMMERCE_API_URL=None,
)
def test_no_service(self):
"""
Ensure that the receiver quietly bypasses attempts to initiate
refunds when there is no external service configured.
"""
with mock.patch('lms.djangoapps.commerce.signals.refund_seat') as mock_refund_seat:
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.refund_seat')
def test_receiver(self, mock_refund_seat):
"""
Ensure that the REFUND_ORDER signal triggers correct calls to
refund_seat(), when it is appropriate to do so.
TODO (jsa): ideally we would assert that the signal receiver got wired
up independently of the import statement in this module. I'm not aware
of any reliable / sane way to do this.
"""
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# if the course_enrollment is not refundable, we should not try to initiate a refund.
mock_refund_seat.reset_mock()
self.course_enrollment.refundable = mock.Mock(return_value=False)
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.refund_seat')
@mock.patch('lms.djangoapps.commerce.signals.get_request_user', return_value=None)
def test_requester(self, mock_get_request_user, mock_refund_seat):
"""
Ensure the right requester is specified when initiating refunds.
"""
# no HTTP request/user: auth to commerce service as the unenrolled student.
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is the student: auth to commerce service as the unenrolled student.
mock_get_request_user.return_value = self.student
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is another user: auth to commerce service as the requester.
mock_get_request_user.return_value = self.requester
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is another server (AnonymousUser): do not try to initiate a refund at all.
mock_get_request_user.return_value = AnonymousUser()
mock_refund_seat.reset_mock()
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.log.exception')
def test_error_logging(self, mock_log_exception):
"""
Ensure that unexpected Exceptions are logged as errors (but do not
break program flow).
"""
with mock_create_refund(status=500):
self.send_signal()
self.assertTrue(mock_log_exception.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_when_approval_fails(self, mock_send_notification):
"""
Ensure the notification function is triggered when refunds are initiated, and cannot be automatically approved.
"""
refund_id = 1
failed_refund_id = 2
with mock_create_refund(status=201, response=[refund_id, failed_refund_id]):
with mock_process_refund(refund_id, reset_on_exit=False):
with mock_process_refund(failed_refund_id, status=500, reset_on_exit=False):
self.send_signal()
self.assertTrue(mock_send_notification.called)
mock_send_notification.assert_called_with(self.course_enrollment.user, [failed_refund_id])
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_if_automatic_approval_disabled(self, mock_send_notification):
"""
Ensure the notification is always sent if the automatic approval functionality is disabled.
"""
refund_id = 1
self.config.enable_automatic_refund_approval = False
self.config.save()
with mock_create_refund(status=201, response=[refund_id]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
mock_send_notification.assert_called_with(self.course_enrollment.user, [refund_id])
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_no_notification_after_approval(self, mock_send_notification):
"""
        Ensure the notification function is NOT triggered when a refund is initiated and then automatically approved.
"""
refund_id = 1
with mock_create_refund(status=201, response=[refund_id]):
with mock_process_refund(refund_id, reset_on_exit=False):
self.send_signal()
self.assertFalse(mock_send_notification.called)
last_request = httpretty.last_request()
self.assertDictEqual(json.loads(last_request.body.decode('utf8')), {'action': 'approve_payment_only'})
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_no_refund(self, mock_send_notification):
"""
Ensure the notification function is NOT triggered when no refunds are
initiated
"""
with mock_create_refund(status=200, response=[]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
@ddt.data(
CourseMode.HONOR,
CourseMode.PROFESSIONAL,
CourseMode.AUDIT,
CourseMode.NO_ID_PROFESSIONAL_MODE,
CourseMode.CREDIT_MODE,
)
def test_notification_not_verified(self, mode, mock_send_notification):
"""
Ensure the notification function is NOT triggered when the
unenrollment is for any mode other than verified (i.e. any mode other
than one for which refunds are presently supported). See the
TODO associated with XCOM-371 in the signals module in the commerce
package for more information.
"""
self.course_enrollment.mode = mode
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification', side_effect=Exception("Splat!"))
@mock.patch('lms.djangoapps.commerce.utils.log.warning')
def test_notification_error(self, mock_log_warning, mock_send_notification):
"""
        Ensure an error occurring during notification does not break program
flow, but a warning is logged.
"""
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
self.assertTrue(mock_log_warning.called)
@mock.patch('openedx.core.djangoapps.theming.helpers.is_request_in_themed_site', return_value=True)
def test_notification_themed_site(self, mock_is_request_in_themed_site): # pylint: disable=unused-argument
"""
Ensure the notification function raises an Exception if used in the
context of themed site.
"""
with self.assertRaises(NotImplementedError):
_send_refund_notification(self.course_enrollment.user, [1, 2, 3])
@ddt.data('[email protected]', 'üñî[email protected]')
@mock.patch('lms.djangoapps.commerce.utils.create_zendesk_ticket')
def test_send_refund_notification(self, student_email, mock_zendesk):
""" Verify the support team is notified of the refund request. """
refund_ids = [1, 2, 3]
# pass a student with unicode and ascii email to ensure that
# generate_refund_notification_body can handle formatting a unicode
# message
self.student.email = student_email
_send_refund_notification(self.course_enrollment.user, refund_ids)
body = _generate_refund_notification_body(self.student, refund_ids)
mock_zendesk.assert_called_with(
self.student.profile.name,
self.student.email,
"[Refund] User-Requested Refund",
body,
['auto_refund']
)
def _mock_zendesk_api(self, status=201):
""" Mock Zendesk's ticket creation API. """
httpretty.register_uri(httpretty.POST, urljoin(ZENDESK_URL, '/api/v2/tickets.json'), status=status,
body='{}', content_type=JSON)
def call_create_zendesk_ticket(self, name='Test user', email='[email protected]', subject='Test Ticket',
body='I want a refund!', tags=None):
""" Call the create_zendesk_ticket function. """
tags = tags or ['auto_refund']
return create_zendesk_ticket(name, email, subject, body, tags)
@override_settings(ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=None, ZENDESK_API_KEY=None)
def test_create_zendesk_ticket_no_settings(self):
""" Verify the Zendesk API is not called if the settings are not all set. """
with mock.patch('requests.post') as mock_post:
success = self.call_create_zendesk_ticket()
self.assertFalse(success)
self.assertFalse(mock_post.called)
def test_create_zendesk_ticket_request_error(self):
"""
Verify exceptions are handled appropriately if the request to the Zendesk API fails.
We simply need to ensure the exception is not raised beyond the function.
"""
with mock.patch('requests.post', side_effect=Timeout) as mock_post:
success = self.call_create_zendesk_ticket()
self.assertFalse(success)
self.assertTrue(mock_post.called)
@httpretty.activate
def test_create_zendesk_ticket(self):
""" Verify the Zendesk API is called. """
self._mock_zendesk_api()
name = 'Test user'
email = '[email protected]'
subject = 'Test Ticket'
body = 'I want a refund!'
tags = ['auto_refund']
ticket_created = self.call_create_zendesk_ticket(name, email, subject, body, tags)
self.assertTrue(ticket_created)
last_request = httpretty.last_request()
# Verify the headers
expected = {
'content-type': JSON,
'Authorization': 'Basic {}'.format(base64.b64encode(
'{user}/token:{pwd}'.format(user=ZENDESK_USER, pwd=ZENDESK_API_KEY).encode('utf8')).decode('utf8')
)
}
self.assertDictContainsSubset(expected, last_request.headers)
# Verify the content
expected = {
'ticket': {
'requester': {
'name': name,
'email': email
},
'subject': subject,
'comment': {'body': body},
'tags': ['LMS'] + tags
}
}
self.assertDictEqual(json.loads(last_request.body.decode('utf8')), expected)
| agpl-3.0 | -3,551,410,567,111,405,000 | 41.457055 | 119 | 0.65306 | false |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/encodings/punycode.py | 586 | 6813 | # -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
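# Illustrative check of the clamping above (comment added for clarity, not in the original source):
# with the initial bias of 72, T(0, 72) == 36*1 - 72 == -36, which is clamped to 1,
# while T(2, 72) == 36*3 - 72 == 36, which is clamped to the maximum of 26.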
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
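# Illustrative round trip (comment added for clarity; expectation based on RFC 3492, not part of the original source):
# punycode_encode(u"b\xfccher") is expected to yield "bcher-kva", the label body
# of the IDNA form "xn--bcher-kva".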
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| gpl-2.0 | 5,143,402,574,014,110,000 | 27.62605 | 74 | 0.552033 | false |
groschovskiy/keyczar | cpp/src/tools/swtoolkit/test/help_test.py | 18 | 2153 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test hammer displays SCons help for SCons help options (MEDIUM TEST)."""
import TestFramework
def main():
test = TestFramework.TestFramework()
expect = "usage: scons [OPTION] [TARGET] ..."
test.run(arguments="-h")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="--help")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="-H")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="--help-options")
test.fail_test(test.stdout().find(expect) == -1)
test.pass_test()
return 0
if __name__ == "__main__":
main()
| apache-2.0 | 1,675,598,016,119,672,600 | 34.883333 | 75 | 0.741291 | false |
erdc-cm/air-water-vv | 2d/floatingStructures/floating_caisson_chrono/redist_n.py | 12 | 3054 | from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools,
NumericalFlux)
from proteus.mprans import RDLS
import redist_p as physics
from proteus import Context
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
# time stepping
runCFL = ct.runCFL
# mesh options
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
femSpaces = {0: ct.basis}
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
massLumping = False
numericalFluxType = NumericalFlux.DoNothing
conservativeFlux = None
subgridError = RDLS.SubgridError(coefficients=physics.coefficients,
nd=ct.domain.nd)
shockCapturing = RDLS.ShockCapturing(coefficients=physics.coefficients,
nd=ct.domain.nd,
shockCapturingFactor=ct.rd_shockCapturingFactor,
lag=ct.rd_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = NonlinearSolvers.NLGaussSeidel
linearSmoother = None
matrix = LinearAlgebraTools.SparseMatrix
if ct.useOldPETSc:
multilevelLinearSolver = LinearSolvers.PETSc
levelLinearSolver = LinearSolvers.PETSc
else:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
if ct.redist_Newton:
timeIntegration = TimeIntegration.NoIntegration
stepController = StepControl.Newton_controller
maxNonlinearIts = 25
maxLineSearches = 0
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
else:
timeIntegration = TimeIntegration.BackwardEuler_cfl
stepController = RDLS.PsiTC
runCFL = 0.5
psitc['nStepsForce'] = 6
psitc['nStepsMax'] = 25
psitc['reduceRatio'] = 3.0
psitc['startRatio'] = 1.0
rtol_res[0] = 0.0
atol_res[0] = ct.rd_nl_atol_res
useEisenstatWalker = False#True
maxNonlinearIts = 1
maxLineSearches = 0
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
linear_solver_options_prefix = 'rdls_'
nl_atol_res = ct.rd_nl_atol_res
tolFac = 0.0
linTolFac = 0.001
l_atol_res = 0.001*ct.rd_nl_atol_res
useEisenstatWalker = False#True
| mit | 4,571,420,932,611,224,600 | 30.8125 | 88 | 0.714473 | false |
ishanic/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause | -2,494,690,223,185,617,400 | 34.791209 | 79 | 0.631767 | false |
adamreis/nyc-jazz | src/lib/werkzeug/testsuite/multipart/collect.py | 78 | 1584 | #!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = file(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
file(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
    return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
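    # Illustrative way to exercise this helper once it is running (hypothetical file names,
    # comment added for clarity): curl -F file1=@a.bin -F file2=@b.bin -F text=hello http://localhost:5000/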
| mit | -1,148,820,576,218,409,100 | 27.285714 | 76 | 0.636364 | false |
mikhail-gorobets/chipsec | chipsec/modules/tools/smm/smm_ptr.py | 8 | 24962 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
"""
CanSecWest 2015
`A New Class of Vulnerability in SMI Handlers of BIOS/UEFI Firmware <https://cansecwest.com/slides/2015/A%20New%20Class%20of%20Vulnin%20SMI%20-%20Andrew%20Furtak.pdf>`_
A tool to test SMI handlers for pointer validation vulnerabilities
Usage:
``chipsec_main -m tools.smm.smm_ptr -l log.txt \``
``[-a <mode>,<config_file>|<smic_start:smic_end>,<size>,<address>]``
- ``mode``: SMI fuzzing mode
* ``config`` = use SMI configuration file <config_file>
* ``fuzz`` = fuzz all SMI handlers with code in the range <smic_start:smic_end>
* ``fuzzmore`` = fuzz mode + pass 2nd-order pointers within buffer to SMI handlers
- ``size``: size of the memory buffer (in Hex)
- ``address``: physical address of memory buffer to pass in GP regs to SMI handlers (in Hex)
* ``smram`` = option passes address of SMRAM base (system may hang in this mode!)
In ``config`` mode, SMI configuration file should have the following format
::
SMI_code=<SMI code> or *
SMI_data=<SMI data> or *
RAX=<value of RAX> or * or PTR or VAL
RBX=<value of RBX> or * or PTR or VAL
RCX=<value of RCX> or * or PTR or VAL
RDX=<value of RDX> or * or PTR or VAL
RSI=<value of RSI> or * or PTR or VAL
RDI=<value of RDI> or * or PTR or VAL
[PTR_OFFSET=<offset to pointer in the buffer>]
[SIG=<signature>]
[SIG_OFFSET=<offset to signature in the buffer>]
[Name=<SMI name>]
[Desc=<SMI description>]
Where
- ``[]``: optional line
- ``*``: Don't Care (the module will replace * with 0x0)
- ``PTR``: Physical address SMI handler will write to (the module will replace PTR with physical address provided as a command-line argument)
- ``VAL``: Value SMI handler will write to PTR address (the module will replace VAL with hardcoded _FILL_VALUE_xx)
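Example of a single configuration entry (illustrative only; the SMI code and name below are hypothetical)
::
    SMI_code=0x5D
    SMI_data=*
    RAX=PTR
    RBX=*
    RCX=*
    RDX=*
    RSI=*
    RDI=*
    Name=example_swsmi
    Desc=Illustrative entry showing the format described above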
"""
from chipsec.module_common import *
from chipsec.file import *
from chipsec.hal.interrupts import Interrupts
#logger.VERBOSE = False
#################################################################
# Fuzzing configuration
#################################################################
#
# Logging option
#
# False - better performance, True - better results tracking
DUMP_MEMORY_ON_DETECT = False
# False - better performance, True - better results tracking
FLUSH_OUTPUT_ALWAYS = False
# makes sure SMI code is logged in case of a crash
FLUSH_OUTPUT_AFTER_SMI = True
# dump all registers in log before every SMI (True - large size of log file)
DUMP_GPRS_EVERY_SMI = True
#
# SMI fuzzing options
#
# stop fuzzing after the first potential issue detected
FUZZ_BAIL_ON_1ST_DETECT = True
# Assume SMI handler subfunctions are passed in the RCX GP register
# Fuzz RCX as SMI subfunctions: from 0 to MAX_SMI_FUNCTIONS
# False - better performance, True - smarter fuzzing
FUZZ_SMI_FUNCTIONS_IN_ECX = True
MAX_SMI_FUNCTIONS = 0x10
# Max value of the value written to SMI data port (0xB3)
MAX_SMI_DATA = 0x100
#
# Pass the pointer to SMI handlers in all general-purpose registers
# rather than in one register
# True - faster, False - gives you specific GPR that the vulnerable SMI handler is consuming
#
PTR_IN_ALL_GPRS = False
#
# SMI handler may take a pointer/PA from (some offset of off) address passed in GPRs and write to it
# Treat contents at physical address passed in GPRs as pointers and check contents at that pointer
# If they changed, SMI handler might have modified them
#
#MODE_SECOND_ORDER_BUFFER = True
# Max offset of the pointer (physical address)
# of the 2nd order buffer written in the memory buffer passed to SMI
MAX_PTR_OFFSET_IN_BUFFER = 0x20
# very obscure option, don't even try to understand
GPR_2ADDR = False
#
# Defaults
#
_FILL_VALUE_QWORD = 0x5A5A5A5A5A5A5A5A
_FILL_VALUE_BYTE = 0x5A
_SMI_CODE_DATA = 0x0
_MEM_FILL_VALUE = chr(0x11)
_MEM_FILL_SIZE = 0x500
_MAX_ALLOC_PA = 0xFFFFFFFF
_DEFAULT_GPRS = {'rax' : _FILL_VALUE_QWORD, 'rbx' : _FILL_VALUE_QWORD, 'rcx' : _FILL_VALUE_QWORD, 'rdx' : _FILL_VALUE_QWORD, 'rsi' : _FILL_VALUE_QWORD, 'rdi' : _FILL_VALUE_QWORD}
_pth = 'smm_ptr'
class BadSMIDetected (RuntimeError):
pass
class smi_desc( object ):
def __init__(self):
self.smi_code = None
self.smi_data = None
self.name = 'smi'
self.desc = ''
self.gprs = _DEFAULT_GPRS
self.ptr_in_buffer = False
self.ptr = None
self.ptr_offset = 0
self.sig = None
self.sig_offset = 0
def DIFF( s, t, sz ):
return [ pos for pos in range( sz ) if s[pos] != t[pos] ]
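# Illustrative example (comment added for clarity, not in the original source):
# DIFF('abcd', 'abXd', 4) -> [2], i.e. only position 2 differs.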
def FILL_BUFFER( _fill_byte, _fill_size, _ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset ):
fill_buf = _fill_byte*_fill_size
if _ptr_in_buffer and _ptr is not None:
fill_buf = fill_buf[ : _ptr_offset ] + struct.pack('=I',_ptr&0xFFFFFFFF) + fill_buf[ _ptr_offset + 4 : ]
if _sig is not None:
fill_buf = fill_buf[ : _sig_offset ] + _sig + fill_buf[ _sig_offset + len(_sig) : ]
return fill_buf
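# Illustrative example (comment added for clarity; assumes a little-endian host):
# FILL_BUFFER(chr(0x11), 8, True, 0x1000, 2, None, 0) packs the 32-bit pointer
# 0x00001000 at offset 2 of an 8-byte buffer otherwise filled with 0x11 bytes.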
class smm_ptr(BaseModule):
def __init__(self):
BaseModule.__init__(self)
self.interrupts = Interrupts( self.cs )
self.is_check_memory = True
self.test_ptr_in_buffer = False
self.fill_byte = _MEM_FILL_VALUE
self.fill_size = _MEM_FILL_SIZE
def is_supported(self):
return True
def fill_memory( self, _addr, is_ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset ):
#
# Fill in contents at PA = _addr with known pattern to check later if any SMI handler modifies them
#
fill_buf = FILL_BUFFER( self.fill_byte, self.fill_size, is_ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset )
s = "[*] writing 0x%X bytes at 0x%016X" % (self.fill_size, _addr)
if is_ptr_in_buffer: s += " -> PTR at +0x%X" % _ptr_offset
if _sig is not None: s += " -> SIG at +0x%X" % _sig_offset
self.logger.log( s )
self.cs.mem.write_physical_mem( _addr, self.fill_size, fill_buf )
if self.logger.VERBOSE:
self.logger.log( "filling in contents at PA 0x%016X:" % _addr )
chipsec.logger.print_buffer( fill_buf )
if is_ptr_in_buffer and _ptr is not None:
self.logger.log( "[*] writing buffer at PA 0x%016X with 0x%X bytes '%c'" % (_ptr, self.fill_size, self.fill_byte) )
self.cs.mem.write_physical_mem( _ptr, self.fill_size, self.fill_byte*self.fill_size )
return True
def send_smi( self, thread_id, smi_code, smi_data, name, desc, rax, rbx, rcx, rdx, rsi, rdi ):
self.logger.log( " > SMI %02X (data: %02X)" % (smi_code,smi_data) )
if DUMP_GPRS_EVERY_SMI:
self.logger.log( " RAX: 0x%016X\n RBX: 0x%016X\n RCX: 0x%016X\n RDX: 0x%016X\n RSI: 0x%016X\n RDI: 0x%016X" % (rax,rbx,rcx,rdx,rsi,rdi) )
self.interrupts.send_SW_SMI( thread_id, smi_code, smi_data, rax, rbx, rcx, rdx, rsi, rdi )
return True
def check_memory( self, _addr, _smi_desc, fn, restore_contents=False ):
_ptr = _smi_desc.ptr
filler = self.fill_byte*self.fill_size
#
# Check if contents have changed at physical address passed in GPRs to SMI handler
# If changed, SMI handler might have written to that address
#
self.logger.log( " < checking buffers" )
expected_buf = FILL_BUFFER( self.fill_byte, self.fill_size, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, _smi_desc.sig, _smi_desc.sig_offset )
buf = self.cs.mem.read_physical_mem( _addr, self.fill_size )
differences = DIFF( expected_buf, buf, self.fill_size )
_changed = (len(differences) > 0)
if self.logger.VERBOSE:
self.logger.log( "checking contents at PA 0x%016X:" % _addr )
chipsec.logger.print_buffer( buf )
self.logger.log( "expected contents:" )
chipsec.logger.print_buffer( expected_buf )
if _changed:
self.logger.log( " contents changed at 0x%016X +%s" % (_addr,differences) )
if restore_contents:
self.logger.log( " restoring 0x%X bytes at 0x%016X" % (self.fill_size, _addr) )
self.cs.mem.write_physical_mem( _addr, self.fill_size, expected_buf )
if DUMP_MEMORY_ON_DETECT:
_pth_smi = os.path.join( _pth, '%X_%s'% (_smi_desc.smi_code,_smi_desc.name) )
if not os.path.exists( _pth_smi ): os.makedirs( _pth_smi )
_f = os.path.join( _pth_smi, fn + '.dmp' )
self.logger.log( " dumping buffer to '%s'" % _f )
write_file( _f, buf )
_changed1 = False
expected_buf = filler
if _smi_desc.ptr_in_buffer and _ptr is not None:
buf1 = self.cs.mem.read_physical_mem( _ptr, self.fill_size )
differences1 = DIFF( expected_buf, buf1, self.fill_size )
_changed1 = (len(differences1) > 0)
if self.logger.VERBOSE:
self.logger.log( "checking contents at PA 0x%016X:" % _ptr )
chipsec.logger.print_buffer( buf1 )
if _changed1:
self.logger.log( " contents changed at 0x%016X +%s" % (_ptr,differences1) )
if restore_contents:
self.logger.log( " restoring 0x%X bytes at PA 0x%016X" % (self.fill_size, _ptr) )
self.cs.mem.write_physical_mem( _ptr, self.fill_size, expected_buf )
if DUMP_MEMORY_ON_DETECT:
_pth_smi = os.path.join( _pth, '%X_%s'% (_smi_desc.smi_code,_smi_desc.name) )
if not os.path.exists( _pth_smi ): os.makedirs( _pth_smi )
_f = os.path.join( _pth_smi, fn + ('_ptr%X.dmp' % _smi_desc.ptr_offset) )
self.logger.log( " dumping buffer to '%s'" % _f )
write_file( _f, buf1 )
return (_changed or _changed1)
def smi_fuzz_iter( self, thread_id, _addr, _smi_desc, fill_contents=True, restore_contents=False ):
#
# Fill memory buffer if not in 'No Fill' mode
#
if self.is_check_memory and fill_contents:
self.fill_memory( _addr, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, _smi_desc.sig, _smi_desc.sig_offset )
#
# Invoke SW SMI Handler
#
_rax = _smi_desc.gprs['rax']
_rbx = _smi_desc.gprs['rbx']
_rcx = _smi_desc.gprs['rcx']
_rdx = _smi_desc.gprs['rdx']
_rsi = _smi_desc.gprs['rsi']
_rdi = _smi_desc.gprs['rdi']
self.send_smi( thread_id, _smi_desc.smi_code, _smi_desc.smi_data, _smi_desc.name, _smi_desc.desc, _rax, _rbx, _rcx, _rdx, _rsi, _rdi )
#
# Check memory buffer if not in 'No Fill' mode
#
contents_changed = False
if self.is_check_memory:
fn = '%X-a%X_b%X_c%X_d%X_si%X_di%X' % (_smi_desc.smi_data,_rax,_rbx,_rcx,_rdx,_rsi,_rdi)
contents_changed = self.check_memory( _addr, _smi_desc, fn, restore_contents )
if contents_changed:
msg = "DETECTED: SMI# %X data %X (rax=%X rbx=%X rcx=%X rdx=%X rsi=%X rdi=%X)" % (_smi_desc.smi_code,_smi_desc.smi_data,_rax,_rbx,_rcx,_rdx,_rsi,_rdi)
self.logger.log_important( msg )
if FUZZ_BAIL_ON_1ST_DETECT: raise BadSMIDetected, msg
if FLUSH_OUTPUT_AFTER_SMI: self.logger.flush()
return contents_changed
def test_config( self, thread_id, _smi_config_fname, _addr, _addr1 ):
#
# Parse SMM config file describing SMI handlers and their call arguments
# Then invoke SMI handlers
#
fcfg = open( _smi_config_fname, 'r' )
self.logger.log( "\n[*] >>> Testing SMI handlers defined in '%s'.." % _smi_config_fname )
bad_ptr_cnt = 0
_smi_desc = smi_desc()
for line in fcfg:
if '' == line.strip():
self.logger.log( "\n[*] testing SMI# 0x%02X (data: 0x%02X) %s (%s)" % (_smi_desc.smi_code,_smi_desc.smi_data,_smi_desc.name,_smi_desc.desc) )
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc ): bad_ptr_cnt += 1
_smi_desc = None
_smi_desc = smi_desc()
else:
name, var = line.strip().partition('=')[::2]
_n = name.strip().lower()
if 'name' == _n: _smi_desc.name = var
elif 'desc' == _n: _smi_desc.desc = var
elif 'smi_code' == _n: _smi_desc.smi_code = int(var,16) if '*'!=var else _SMI_CODE_DATA
elif 'smi_data' == _n: _smi_desc.smi_data = int(var,16) if '*'!=var else _SMI_CODE_DATA
elif 'ptr_offset' == _n:
_smi_desc.ptr_in_buffer = True
_smi_desc.ptr_offset = int(var,16)
_smi_desc.ptr = _addr1
elif 'sig' == _n: _smi_desc.sig = str( bytearray.fromhex( var ) )
elif 'sig_offset' == _n: _smi_desc.sig_offset = int(var,16)
else: _smi_desc.gprs[ _n ] = ( _addr if 'PTR'==var else (_FILL_VALUE_BYTE if 'VAL'==var else int(var,16)) ) if '*'!=var else _FILL_VALUE_QWORD
return bad_ptr_cnt
def test_fuzz( self, thread_id, smic_start, smic_end, _addr, _addr1 ):
gpr_value = ((_addr<<32)|_addr) if GPR_2ADDR else _addr
gprs_addr = {'rax' : gpr_value, 'rbx' : gpr_value, 'rcx' : gpr_value, 'rdx' : gpr_value, 'rsi' : gpr_value, 'rdi' : gpr_value}
gprs_fill = {'rax' : _FILL_VALUE_QWORD, 'rbx' : _FILL_VALUE_QWORD, 'rcx' : _FILL_VALUE_QWORD, 'rdx' : _FILL_VALUE_QWORD, 'rsi' : _FILL_VALUE_QWORD, 'rdi' : _FILL_VALUE_QWORD}
self.logger.log( "\n[*] >>> Fuzzing SMI handlers.." )
self.logger.log( "[*] AX in RAX will be overwridden with values of SW SMI ports 0xB2/0xB3" )
self.logger.log( " DX in RDX will be overwridden with value 0x00B2" )
bad_ptr_cnt = 0
_smi_desc = smi_desc()
_smi_desc.gprs = gprs_addr if PTR_IN_ALL_GPRS else gprs_fill
self.logger.log( "\n[*] Setting values of general purpose registers to 0x%016X" % _smi_desc.gprs['rax'] )
max_ptr_off = 1
if self.is_check_memory and self.test_ptr_in_buffer:
_smi_desc.ptr_in_buffer = True
_smi_desc.ptr = _addr1
max_ptr_off = MAX_PTR_OFFSET_IN_BUFFER+1
# if we are not in fuzzmore mode, i.e. we are not testing the pointer within memory buffer
# then this outer loop will only have 1 iteration
for off in range(max_ptr_off):
_smi_desc.ptr_offset = off
self.logger.log( "\n[*] reloading buffer with PTR at offset 0x%X.." % off )
if self.is_check_memory:
self.fill_memory( _addr, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, None, None )
for smi_code in range(smic_start, smic_end + 1, 1):
_smi_desc.smi_code = smi_code
for smi_data in range(MAX_SMI_DATA):
_smi_desc.smi_data = smi_data
self.logger.log( "\n[*] fuzzing SMI# 0x%02X (data: 0x%02X)" % (smi_code,smi_data) )
if FUZZ_SMI_FUNCTIONS_IN_ECX:
for _rcx in range(MAX_SMI_FUNCTIONS):
self.logger.log( " >> function (RCX): 0x%016X" % _rcx )
_smi_desc.gprs['rcx'] = _rcx
if PTR_IN_ALL_GPRS:
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
else:
self.logger.log( " RBX: 0x%016X" % _addr )
_smi_desc.gprs['rbx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rbx'] = _FILL_VALUE_QWORD
self.logger.log( " RSI: 0x%016X" % _addr )
_smi_desc.gprs['rsi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rsi'] = _FILL_VALUE_QWORD
self.logger.log( " RDI: 0x%016X" % _addr )
_smi_desc.gprs['rdi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rdi'] = _FILL_VALUE_QWORD
else:
if PTR_IN_ALL_GPRS:
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
else:
self.logger.log( " RBX: 0x%016X" % _addr )
_smi_desc.gprs['rbx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rbx'] = _FILL_VALUE_QWORD
self.logger.log( " RCX: 0x%016X" % _addr )
_smi_desc.gprs['rcx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rcx'] = _FILL_VALUE_QWORD
self.logger.log( " RSI: 0x%016X" % _addr )
_smi_desc.gprs['rsi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rsi'] = _FILL_VALUE_QWORD
self.logger.log( " RDI: 0x%016X" % _addr )
_smi_desc.gprs['rdi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rdi'] = _FILL_VALUE_QWORD
return bad_ptr_cnt
def run( self, module_argv ):
self.logger.start_test( "A tool to test SMI handlers for pointer validation vulnerabilies" )
self.logger.log( "Usage: chipsec_main -m tools.smm.smm_ptr [ -a <mode>,<config_file>|<smic_start:smic_end>,<size>,<address> ]" )
self.logger.log( " mode SMI handlers testing mode" )
self.logger.log( " = config use SMI configuration file <config_file>" )
self.logger.log( " = fuzz fuzz all SMI handlers with code in the range <smic_start:smic_end>" )
self.logger.log( " = fuzzmore fuzz mode + pass '2nd-order' pointers within buffer to SMI handlers")
self.logger.log( " size size of the memory buffer (in Hex)" )
self.logger.log( " address physical address of memory buffer to pass in GP regs to SMI handlers (in Hex)" )
self.logger.log( " = smram pass address of SMRAM base (system may hang in this mode!)\n" )
test_mode = 'config'
_smi_config_fname = 'chipsec/modules/tools/smm/smm_config.ini'
_addr = None
_addr1 = None
thread_id = 0x0
global DUMP_GPRS_EVERY_SMI
if len(module_argv) > 1:
test_mode = module_argv[0].lower()
if 'config' == test_mode:
_smi_config_fname = module_argv[1]
elif 'fuzz' == test_mode or 'fuzzmore' == test_mode:
smic_arr = module_argv[1].split(':')
smic_start = int(smic_arr[0],16)
smic_end = int(smic_arr[1],16)
if 'fuzzmore' == test_mode:
self.test_ptr_in_buffer = True
DUMP_GPRS_EVERY_SMI = False
else:
self.logger.error( "Unknown fuzzing mode '%s'" % module_argv[0] )
return ModuleResult.ERROR
if len(module_argv) > 2: self.fill_size = int(module_argv[2],16)
if len(module_argv) > 3:
if 'smram' == module_argv[3]:
(_addr, smram_limit, smram_size) = self.cs.cpu.get_SMRAM()
self.is_check_memory = False
self.logger.log( "[*] Using SMRAM base address (0x%016X) to pass to SMI handlers" % _addr )
else:
_addr = int(module_argv[3],16)
self.logger.log( "[*] Using address from command-line (0x%016X) to pass to SMI handlers" % _addr )
else:
(va, _addr) = self.cs.mem.alloc_physical_mem( self.fill_size, _MAX_ALLOC_PA )
self.logger.log( "[*] Allocated memory buffer (to pass to SMI handlers) : 0x%016X" % _addr )
if self.is_check_memory:
(va1, _addr1) = self.cs.mem.alloc_physical_mem( self.fill_size, _MAX_ALLOC_PA )
self.logger.log( "[*] Allocated 2nd buffer (address will be in the 1st buffer): 0x%016X" % _addr1 )
#
# @TODO: Need to check that SW/APMC SMI is enabled
#
self.logger.log( "\n[*] Configuration" )
self.logger.log( " SMI testing mode : %s" % test_mode )
if 'config' == test_mode:
self.logger.log( " Config file : %s" % _smi_config_fname )
else:
self.logger.log( " Range of SMI codes (B2) : 0x%02X:0x%02X" % (smic_start,smic_end) )
self.logger.log( " Memory buffer pointer : 0x%016X (address passed in GP regs to SMI)" % _addr )
self.logger.log( " Filling/checking memory? : %s" % ('YES' if self.is_check_memory else 'NO'))
if self.is_check_memory:
self.logger.log( " Second buffer pointer : 0x%016X (address written to memory buffer)" % _addr1 )
self.logger.log( " Number of bytes to fill : 0x%X" % self.fill_size )
self.logger.log( " Byte to fill with : 0x%X" % ord(self.fill_byte) )
self.logger.log( " Additional options (can be changed in the source code):" )
self.logger.log( " Fuzzing SMI functions in ECX? : %d" % FUZZ_SMI_FUNCTIONS_IN_ECX )
self.logger.log( " Max value of SMI function in ECX : 0x%X" % MAX_SMI_FUNCTIONS )
self.logger.log( " Max value of SMI data (B3) : 0x%X" % MAX_SMI_DATA )
self.logger.log( " Max offset of the pointer in the buffer: 0x%X" % MAX_PTR_OFFSET_IN_BUFFER )
self.logger.log( " Passing pointer in all GP registers? : %d" % PTR_IN_ALL_GPRS )
self.logger.log( " Default values of the registers : 0x%016X" % _FILL_VALUE_QWORD )
self.logger.log( " Dump all register values every SMI : %d" % DUMP_GPRS_EVERY_SMI )
self.logger.log( " Bail on first detection : %d" % FUZZ_BAIL_ON_1ST_DETECT )
self.logger.set_always_flush( FLUSH_OUTPUT_ALWAYS )
if DUMP_MEMORY_ON_DETECT and not os.path.exists( _pth ): os.makedirs( _pth )
bad_ptr_cnt = 0
try:
if 'config' == test_mode:
bad_ptr_cnt = self.test_config( thread_id, _smi_config_fname, _addr, _addr1 )
elif 'fuzz' == test_mode or 'fuzzmore' == test_mode:
bad_ptr_cnt = self.test_fuzz ( thread_id, smic_start, smic_end, _addr, _addr1 )
except BadSMIDetected, msg:
bad_ptr_cnt = 1
self.logger.log_important( "Potentially bad SMI detected! Stopped fuzing (see FUZZ_BAIL_ON_1ST_DETECT option)" )
if bad_ptr_cnt > 0: self.logger.log_bad( "<<< Done: found %d potential occurrences of unchecked input pointers" % bad_ptr_cnt )
else: self.logger.log_good( "<<< Done: didn't find unchecked input pointers in tested SMI handlers" )
res = ModuleResult.FAILED if (bad_ptr_cnt > 0) else ModuleResult.PASSED
return res
| gpl-2.0 | 3,814,541,026,084,775,000 | 47.469903 | 182 | 0.557367 | false |
afaheem88/rally | tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py | 12 | 3700 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.sahara import (node_group_templates
as ngts)
from tests.unit import test
SAHARA_NGTS = ("rally.plugins.openstack.scenarios.sahara.node_group_templates"
".SaharaNodeGroupTemplates")
class SaharaNodeGroupTemplatesTestCase(test.TestCase):
def setUp(self):
super(SaharaNodeGroupTemplatesTestCase, self).setUp()
self.context = test.get_test_context()
@mock.patch(SAHARA_NGTS + "._list_node_group_templates")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=object())
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=object)
def test_create_and_list_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__list_node_group_templates):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_and_list_node_group_templates("test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__list_node_group_templates.assert_called_once_with()
@mock.patch(SAHARA_NGTS + "._delete_node_group_template")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=mock.MagicMock(id=1))
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=mock.MagicMock(id=2))
def test_create_delete_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__delete_node_group_template):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_delete_node_group_templates(
"test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__delete_node_group_template.assert_has_calls(calls=[
mock.call(mock__create_master_node_group_template.return_value),
mock.call(mock__create_worker_node_group_template.return_value)])
| apache-2.0 | -6,835,626,747,357,833,000 | 42.023256 | 78 | 0.627027 | false |
flingone/frameworks_base_cmds_remoted | libs/boost/libs/python/pyste/src/Pyste/infos.py | 13 | 9212 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os.path
import copy
import exporters
from ClassExporter import ClassExporter
from FunctionExporter import FunctionExporter
from EnumExporter import EnumExporter
from HeaderExporter import HeaderExporter
from VarExporter import VarExporter
from CodeExporter import CodeExporter
from exporterutils import FunctionWrapper
from utils import makeid
import warnings
#==============================================================================
# DeclarationInfo
#==============================================================================
class DeclarationInfo:
def __init__(self, otherInfo=None):
self.__infos = {}
self.__attributes = {}
if otherInfo is not None:
self.__infos = copy.deepcopy(otherInfo.__infos)
self.__attributes = copy.deepcopy(otherInfo.__attributes)
def __getitem__(self, name):
'Used to access sub-infos'
if name.startswith('__'):
raise AttributeError
default = DeclarationInfo()
default._Attribute('name', name)
return self.__infos.setdefault(name, default)
def __getattr__(self, name):
return self[name]
def _Attribute(self, name, value=None):
if value is None:
# get value
return self.__attributes.get(name)
else:
# set value
self.__attributes[name] = value
def AddExporter(self, exporter):
        # this was causing a much more serious bug, as reported by Niall Douglas:
# another solution must be found!
#if not exporters.importing:
if exporter not in exporters.exporters:
exporters.exporters.append(exporter)
exporter.interface_file = exporters.current_interface
#==============================================================================
# FunctionInfo
#==============================================================================
class FunctionInfo(DeclarationInfo):
def __init__(self, name, include, tail=None, otherOption=None,
exporter_class = FunctionExporter):
DeclarationInfo.__init__(self, otherOption)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
# create a FunctionExporter
exporter = exporter_class(InfoWrapper(self), tail)
self.AddExporter(exporter)
#==============================================================================
# ClassInfo
#==============================================================================
class ClassInfo(DeclarationInfo):
def __init__(self, name, include, tail=None, otherInfo=None,
exporter_class = ClassExporter):
DeclarationInfo.__init__(self, otherInfo)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
# create a ClassExporter
exporter = exporter_class(InfoWrapper(self), tail)
self.AddExporter(exporter)
#==============================================================================
# templates
#==============================================================================
def GenerateName(name, type_list):
name = name.replace('::', '_')
names = [name] + type_list
return makeid('_'.join(names))
class ClassTemplateInfo(DeclarationInfo):
def __init__(self, name, include,
exporter_class = ClassExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
self._exporter_class = exporter_class
def Instantiate(self, type_list, rename=None):
if not rename:
rename = GenerateName(self._Attribute('name'), type_list)
# generate code to instantiate the template
types = ', '.join(type_list)
tail = 'typedef %s< %s > %s;\n' % (self._Attribute('name'), types, rename)
tail += 'void __instantiate_%s()\n' % rename
tail += '{ sizeof(%s); }\n\n' % rename
# create a ClassInfo
class_ = ClassInfo(rename, self._Attribute('include'), tail, self,
exporter_class = self._exporter_class)
return class_
def __call__(self, types, rename=None):
if isinstance(types, str):
types = types.split()
return self.Instantiate(types, rename)
#==============================================================================
# EnumInfo
#==============================================================================
class EnumInfo(DeclarationInfo):
def __init__(self, name, include, exporter_class = EnumExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
self._Attribute('export_values', False)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# HeaderInfo
#==============================================================================
class HeaderInfo(DeclarationInfo):
def __init__(self, include, exporter_class = HeaderExporter):
warnings.warn('AllFromHeader is not working in all cases in the current version.')
DeclarationInfo.__init__(self)
self._Attribute('include', include)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# VarInfo
#==============================================================================
class VarInfo(DeclarationInfo):
def __init__(self, name, include, exporter_class = VarExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# CodeInfo
#==============================================================================
class CodeInfo(DeclarationInfo):
def __init__(self, code, section, exporter_class = CodeExporter):
DeclarationInfo.__init__(self)
self._Attribute('code', code)
self._Attribute('section', section)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# InfoWrapper
#==============================================================================
class InfoWrapper:
'Provides a nicer interface for a info'
def __init__(self, info):
self.__dict__['_info'] = info # so __setattr__ is not called
def __getitem__(self, name):
return InfoWrapper(self._info[name])
def __getattr__(self, name):
return self._info._Attribute(name)
def __setattr__(self, name, value):
self._info._Attribute(name, value)
#==============================================================================
# Functions
#==============================================================================
def exclude(info):
info._Attribute('exclude', True)
def set_policy(info, policy):
info._Attribute('policy', policy)
def rename(info, name):
info._Attribute('rename', name)
def set_wrapper(info, wrapper):
if isinstance(wrapper, str):
wrapper = FunctionWrapper(wrapper)
info._Attribute('wrapper', wrapper)
def instantiate(template, types, rename=None):
if isinstance(types, str):
types = types.split()
return template.Instantiate(types, rename)
def use_shared_ptr(info):
info._Attribute('smart_ptr', 'boost::shared_ptr< %s >')
def use_auto_ptr(info):
info._Attribute('smart_ptr', 'std::auto_ptr< %s >')
def holder(info, function):
msg = "Expected a callable that accepts one string argument."
assert callable(function), msg
info._Attribute('holder', function)
def add_method(info, name, rename=None):
added = info._Attribute('__added__')
if added is None:
info._Attribute('__added__', [(name, rename)])
else:
added.append((name, rename))
def class_code(info, code):
added = info._Attribute('__code__')
if added is None:
info._Attribute('__code__', [code])
else:
added.append(code)
def final(info):
info._Attribute('no_override', True)
def export_values(info):
info._Attribute('export_values', True)
| apache-2.0 | -8,874,559,424,288,964,000 | 33.567568 | 90 | 0.499566 | false |
aerickson/ansible | lib/ansible/modules/network/nxos/_nxos_mtu.py | 59 | 11681 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_mtu
extends_documentation_fragment: nxos
version_added: "2.2"
deprecated: Deprecated in 2.3 use M(nxos_system)'s C(mtu) option.
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
author:
- Jason Edelman (@jedelman8)
notes:
- Either C(sysmtu) param is required or (C(interface) AND C(mtu)) parameters are required.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1.
required: false
default: null
mtu:
description:
- MTU for a specific interface. Must be an even number between 576 and 9216.
required: false
default: null
sysmtu:
description:
- System jumbo MTU. Must be an even number between 576 and 9216.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure system mtu is 9216
- nxos_mtu:
sysmtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/1 (routed interface)
- nxos_mtu:
interface: Ethernet1/1
mtu: 1600
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/3 (switched interface)
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Unconfigure mtu on a given interface
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700"}
existing:
description:
- k/v pairs of existing mtu/sysmtu on the interface/system
returned: always
type: dict
sample: {"mtu": "1600", "sysmtu": "9216"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", sysmtu": "9216"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface vlan10", "mtu 1700"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_mtu(interface, module):
command = 'show interface {0}'.format(interface)
mtu = {}
body = execute_show_command(command, module)
try:
mtu_table = body[0]['TABLE_interface']['ROW_interface']
mtu['mtu'] = str(
mtu_table.get('eth_mtu',
mtu_table.get('svi_mtu', 'unreadable_via_api')))
mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
except KeyError:
mtu = {}
return mtu
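# Traced from the logic above: a successful lookup returns string values shaped like
# {'mtu': '1500', 'sysmtu': '9216'} (actual numbers depend on the device), while an
# unreadable interface table collapses to {}.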
def get_system_mtu(module):
command = 'show run all | inc jumbomtu'
sysmtu = ''
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
sysmtu = str(body[0].split(' ')[-1])
try:
sysmtu = int(sysmtu)
    except ValueError:
sysmtu = ""
return dict(sysmtu=str(sysmtu))
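# get_system_mtu() keys off the last token of the 'system jumbomtu <value>' line in
# the ASCII running config; anything that fails the int() round-trip is reported as
# an empty string.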
def get_commands_config_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'mtu {mtu}',
'sysmtu': 'system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
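# For example, delta={'mtu': '1600'} with interface='Ethernet1/1' produces
# ['interface Ethernet1/1', 'mtu 1600'], while a sysmtu-only delta produces a single
# 'system jumbomtu <value>' command with no interface line.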
def get_commands_remove_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'no mtu {mtu}',
'sysmtu': 'no system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
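# Examples: 'Ethernet1/1' -> 'ethernet', 'Vlan10' -> 'svi', 'port-channel10' ->
# 'portchannel', 'mgmt0' -> 'management'; unrecognised prefixes map to 'unknown'.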
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if body == 'DNE':
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
    except KeyError:
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
interface_table = {}
body = execute_show_command(command, module)
try:
interface_table = body[0]['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
return mode
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode in ['access', 'trunk']:
mode = 'layer2'
elif mode == 'routed':
mode = 'layer3'
elif intf_type in ['loopback', 'svi']:
mode = 'layer3'
return mode
def main():
argument_spec = dict(
mtu=dict(type='str'),
interface=dict(type='str'),
sysmtu=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_together=[['mtu', 'interface']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
interface = module.params['interface']
mtu = module.params['mtu']
sysmtu = module.params['sysmtu']
state = module.params['state']
if sysmtu and (interface or mtu):
        module.fail_json(msg='Proper usage: either use just the sysmtu param, '
                             'or use both the interface and mtu params')
if interface:
intf_type = get_interface_type(interface)
if intf_type != 'ethernet':
if is_default(interface, module) == 'DNE':
module.fail_json(msg='Invalid interface. It does not exist '
'on the switch.')
existing = get_mtu(interface, module)
else:
existing = get_system_mtu(module)
if interface and mtu:
if intf_type == 'loopback':
module.fail_json(msg='Cannot set MTU for loopback interface.')
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
if intf_type in ['ethernet', 'portchannel']:
if mtu not in [existing['sysmtu'], '1500']:
                    module.fail_json(msg='MTU on L2 interfaces can only be set '
                                         'to the system default (1500) or the '
                                         'existing sysmtu value, which is '
                                         '{0}'.format(existing['sysmtu']))
elif mode == 'layer3':
if intf_type in ['ethernet', 'portchannel', 'svi']:
if ((int(mtu) < 576 or int(mtu) > 9216) or
((int(mtu) % 2) != 0)):
                    module.fail_json(msg='Invalid MTU for Layer 3 interface; '
                                         'needs to be an even number between '
                                         '576 and 9216')
if sysmtu:
if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
((int(sysmtu) % 2) != 0))):
            module.fail_json(msg='Invalid MTU; needs to be an even '
                                 'number between 576 and 9216')
args = dict(mtu=mtu, sysmtu=sysmtu)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
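    # The set difference keeps only the proposed pairs that differ from the device,
    # e.g. proposed={'mtu': '1600'} against existing={'mtu': '1500', 'sysmtu': '9216'}
    # leaves delta={'mtu': '1600'}, so unchanged values generate no commands.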
changed = False
end_state = existing
commands = []
if state == 'present':
if delta:
command = get_commands_config_mtu(delta, interface)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_mtu(dict(common), interface)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
if interface:
end_state = get_mtu(interface, module)
else:
end_state = get_system_mtu(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,297,521,848,106,136,000 | 29.578534 | 94 | 0.582912 | false |
grpc/grpc | src/python/grpcio/support.py | 10 | 4388 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import shutil
import sys
import tempfile
from distutils import errors
import commands
C_PYTHON_DEV = """
#include <Python.h>
int main(int argc, char **argv) { return 0; }
"""
C_PYTHON_DEV_ERROR_MESSAGE = """
Could not find <Python.h>. This could mean the following:
* You're on Ubuntu and haven't run `apt-get install <PY_REPR>-dev`.
* You're on RHEL/Fedora and haven't run `yum install <PY_REPR>-devel` or
`dnf install <PY_REPR>-devel` (make sure you also have redhat-rpm-config
installed)
* You're on Mac OS X and the usual Python framework was somehow corrupted
(check your environment variables or try re-installing?)
* You're on Windows and your Python installation was somehow corrupted
(check your environment variables or try re-installing?)
"""
if sys.version_info[0] == 2:
PYTHON_REPRESENTATION = 'python'
elif sys.version_info[0] == 3:
PYTHON_REPRESENTATION = 'python3'
else:
raise NotImplementedError('Unsupported Python version: %s' % sys.version)
C_CHECKS = {
C_PYTHON_DEV:
C_PYTHON_DEV_ERROR_MESSAGE.replace('<PY_REPR>', PYTHON_REPRESENTATION),
}
def _compile(compiler, source_string):
tempdir = tempfile.mkdtemp()
cpath = os.path.join(tempdir, 'a.c')
with open(cpath, 'w') as cfile:
cfile.write(source_string)
try:
compiler.compile([cpath])
except errors.CompileError as error:
return error
finally:
shutil.rmtree(tempdir)
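# _compile() returns None when the probe source builds cleanly and the CompileError
# instance otherwise; _expect_compile() below treats any non-None result as a
# diagnosable environment problem.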
def _expect_compile(compiler, source_string, error_message):
if _compile(compiler, source_string) is not None:
sys.stderr.write(error_message)
raise commands.CommandError(
"Diagnostics found a compilation environment issue:\n{}".format(
error_message))
def diagnose_compile_error(build_ext, error):
"""Attempt to diagnose an error during compilation."""
for c_check, message in C_CHECKS.items():
_expect_compile(build_ext.compiler, c_check, message)
python_sources = [
source for source in build_ext.get_source_files()
if source.startswith('./src/python') and source.endswith('c')
]
for source in python_sources:
if not os.path.isfile(source):
raise commands.CommandError((
"Diagnostics found a missing Python extension source file:\n{}\n\n"
"This is usually because the Cython sources haven't been transpiled "
"into C yet and you're building from source.\n"
"Try setting the environment variable "
"`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or "
"when using `pip`, e.g.:\n\n"
"pip install -rrequirements.txt\n"
"GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .").format(source))
def diagnose_attribute_error(build_ext, error):
if any('_needs_stub' in arg for arg in error.args):
raise commands.CommandError(
"We expect a missing `_needs_stub` attribute from older versions of "
"setuptools. Consider upgrading setuptools.")
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
AttributeError: diagnose_attribute_error,
}
def diagnose_build_ext_error(build_ext, error, formatted):
diagnostic = _ERROR_DIAGNOSES.get(type(error))
if diagnostic is None:
raise commands.CommandError(
"\n\nWe could not diagnose your build failure. If you are unable to "
"proceed, please file an issue at http://www.github.com/grpc/grpc "
"with `[Python install]` in the title; please attach the whole log "
"(including everything that may have appeared above the Python "
"backtrace).\n\n{}".format(formatted))
else:
diagnostic(build_ext, error)
| apache-2.0 | 1,985,814,918,368,222,200 | 36.186441 | 85 | 0.672288 | false |
loco-odoo/localizacion_co | openerp/addons-extra/print_receipt/reports/account_cheque_bancolombia.py | 3 | 1068 | # -*- coding: utf-8 -*-
import time
from openerp.report import report_sxw
from openerp import pooler
class account_voucher(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_voucher, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'getLines': self._lines_get,
})
self.context = context
def _lines_get(self, voucher):
voucherline_obj = pooler.get_pool(self.cr.dbname).get('account.voucher.line')
voucherlines = voucherline_obj.search(self.cr, self.uid,[('voucher_id','=',voucher.id)])
voucherlines = voucherline_obj.browse(self.cr, self.uid, voucherlines)
return voucherlines
report_sxw.report_sxw('report.account_cheque_bancolombia', 'account.voucher',
'addons/print_receipt/reports/account_cheque_bancolombia.rml',
parser=account_voucher)
| agpl-3.0 | -3,243,311,462,761,863,700 | 37.142857 | 96 | 0.571161 | false |
dbarbier/privot-doc | src/UseCasesGuide/script_WhiteNoise.py | 1 | 1386 | from openturns import *
# Time grid over which all the processes will be defined
nt = 100
timeGrid = RegularGrid(0.0, 1.0, nt)
# Definition of the distribution
sigma = 1.0
myDistribution = Normal(0., sigma)
# Definition of the process
myProcess = WhiteNoise(myDistribution, timeGrid)
# We get a realization of the white noise process
realization = myProcess.getRealization()
# The realization is a time series
# we draw it as a function of time using the drawMarginal method
# We rework the legend name and color to get a prettier graph
graph = Graph()
marginalDraw = realization.drawMarginal(0)
drawable = marginalDraw.getDrawable(0)
drawable.setLegendName('realization')
drawable.setColor('blue')
graph.add(drawable)
graph.setXTitle('Time')
graph.setYTitle('Values')
graph.setTitle("White noise process")
graph.setLegendPosition('topright')
graph.draw("whitenoise_realization", 800, 600, GraphImplementation.PNG)
# Several realizations ==> here we draw 5 of them in order to compare them and visualize the differences
sample = myProcess.getSample(5)
graphSample = sample.drawMarginal(0)
graphSample.setTitle("5 realizations of the White noise process")
for k in range(5):
drawable = graphSample.getDrawable(k)
drawable.setLegendName('realization ' + str(k+1))
graphSample.setDrawable(drawable, k)
graphSample.draw("whitenoise_realizations", 800, 600, GraphImplementation.PNG)
| lgpl-2.1 | 2,781,032,347,835,453,400 | 29.8 | 95 | 0.777056 | false |
AdrianGaudebert/elmo | vendor-local/lib/python/south/management/commands/datamigration.py | 10 | 4665 | """
Data migration creation command
"""
from __future__ import print_function
import sys
import os
import re
from optparse import make_option
try:
set
except NameError:
from sets import Set as set
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import models
from django.conf import settings
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.creator import freezer
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified app(s). Provide an app name with each; use the option multiple times for multiple apps'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Creates a new template data migration for the given app"
usage_str = "Usage: ./manage.py datamigration appname migrationname [--stdout] [--freeze appname]"
def handle(self, app=None, name="", freeze_list=None, stdout=False, verbosity=1, **options):
# Any supposed lists that are None become empty lists
freeze_list = freeze_list or []
# --stdout means name = -
if stdout:
name = "-"
# Only allow valid names
        if re.search(r'[^_\w]', name) and name != "-":
self.error("Migration names should contain only alphanumeric characters and underscores.")
# if not name, there's an error
if not name:
self.error("You must provide a name for this migration\n" + self.usage_str)
if not app:
self.error("You must provide an app to create a migration for.\n" + self.usage_str)
# Get the Migrations for this app (creating the migrations dir if needed)
migrations = Migrations(app, force_creation=True, verbose_creation=verbosity > 0)
# See what filename is next in line. We assume they use numbers.
new_filename = migrations.next_filename(name)
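        # For illustration only: with the default numbering scheme a name such as
        # 'populate_foo' typically ends up as something like '0002_populate_foo.py',
        # the exact prefix depending on the migrations already present.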
# Work out which apps to freeze
apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)
# So, what's in this file, then?
file_contents = MIGRATION_TEMPLATE % {
"frozen_models": freezer.freeze_apps_to_string(apps_to_freeze),
"complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
}
# - is a special name which means 'print to stdout'
if name == "-":
print(file_contents)
# Write the migration file if the name isn't -
else:
fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
fp.write(file_contents)
fp.close()
print("Created %s." % new_filename, file=sys.stderr)
def calc_frozen_apps(self, migrations, freeze_list):
"""
Works out, from the current app, settings, and the command line options,
which apps should be frozen.
"""
apps_to_freeze = []
for to_freeze in freeze_list:
if "." in to_freeze:
self.error("You cannot freeze %r; you must provide an app label, like 'auth' or 'books'." % to_freeze)
# Make sure it's a real app
if not models.get_app(to_freeze):
self.error("You cannot freeze %r; it's not an installed app." % to_freeze)
# OK, it's fine
apps_to_freeze.append(to_freeze)
if getattr(settings, 'SOUTH_AUTO_FREEZE_APP', True):
apps_to_freeze.append(migrations.app_label())
return apps_to_freeze
def error(self, message, code=1):
"""
Prints the error, and exits with the given code.
"""
print(message, file=sys.stderr)
sys.exit(code)
MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
def backwards(self, orm):
"Write your backwards methods here."
models = %(frozen_models)s
%(complete_apps)s
symmetrical = True
"""
| mpl-2.0 | -2,866,289,819,673,576,400 | 35.445313 | 128 | 0.620579 | false |
ehashman/oh-mainline | vendor/packages/sqlparse/tests/test_parse.py | 16 | 6668 | # -*- coding: utf-8 -*-
"""Tests sqlparse function."""
import pytest
from tests.utils import TestCaseBase
import sqlparse
import sqlparse.sql
from sqlparse import tokens as T
class SQLParseTest(TestCaseBase):
"""Tests sqlparse.parse()."""
def test_tokenize(self):
sql = 'select * from foo;'
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.assertEqual(str(stmts[0]), sql)
def test_multistatement(self):
sql1 = 'select * from foo;'
sql2 = 'select * from bar;'
stmts = sqlparse.parse(sql1 + sql2)
self.assertEqual(len(stmts), 2)
self.assertEqual(str(stmts[0]), sql1)
self.assertEqual(str(stmts[1]), sql2)
def test_newlines(self):
sql = u'select\n*from foo;'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r\n*from foo'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r*from foo'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r\n*from foo\n'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
def test_within(self):
sql = 'foo(col1, col2)'
p = sqlparse.parse(sql)[0]
col1 = p.tokens[0].tokens[1].tokens[1].tokens[0]
self.assert_(col1.within(sqlparse.sql.Function))
def test_child_of(self):
sql = '(col1, col2)'
p = sqlparse.parse(sql)[0]
self.assert_(p.tokens[0].tokens[1].is_child_of(p.tokens[0]))
sql = 'select foo'
p = sqlparse.parse(sql)[0]
self.assert_(not p.tokens[2].is_child_of(p.tokens[0]))
self.assert_(p.tokens[2].is_child_of(p))
def test_has_ancestor(self):
sql = 'foo or (bar, baz)'
p = sqlparse.parse(sql)[0]
baz = p.tokens[-1].tokens[1].tokens[-1]
self.assert_(baz.has_ancestor(p.tokens[-1].tokens[1]))
self.assert_(baz.has_ancestor(p.tokens[-1]))
self.assert_(baz.has_ancestor(p))
def test_float(self):
t = sqlparse.parse('.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('.51')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('1.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('12.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
def test_placeholder(self):
def _get_tokens(sql):
return sqlparse.parse(sql)[0].tokens[-1].tokens
t = _get_tokens('select * from foo where user = ?')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '?')
t = _get_tokens('select * from foo where user = :1')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, ':1')
t = _get_tokens('select * from foo where user = :name')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, ':name')
t = _get_tokens('select * from foo where user = %s')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '%s')
t = _get_tokens('select * from foo where user = $a')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '$a')
def test_access_symbol(self): # see issue27
t = sqlparse.parse('select a.[foo bar] as foo')[0].tokens
self.assert_(isinstance(t[-1], sqlparse.sql.Identifier))
self.assertEqual(t[-1].get_name(), 'foo')
self.assertEqual(t[-1].get_real_name(), '[foo bar]')
self.assertEqual(t[-1].get_parent_name(), 'a')
def test_keyword_like_identifier(self): # see issue47
t = sqlparse.parse('foo.key')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(isinstance(t[0], sqlparse.sql.Identifier))
def test_function_parameter(self): # see issue94
t = sqlparse.parse('abs(some_col)')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(isinstance(t[0], sqlparse.sql.Identifier))
def test_function_param_single_literal(self):
t = sqlparse.parse('foo(5)')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is T.Number.Integer)
def test_nested_function(self):
t = sqlparse.parse('foo(bar(5))')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(type(t[0]) is sqlparse.sql.Function)
def test_quoted_identifier():
t = sqlparse.parse('select x.y as "z" from foo')[0].tokens
assert isinstance(t[2], sqlparse.sql.Identifier)
assert t[2].get_name() == 'z'
assert t[2].get_real_name() == 'y'
def test_psql_quotation_marks(): # issue83
# regression: make sure plain $$ work
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;""")
assert len(t) == 2
# make sure $SOMETHING$ works too
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $PROC_1$
....
$PROC_1$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $PROC_2$
....
$PROC_2$ LANGUAGE plpgsql;""")
assert len(t) == 2
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
p = sqlparse.parse(ph)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Name.Placeholder
@pytest.mark.parametrize('num', ['6.67428E-8', '1.988e33', '1e-12'])
def test_scientific_numbers(num):
p = sqlparse.parse(num)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Number.Float
def test_single_quotes_are_strings():
p = sqlparse.parse("'foo'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
def test_double_quotes_are_identifiers():
p = sqlparse.parse('"foo"')[0].tokens
assert len(p) == 1
assert isinstance(p[0], sqlparse.sql.Identifier)
def test_single_quotes_with_linebreaks(): # issue118
p = sqlparse.parse("'f\nf'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
| agpl-3.0 | -7,683,300,934,872,038,000 | 34.657754 | 77 | 0.603629 | false |