repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
pycharmers-hackathon/hackathon-rep | SmartFuel/SmartFuel/aco.py | 2 | 3615 | from math import radians, cos, sin, sqrt, atan2
import random
import operator
def aco(petrol_stations, gas_type, capacity, initial_petrol, overall_dist,
consumption, source, target, ants=2, a=1, b=1, iterations=10):
pheromone = {station['id']: 4 for station in petrol_stations}
probability = lambda source, i, feasible, dist: pheromone[i] ** a * h(source, i, dist) ** b / \
summation(source, feasible, calulate_distance)
summation = lambda source, feasible, dist: sum(pheromone[i] ** a * h(source, i, dist) ** b
for i in feasible)
h = lambda source, i, dist: 1.22000 / dist(source, i)
solutions = []
overall_best_sol = None
overall_cost = float('inf')
for i in range(iterations):
best_sol = None
best_cost = float('inf')
for j in range(ants):
sol, cost = construct_solution(source, target, petrol_stations, initial_petrol,
consumption, overall_dist, probability,
capacity, gas_type)
solutions.append((sol, cost))
if cost < best_cost:
best_cost = cost
best_sol = sol
if best_cost < overall_cost:
overall_cost = best_cost
overall_best_sol = best_sol
update_pheromone(pheromone, best_sol, best_cost)
return overall_best_sol, overall_cost
def construct_solution(source, target, petrol_stations, initial_petrol,
consumption, overall_dist, probability, capacity,
gas_type):
feasible_dist = initial_petrol * consumption
dist = calulate_distance(source, target)
sol = []
cost = 0.0
while feasible_dist < dist:
feasible_stations = get_feasible(source, target, petrol_stations,
feasible_dist, overall_dist)
p = {feasible_station: probability(source, feasible_station, feasible_stations,
calulate_distance)
for feasible_station in feasible_stations}
next_patrol = max(p.iteritems(), key=operator.itemgetter(1))[0]
quantity = calulate_distance(source, next_patrol) * consumption
feasible_dist = (capacity - quantity) * consumption
sol.append({next_patrol: quantity})
source = next_patrol
cost += quantity * next_patrol[gas_type]
return sol, cost
def update_pheromone(pheromone, sol, cost):
for item in sol.keys():
pheromone[item] += 1 / cost
def calulate_distance(source, target):
R = 3673.0
source_lat = radians(source[0])
source_lng = radians(source[1])
target_lat = radians(target[0])
target_lng = radians(target[1])
dist_lat = target_lat - source_lat
dist_lng = target_lng - source_lng
a = sin(dist_lat / 2) ** 2 + cos(source_lat) * cos(target_lat) * sin(dist_lng / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return R * c
def get_feasible(source, target, petrol_stations, feasible_dist, overall_dist):
"""Returns list of ids. """
accepted = []
for petrol_station in petrol_stations:
dist1 = calulate_distance(source, (petrol_station['latitude'],
petrol_station['longitude']))
dist2 = calulate_distance((petrol_station['latitude'], petrol_station['longitude'])
, target)
if dist2 > overall_dist:
continue
if dist1 <= feasible_dist:
accepted.append(petrol_station['id'])
return accepted | cc0-1.0 |
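The `calulate_distance` helper above is a plain haversine computation using the radius constant `R = 3673.0` defined in the file. A minimal stand-alone sketch of the same formula, with made-up sample coordinates, might look like this:

```python
# Stand-alone haversine sketch mirroring calulate_distance() above.
# The coordinates are arbitrary sample points; R is the constant from the file.
from math import radians, cos, sin, sqrt, atan2

def haversine(source, target, R=3673.0):
    source_lat, source_lng = radians(source[0]), radians(source[1])
    target_lat, target_lng = radians(target[0]), radians(target[1])
    dist_lat = target_lat - source_lat
    dist_lng = target_lng - source_lng
    a = sin(dist_lat / 2) ** 2 + cos(source_lat) * cos(target_lat) * sin(dist_lng / 2) ** 2
    return R * 2 * atan2(sqrt(a), sqrt(1 - a))

print(haversine((3.1390, 101.6869), (3.0738, 101.5183)))  # great-circle distance in the file's units
```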
bcarlin/flask-scss | doc/conf.py | 1 | 7157 | # -*- coding: utf-8 -*-
#
# Flask-Scss documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 8 17:32:32 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Scss'
copyright = u'2013, Bruno Carlin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for Autodoc extensions -------------------------------------------
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Scssdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Scss.tex', u'Flask-Scss Documentation',
u'Bruno Carlin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-scss', u'Flask-Scss Documentation',
[u'Bruno Carlin'], 1)
]
| mit |
tdmsinc/three.js | utils/converters/msgpack/msgpack/fallback.py | 641 | 26403 | """Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
if sys.version_info[0] == 3:
PY3 = True
int_types = int
Unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
else:
PY3 = False
int_types = (int, long)
Unicode = unicode
def dict_iteritems(d):
return d.iteritems()
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
# StringBuilder is fastest.
from __pypy__ import newlist_hint
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b''):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
"""
Unpack an object from `stream`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(stream, **kwargs)
ret = unpacker._fb_unpack()
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._fb_unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
class Unpacker(object):
"""
Streaming unpacker.
`file_like` is a file-like object having a `.read(n)` method.
When `Unpacker` is initialized with a `file_like`, `.feed()` is not
usable.
`read_size` is used for `file_like.read(read_size)`.
If `use_list` is True (default), msgpack lists are deserialized to Python
lists. Otherwise they are deserialized to tuples.
`object_hook` is the same as in simplejson. If it is not None, it should
be callable and Unpacker calls it with a dict argument after deserializing
a map.
`object_pairs_hook` is the same as in simplejson. If it is not None, it
should be callable and Unpacker calls it with a list of key-value pairs
after deserializing a map.
    `ext_hook` is a callback for ext (user-defined) types. It is called with two
    arguments: (code, bytes). default: `msgpack.ExtType`
`encoding` is the encoding used for decoding msgpack bytes. If it is
None (default), msgpack bytes are deserialized to Python bytes.
`unicode_errors` is used for decoding bytes.
`max_buffer_size` limits the buffer size. 0 means INT_MAX (default).
    Raises a `BufferFull` exception when it is insufficient.
    You should set this parameter when unpacking data from an untrusted source.
example of streaming deserialization from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
do_something(o)
example of streaming deserialization from socket::
unpacker = Unpacker()
while 1:
buf = sock.recv(1024*2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
do_something(o)
"""
def __init__(self, file_like=None, read_size=0, use_list=True,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors='strict', max_buffer_size=0,
ext_hook=ExtType):
if file_like is None:
self._fb_feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._fb_feeding = False
self._fb_buffers = []
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = 0
self._max_buffer_size = max_buffer_size or 2**31-1
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 2048)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
if list_hook is not None and not callable(list_hook):
raise TypeError('`list_hook` is not callable')
if object_hook is not None and not callable(object_hook):
raise TypeError('`object_hook` is not callable')
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError('`object_pairs_hook` is not callable')
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually "
"exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
if isinstance(next_bytes, array.array):
next_bytes = next_bytes.tostring()
elif isinstance(next_bytes, bytearray):
next_bytes = bytes(next_bytes)
assert self._fb_feeding
if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
raise BufferFull
self._fb_buf_n += len(next_bytes)
self._fb_buffers.append(next_bytes)
def _fb_consume(self):
self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
if self._fb_buffers:
self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = sum(map(len, self._fb_buffers))
def _fb_got_extradata(self):
if self._fb_buf_i != len(self._fb_buffers):
return True
if self._fb_feeding:
return False
if not self.file_like:
return False
if self.file_like.read(1):
return True
return False
def __iter__(self):
return self
def read_bytes(self, n):
return self._fb_read(n)
def _fb_rollback(self):
self._fb_buf_i = 0
self._fb_buf_o = 0
def _fb_get_extradata(self):
bufs = self._fb_buffers[self._fb_buf_i:]
if bufs:
bufs[0] = bufs[0][self._fb_buf_o:]
return b''.join(bufs)
def _fb_read(self, n, write_bytes=None):
buffs = self._fb_buffers
if (write_bytes is None and self._fb_buf_i < len(buffs) and
self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
self._fb_buf_o += n
return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
ret = b''
while len(ret) != n:
if self._fb_buf_i == len(buffs):
if self._fb_feeding:
break
tmp = self.file_like.read(self._read_size)
if not tmp:
break
buffs.append(tmp)
continue
sliced = n - len(ret)
ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
self._fb_buf_o += sliced
if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
self._fb_buf_o = 0
self._fb_buf_i += 1
if len(ret) != n:
self._fb_rollback()
raise OutOfData
if write_bytes is not None:
write_bytes(ret)
return ret
def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
typ = TYPE_IMMEDIATE
n = 0
obj = None
c = self._fb_read(1, write_bytes)
b = ord(c)
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = struct.unpack("b", c)[0]
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
obj = self._fb_read(n, write_bytes)
typ = TYPE_RAW
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
elif b == 0xc0:
obj = None
elif b == 0xc2:
obj = False
elif b == 0xc3:
obj = True
elif b == 0xc4:
typ = TYPE_BIN
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc5:
typ = TYPE_BIN
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc6:
typ = TYPE_BIN
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc7: # ext 8
typ = TYPE_EXT
L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc8: # ext 16
typ = TYPE_EXT
L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc9: # ext 32
typ = TYPE_EXT
L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xca:
obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
elif b == 0xcb:
obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
elif b == 0xcc:
obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
elif b == 0xcd:
obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
elif b == 0xce:
obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
elif b == 0xcf:
obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
elif b == 0xd0:
obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
elif b == 0xd1:
obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
elif b == 0xd2:
obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
elif b == 0xd3:
obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
elif b == 0xd4: # fixext 1
typ = TYPE_EXT
n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
elif b == 0xd5: # fixext 2
typ = TYPE_EXT
n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
elif b == 0xd6: # fixext 4
typ = TYPE_EXT
n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
elif b == 0xd7: # fixext 8
typ = TYPE_EXT
n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
elif b == 0xd8: # fixext 16
typ = TYPE_EXT
n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
elif b == 0xd9:
typ = TYPE_RAW
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xda:
typ = TYPE_RAW
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdb:
typ = TYPE_RAW
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdc:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xdd:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xde:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_MAP
elif b == 0xdf:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_MAP
else:
raise UnpackValueError("Unknown header: 0x%x" % b)
return typ, n, obj
def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
typ, n, obj = self._read_header(execute, write_bytes)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise UnpackValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise UnpackValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._fb_unpack(EX_SKIP, write_bytes)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_unpack(EX_SKIP, write_bytes)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._fb_unpack(EX_CONSTRUCT, write_bytes),
self._fb_unpack(EX_CONSTRUCT, write_bytes))
for _ in xrange(n))
else:
ret = {}
for _ in xrange(n):
key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
return obj
if typ == TYPE_EXT:
return self._ext_hook(n, obj)
if typ == TYPE_BIN:
return obj
assert typ == TYPE_IMMEDIATE
return obj
def next(self):
try:
ret = self._fb_unpack(EX_CONSTRUCT, None)
self._fb_consume()
return ret
except OutOfData:
raise StopIteration
__next__ = next
def skip(self, write_bytes=None):
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_consume()
def unpack(self, write_bytes=None):
ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
self._fb_consume()
return ret
def read_array_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
self._fb_consume()
return ret
def read_map_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
self._fb_consume()
return ret
class Packer(object):
"""
MessagePack Packer
usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
Convert user type to builtin type that Packer supports.
See also simplejson's document.
:param str encoding:
Convert unicode to bytes with this encoding. (default: 'utf-8')
:param str unicode_errors:
Error handler for encoding unicode. (default: 'strict')
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables the str8 type for unicode.
"""
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
use_single_float=False, autoreset=True, use_bin_type=False):
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._encoding = encoding
self._unicode_errors = unicode_errors
self._buffer = StringIO()
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
default_used = False
while True:
if nest_limit < 0:
raise PackValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if isinstance(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if isinstance(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xff:
return self._buffer.write(struct.pack("BB", 0xcc, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
if 0xff < obj <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xcd, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
if 0xffff < obj <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xce, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
if 0xffffffff < obj <= 0xffffffffffffffff:
return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
raise PackValueError("Integer value out of range")
if self._use_bin_type and isinstance(obj, bytes):
n = len(obj)
if n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xc4, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc5, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xc6, n))
else:
raise PackValueError("Bytes is too large")
return self._buffer.write(obj)
if isinstance(obj, (Unicode, bytes)):
if isinstance(obj, Unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
"no encoding is specified")
obj = obj.encode(self._encoding, self._unicode_errors)
n = len(obj)
if n <= 0x1f:
self._buffer.write(struct.pack('B', 0xa0 + n))
elif self._use_bin_type and n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xd9, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xda, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xdb, n))
else:
raise PackValueError("String is too large")
return self._buffer.write(obj)
if isinstance(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
if isinstance(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(struct.pack(">BB", 0xc7, L))
elif L <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc8, L))
else:
self._buffer.write(struct.pack(">BI", 0xc9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if isinstance(obj, (list, tuple)):
n = len(obj)
self._fb_pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if isinstance(obj, dict):
return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = 1
continue
raise TypeError("Cannot serialize %r" % obj)
def pack(self, obj):
self._pack(obj)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_pairs(self, pairs):
self._fb_pack_map_pairs(len(pairs), pairs)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_array_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_map_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xffffffff:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(b'\xc7' + struct.pack('B', L))
elif L <= 0xffff:
self._buffer.write(b'\xc8' + struct.pack('>H', L))
else:
self._buffer.write(b'\xc9' + struct.pack('>I', L))
self._buffer.write(struct.pack('B', typecode))
self._buffer.write(data)
def _fb_pack_array_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x90 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xdc, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdd, n))
raise PackValueError("Array is too large")
def _fb_pack_map_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x80 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xde, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdf, n))
raise PackValueError("Dict is too large")
def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._fb_pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def bytes(self):
return self._buffer.getvalue()
def reset(self):
self._buffer = StringIO()
| mit |
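A short round-trip sketch of the fallback `Packer`/`Unpacker` documented above. It assumes the module is importable as `msgpack.fallback` (its usual location in the msgpack package) and is written for Python 3; only calls defined in the file are used.

```python
# Round-trip sketch for the pure-Python msgpack fallback above (assumed importable
# as msgpack.fallback; Python 3).
from msgpack.fallback import Packer, Unpacker, unpackb

packer = Packer(use_bin_type=True)
payload = packer.pack({"name": "sensor-1", "readings": [1, 2, 3], "ok": True})

# One-shot decoding of a complete buffer.
print(unpackb(payload, encoding="utf-8"))

# Streaming decoding: feed chunks as they arrive and iterate over decoded objects.
unpacker = Unpacker(encoding="utf-8")
unpacker.feed(payload)
for obj in unpacker:
    print(obj)
```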
djabber/Dashboard | bottle/dash/local/lib/python2.7/site-packages/setuptools/tests/server.py | 452 | 2651 | """Basic http server for tests to simulate PyPI or custom indexes
"""
import sys
import time
import threading
from setuptools.compat import BaseHTTPRequestHandler
from setuptools.compat import (urllib2, URLError, HTTPServer,
SimpleHTTPRequestHandler)
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=SimpleHTTPRequestHandler):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self._run = True
def serve(self):
while self._run:
self.handle_request()
def start(self):
self.thread = threading.Thread(target=self.serve)
self.thread.start()
def stop(self):
"Stop the server"
# Let the server finish the last request and wait for a new one.
time.sleep(0.1)
# self.shutdown is not supported on python < 2.6, so just
# set _run to false, and make a request, causing it to
# terminate.
self._run = False
url = 'http://127.0.0.1:%(server_port)s/' % vars(self)
try:
if sys.version_info >= (2, 6):
urllib2.urlopen(url, timeout=5)
else:
urllib2.urlopen(url)
except URLError:
# ignore any errors; all that's important is the request
pass
self.thread.join()
self.socket.close()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
class RequestRecorder(BaseHTTPRequestHandler):
def do_GET(self):
requests = vars(self.server).setdefault('requests', [])
requests.append(self)
self.send_response(200, 'OK')
class MockServer(HTTPServer, threading.Thread):
"""
A simple HTTP Server that records the requests made to it.
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=RequestRecorder):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
threading.Thread.__init__(self)
self.setDaemon(True)
self.requests = []
def run(self):
self.serve_forever()
def url(self):
return 'http://localhost:%(server_port)s/' % vars(self)
url = property(url)
| mit |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/distutils/command/upload.py | 176 | 7002 | """distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
import os
import socket
import platform
from urllib2 import urlopen, Request, HTTPError
from base64 import standard_b64encode
import urlparse
import cStringIO as StringIO
from hashlib import md5
from distutils.errors import DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + standard_b64encode(self.username + ":" +
self.password)
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if isinstance(value, tuple):
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
headers = {'Content-type':
'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
if self.show_response:
                msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
return
except HTTPError, e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (status, reason),
log.ERROR)
| mit |
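The `upload_file` method above builds the multipart/form-data POST body by hand around a fixed boundary string. A stripped-down sketch of that framing, with hypothetical field values, looks like this:

```python
# Minimal sketch of the multipart framing used by upload_file() above: each field
# sits between "--boundary" separators and the body ends with "--boundary--".
# Field names and values here are made up.
def build_multipart(fields, boundary='--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'):
    sep_boundary = '\n--' + boundary
    end_boundary = sep_boundary + '--'
    chunks = []
    for key, value in fields.items():
        chunks.append(sep_boundary)
        chunks.append('\nContent-Disposition: form-data; name="%s"\n\n%s' % (key, value))
    chunks.append(end_boundary + '\n')
    return ''.join(chunks)

print(build_multipart({'name': 'example-pkg', 'version': '0.1'}))
```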
baylee/django | tests/template_tests/filter_tests/test_dictsortreversed.py | 181 | 1686 | from django.template.defaultfilters import dictsortreversed
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_sort(self):
sorted_dicts = dictsortreversed(
[{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age',
)
self.assertEqual(
[sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]],
)
def test_sort_list_of_tuples(self):
data = [('a', '42'), ('c', 'string'), ('b', 'foo')]
expected = [('c', 'string'), ('b', 'foo'), ('a', '42')]
self.assertEqual(dictsortreversed(data, 0), expected)
def test_sort_list_of_tuple_like_dicts(self):
data = [
{'0': 'a', '1': '42'},
{'0': 'c', '1': 'string'},
{'0': 'b', '1': 'foo'},
]
expected = [
{'0': 'c', '1': 'string'},
{'0': 'b', '1': 'foo'},
{'0': 'a', '1': '42'},
]
self.assertEqual(dictsortreversed(data, '0'), expected)
def test_invalid_values(self):
"""
If dictsortreversed is passed something other than a list of
dictionaries, fail silently.
"""
self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
self.assertEqual(dictsortreversed('Hello!', 'age'), '')
self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
self.assertEqual(dictsortreversed(1, 'age'), '')
| bsd-3-clause |
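For context, the filter exercised above sorts a list of dictionaries by a key in descending order. A hedged usage sketch, assuming a configured Django environment and made-up values:

```python
# Direct use of the filter under test above (values are made up).
from django.template.defaultfilters import dictsortreversed

people = [{'name': 'Ann', 'age': 23}, {'name': 'Bob', 'age': 63}, {'name': 'Cy', 'age': 18}]
print(dictsortreversed(people, 'age'))  # Bob (63) first, Cy (18) last

# The equivalent template syntax:
#   {% for p in people|dictsortreversed:"age" %}{{ p.name }}{% endfor %}
```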
yank555-lu/N3-Sammy-JB | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/plat-linux2/DLFCN.py | 158 | 1628 | # Generated by h2py from /usr/include/dlfcn.h
_DLFCN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506L
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009L
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/dlfcn.h
RTLD_LAZY = 0x00001
RTLD_NOW = 0x00002
RTLD_BINDING_MASK = 0x3
RTLD_NOLOAD = 0x00004
RTLD_GLOBAL = 0x00100
RTLD_LOCAL = 0
RTLD_NODELETE = 0x01000
| apache-2.0 |
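These generated constants mirror the dlopen(3) flags from <dlfcn.h>. A hedged sketch of how they are typically combined with ctypes (the library name is only an example):

```python
# Sketch: passing the dlopen flags above to ctypes (library name is an example).
import ctypes
import DLFCN  # the generated module above

# Resolve symbols eagerly and make them visible to libraries loaded afterwards.
libm = ctypes.CDLL("libm.so.6", mode=DLFCN.RTLD_NOW | DLFCN.RTLD_GLOBAL)
print(libm.cos)  # symbol lookup succeeds immediately because of RTLD_NOW
```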
zcbenz/cefode-chromium | tools/find_runtime_symbols/tests/proc_maps_test.py | 38 | 4611 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import logging
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from proc_maps import ProcMaps
class ProcMapsTest(unittest.TestCase):
_TEST_PROCMAPS = '\n'.join([
'00000000-00001000 r--p 00000000 fc:00 0',
'0080b000-0080c000 r-xp 0020b000 fc:00 2231329'
' /usr/bin/some',
'0080c000-0080f000 ---p 0020c000 fc:00 2231329'
' /usr/bin/some',
'0100a000-0100c000 r-xp 0120a000 fc:00 22381'
' /usr/bin/chrome',
'0100c000-0100f000 ---p 0120c000 fc:00 22381'
' /usr/bin/chrome',
'0237d000-02a9b000 rw-p 00000000 00:00 0'
' [heap]',
'7fb920e6d000-7fb920e85000 r-xp 00000000 fc:00 263482'
' /lib/x86_64-linux-gnu/libpthread-2.15.so',
'7fb920e85000-7fb921084000 ---p 00018000 fc:00 263482'
' /lib/x86_64-linux-gnu/libpthread-2.15.so',
'7fb9225f4000-7fb922654000 rw-s 00000000 00:04 19660808'
' /SYSV00000000 (deleted)',
'ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0'
' [vsyscall]',
])
_EXPECTED = [
(0x0, 0x1000, 'r', '-', '-', 'p', 0x0, 'fc', '00', 0, ''),
(0x80b000, 0x80c000, 'r', '-', 'x', 'p', 0x20b000,
'fc', '00', 2231329, '/usr/bin/some'),
(0x80c000, 0x80f000, '-', '-', '-', 'p', 0x20c000,
'fc', '00', 2231329, '/usr/bin/some'),
(0x100a000, 0x100c000, 'r', '-', 'x', 'p', 0x120a000,
'fc', '00', 22381, '/usr/bin/chrome'),
(0x100c000, 0x100f000, '-', '-', '-', 'p', 0x120c000,
'fc', '00', 22381, '/usr/bin/chrome'),
(0x237d000, 0x2a9b000, 'r', 'w', '-', 'p', 0x0,
'00', '00', 0, '[heap]'),
(0x7fb920e6d000, 0x7fb920e85000, 'r', '-', 'x', 'p', 0x0,
'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
(0x7fb920e85000, 0x7fb921084000, '-', '-', '-', 'p', 0x18000,
'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
(0x7fb9225f4000, 0x7fb922654000, 'r', 'w', '-', 's', 0x0,
'00', '04', 19660808, '/SYSV00000000 (deleted)'),
(0xffffffffff600000, 0xffffffffff601000, 'r', '-', 'x', 'p', 0x0,
'00', '00', 0, '[vsyscall]'),
]
@staticmethod
def _expected_as_dict(index):
return {
'begin': ProcMapsTest._EXPECTED[index][0],
'end': ProcMapsTest._EXPECTED[index][1],
'readable': ProcMapsTest._EXPECTED[index][2],
'writable': ProcMapsTest._EXPECTED[index][3],
'executable': ProcMapsTest._EXPECTED[index][4],
'private': ProcMapsTest._EXPECTED[index][5],
'offset': ProcMapsTest._EXPECTED[index][6],
'major': ProcMapsTest._EXPECTED[index][7],
'minor': ProcMapsTest._EXPECTED[index][8],
'inode': ProcMapsTest._EXPECTED[index][9],
'name': ProcMapsTest._EXPECTED[index][10],
}
def test_load(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
for index, entry in enumerate(maps):
self.assertEqual(entry.as_dict(), self._expected_as_dict(index))
def test_constants(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [4, 7]
for index, entry in enumerate(maps.iter(ProcMaps.constants)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
def test_executable(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [3, 6]
for index, entry in enumerate(maps.iter(ProcMaps.executable)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
def test_executable_and_constants(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [3, 4, 6, 7]
for index, entry in enumerate(maps.iter(ProcMaps.executable_and_constants)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
| bsd-3-clause |
wangyum/spark | sql/gen-sql-config-docs.py | 15 | 4773 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from collections import namedtuple
from textwrap import dedent
# To avoid adding a new direct dependency, we import markdown from within mkdocs.
from mkdocs.structure.pages import markdown
from pyspark.java_gateway import launch_gateway
SQLConfEntry = namedtuple(
"SQLConfEntry", ["name", "default", "description", "version"])
def get_sql_configs(jvm, group):
if group == "static":
config_set = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listStaticSQLConfigs()
else:
config_set = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listRuntimeSQLConfigs()
sql_configs = [
SQLConfEntry(
name=_sql_config._1(),
default=_sql_config._2(),
description=_sql_config._3(),
version=_sql_config._4()
)
for _sql_config in config_set
]
return sql_configs
def generate_sql_configs_table_html(sql_configs, path):
"""
Generates an HTML table at `path` that lists all public SQL
configuration options.
The table will look something like this:
```html
<table class="table">
<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
<tr>
<td><code>spark.sql.adaptive.enabled</code></td>
<td>false</td>
<td><p>When true, enable adaptive query execution.</p></td>
<td>2.1.0</td>
</tr>
...
</table>
```
"""
value_reference_pattern = re.compile(r"^<value of (\S*)>$")
with open(path, 'w') as f:
f.write(dedent(
"""
<table class="table">
<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
"""
))
for config in sorted(sql_configs, key=lambda x: x.name):
if config.name == "spark.sql.session.timeZone":
default = "(value of local timezone)"
elif config.name == "spark.sql.warehouse.dir":
default = "(value of <code>$PWD/spark-warehouse</code>)"
elif config.default == "<undefined>":
default = "(none)"
elif config.default.startswith("<value of "):
referenced_config_name = value_reference_pattern.match(config.default).group(1)
default = "(value of <code>{}</code>)".format(referenced_config_name)
else:
default = config.default
if default.startswith("<"):
raise Exception(
"Unhandled reference in SQL config docs. Config '{name}' "
"has default '{default}' that looks like an HTML tag."
.format(
name=config.name,
default=config.default,
)
)
f.write(dedent(
"""
<tr>
<td><code>{name}</code></td>
<td>{default}</td>
<td>{description}</td>
<td>{version}</td>
</tr>
"""
.format(
name=config.name,
default=default,
description=markdown.markdown(config.description),
version=config.version
)
))
f.write("</table>\n")
if __name__ == "__main__":
jvm = launch_gateway().jvm
docs_root_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs")
sql_configs = get_sql_configs(jvm, "runtime")
sql_configs_table_path = os.path.join(docs_root_dir, "generated-runtime-sql-config-table.html")
generate_sql_configs_table_html(sql_configs, path=sql_configs_table_path)
sql_configs = get_sql_configs(jvm, "static")
sql_configs_table_path = os.path.join(docs_root_dir, "generated-static-sql-config-table.html")
generate_sql_configs_table_html(sql_configs, path=sql_configs_table_path)
| apache-2.0 |
lybicat/lybica-runner | tst/test_lybica/actions/user_script.py | 1 | 1597 | from unittest import TestCase
from lybica.actions.user_script import _UserScriptAction
from lybica.__main__ import Context
import os
import shutil
class TestUserScriptAction(TestCase):
def setUp(self):
super(TestUserScriptAction, self).setUp()
self.context = Context()
os.environ['WORKSPACE'] = './ut_logs'
self.context.WORKSPACE = './ut_logs'
os.makedirs(self.context.WORKSPACE)
def tearDown(self):
if os.path.exists(self.context.WORKSPACE):
shutil.rmtree(self.context.WORKSPACE)
def test_run_do_ut_action_under_case_root(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_ut'
_Action()._run_script(self.context, {})
def test_run_do_lybica_ut_action_under_lybica_directory(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_lybica_ut'
_Action()._run_script(self.context, {})
def test_run_do_undefined_action(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_undefined'
self.assertRaises(RuntimeError, _Action()._run_script, self.context, {})
| mit |
kellyaj/pytactoe | pytactoe/test/test_computer.py | 1 | 3921 | import unittest
from mock import MagicMock
from pytactoe.board import Board
from pytactoe.computer import Computer
class ComputerTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("O")
self.board = Board()
def test_computer_initializes_with_a_mark(self):
self.assertEqual("O", self.computer.mark)
def test_checking_first_move(self):
self.assertTrue(self.computer.is_first_move(self.board))
self.board.spots = [1, 2, 3, 4, "O", 6, 7, 8, 9]
self.assertFalse(self.computer.is_first_move(self.board))
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
self.assertFalse(self.computer.is_first_move(self.board))
class ComputerMoveChoiceTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("O")
self.board = Board()
self.mock_presenter = MagicMock()
self.mock_presenter.computer_move_message = MagicMock()
def test_computer_chooses_moves_from_available_spots(self):
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
available_spots = self.board.available_spots()
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertTrue(chosen_move in self.board.spots)
    def test_computer_chooses_first_move_optimally(self):
optimal_moves = [1, 3, 5, 7, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertTrue(chosen_move in optimal_moves)
def test_computer_chooses_obvious_win(self):
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(1, chosen_move)
def test_computer_chooses_winning_row_move(self):
self.board.spots = ["X", "X", 3, 4, "X", 6, 7, "O", "O"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(7, chosen_move)
def test_computer_chooses_winning_column_move(self):
self.board.spots = ["X", 2, "O", "X", "O", 6, 7, 8, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(7, chosen_move)
def test_computer_chooses_winning_diagonal_move(self):
self.board.spots = [1, 2, "X", 4, "X", "O", "O", "O", "X"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(1, chosen_move)
def test_computer_takes_win_over_block(self):
self.board.spots = ["X", "X", 3, 4, "X", 6, "O", "O", 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(9, chosen_move)
def test_computer_takes_win_over_fork(self):
self.board.spots = ["X", 2, "X", "O", "O", 6, "X", 8, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(6, chosen_move)
class ScoreMoveTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("X")
self.board = Board()
self.winning_spots = ["X", "X", 3, 4, "X", 6, "O", "O", "O"]
def test_stalemates_are_scored_as_zero(self):
stalemated_board = ["X", "O", "O", "O", "X", "X", "X", "X", "O"]
self.board.spots = stalemated_board
score = self.computer.score_move(self.board, "X", 1)
self.assertEqual(score, 0)
    def test_wins_are_scored_as_one(self):
self.board.spots = self.winning_spots
score = self.computer.score_move(self.board, "X", 1)
self.assertEqual(score, -1)
def test_lower_depths_received_higher_scores(self):
self.board.spots = self.winning_spots
low_depth_score = -(self.computer.score_move(self.board, "X", 1))
high_depth_score = -(self.computer.score_move(self.board, "X", 2))
self.assertTrue(low_depth_score > high_depth_score)
| mit |
mrtukkin/svm-street-detector | devkit_kitti/helper.py | 1 | 12022 | #!/usr/bin/env python
#
# THE KITTI VISION BENCHMARK SUITE: ROAD BENCHMARK
#
# Copyright (C) 2013
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
#
# Authors: Tobias Kuehnl <[email protected]>
# Jannik Fritsch <[email protected]>
#
import numpy as np
import pylab
import os
import cv2
def getGroundTruth(fileNameGT):
'''
Returns the ground truth maps for roadArea and the validArea
:param fileNameGT:
'''
# Read GT
assert os.path.isfile(fileNameGT), 'Cannot find: %s' % fileNameGT
full_gt = cv2.imread(fileNameGT, cv2.CV_LOAD_IMAGE_UNCHANGED)
#attention: OpenCV reads in as BGR, so first channel has Blue / road GT
roadArea = full_gt[:,:,0] > 0
validArea = full_gt[:,:,2] > 0
return roadArea, validArea
def overlayImageWithConfidence(in_image, conf, vis_channel = 1, threshold = 0.5):
'''
:param in_image:
:param conf:
:param vis_channel:
:param threshold:
'''
if in_image.dtype == 'uint8':
visImage = in_image.copy().astype('f4')/255
else:
visImage = in_image.copy()
channelPart = visImage[:, :, vis_channel] * (conf > threshold) - conf
channelPart[channelPart < 0] = 0
visImage[:, :, vis_channel] = visImage[:, :, vis_channel] * (conf <= threshold) + (conf > threshold) * conf + channelPart
return visImage
def evalExp(gtBin, cur_prob, thres, validMap = None, validArea=None):
'''
Does the basic pixel based evaluation!
:param gtBin:
:param cur_prob:
:param thres:
:param validMap:
'''
assert len(cur_prob.shape) == 2, 'Wrong size of input prob map'
assert len(gtBin.shape) == 2, 'Wrong size of input prob map'
thresInf = np.concatenate(([-np.Inf], thres, [np.Inf]))
#Merge validMap with validArea
    if validMap is not None:
        if validArea is not None:
validMap = (validMap == True) & (validArea == True)
    elif validArea is not None:
validMap=validArea
# histogram of false negatives
    if validMap is not None:
fnArray = cur_prob[(gtBin == True) & (validMap == True)]
else:
fnArray = cur_prob[(gtBin == True)]
fnHist = np.histogram(fnArray,bins=thresInf)[0]
fnCum = np.cumsum(fnHist)
FN = fnCum[0:0+len(thres)];
    if validMap is not None:
fpArray = cur_prob[(gtBin == False) & (validMap == True)]
else:
fpArray = cur_prob[(gtBin == False)]
fpHist = np.histogram(fpArray, bins=thresInf)[0]
fpCum = np.flipud(np.cumsum(np.flipud(fpHist)))
FP = fpCum[1:1+len(thres)]
# count labels and protos
#posNum = fnArray.shape[0]
#negNum = fpArray.shape[0]
    if validMap is not None:
posNum = np.sum((gtBin == True) & (validMap == True))
negNum = np.sum((gtBin == False) & (validMap == True))
else:
posNum = np.sum(gtBin == True)
negNum = np.sum(gtBin == False)
return FN, FP, posNum, negNum
def pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh = None):
'''
@param totalPosNum: scalar
@param totalNegNum: scalar
@param totalFN: vector
@param totalFP: vector
@param thresh: vector
'''
#Calc missing stuff
totalTP = totalPosNum - totalFN
totalTN = totalNegNum - totalFP
valid = (totalTP>=0) & (totalTN>=0)
assert valid.all(), 'Detected invalid elements in eval'
recall = totalTP / float( totalPosNum )
precision = totalTP / (totalTP + totalFP + 1e-10)
selector_invalid = (recall==0) & (precision==0)
recall = recall[~selector_invalid]
precision = precision[~selector_invalid]
maxValidIndex = len(precision)
#Pascal VOC average precision
AvgPrec = 0
counter = 0
for i in np.arange(0,1.1,0.1):
ind = np.where(recall>=i)
        if ind[0].size == 0:
continue
pmax = max(precision[ind])
AvgPrec += pmax
counter += 1
AvgPrec = AvgPrec/counter
# F-measure operation point
beta = 1.0
betasq = beta**2
F = (1 + betasq) * (precision * recall)/((betasq * precision) + recall + 1e-10)
index = F.argmax()
MaxF= F[index]
recall_bst = recall[index]
precision_bst = precision[index]
TP = totalTP[index]
TN = totalTN[index]
FP = totalFP[index]
FN = totalFN[index]
valuesMaxF = np.zeros((1,4),'u4')
valuesMaxF[0,0] = TP
valuesMaxF[0,1] = TN
valuesMaxF[0,2] = FP
valuesMaxF[0,3] = FN
#ACC = (totalTP+ totalTN)/(totalPosNum+totalNegNum)
prob_eval_scores = calcEvalMeasures(valuesMaxF)
prob_eval_scores['AvgPrec'] = AvgPrec
prob_eval_scores['MaxF'] = MaxF
#prob_eval_scores['totalFN'] = totalFN
#prob_eval_scores['totalFP'] = totalFP
prob_eval_scores['totalPosNum'] = totalPosNum
prob_eval_scores['totalNegNum'] = totalNegNum
prob_eval_scores['precision'] = precision
prob_eval_scores['recall'] = recall
#prob_eval_scores['precision_bst'] = precision_bst
#prob_eval_scores['recall_bst'] = recall_bst
prob_eval_scores['thresh'] = thresh
    if thresh is not None:
BestThresh= thresh[index]
prob_eval_scores['BestThresh'] = BestThresh
#return a dict
return prob_eval_scores
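def exampleEval(prob_map, gt_file):
    '''
    Illustrative only (not part of the original devkit): sketches how evalExp
    and pxEval_maximizeFMeasure are typically chained. Assumes `prob_map` is a
    road-probability map scaled to [0, 1] with the same shape as the ground
    truth read from `gt_file`.
    :param prob_map:
    :param gt_file:
    '''
    thresh = np.array(range(0, 256)) / 255.0
    roadArea, validArea = getGroundTruth(gt_file)
    FN, FP, posNum, negNum = evalExp(roadArea, prob_map, thresh,
                                     validMap=None, validArea=validArea)
    # the returned dict contains e.g. 'MaxF', 'AvgPrec' and 'BestThresh'
    return pxEval_maximizeFMeasure(posNum, negNum, FN, FP, thresh=thresh)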
def calcEvalMeasures(evalDict, tag = '_wp'):
'''
:param evalDict:
:param tag:
'''
# array mode!
TP = evalDict[:,0].astype('f4')
TN = evalDict[:,1].astype('f4')
FP = evalDict[:,2].astype('f4')
FN = evalDict[:,3].astype('f4')
Q = TP / (TP + FP + FN)
P = TP + FN
N = TN + FP
TPR = TP / P
FPR = FP / N
FNR = FN / P
TNR = TN / N
A = (TP + TN) / (P + N)
precision = TP / (TP + FP)
recall = TP / P
#numSamples = TP + TN + FP + FN
correct_rate = A
# F-measure
#beta = 1.0
#betasq = beta**2
#F_max = (1 + betasq) * (precision * recall)/((betasq * precision) + recall + 1e-10)
outDict =dict()
outDict['TP'+ tag] = TP
outDict['FP'+ tag] = FP
outDict['FN'+ tag] = FN
outDict['TN'+ tag] = TN
outDict['Q'+ tag] = Q
outDict['A'+ tag] = A
outDict['TPR'+ tag] = TPR
outDict['FPR'+ tag] = FPR
outDict['FNR'+ tag] = FNR
outDict['PRE'+ tag] = precision
outDict['REC'+ tag] = recall
outDict['correct_rate'+ tag] = correct_rate
return outDict
def setFigLinesBW(fig):
"""
Take each axes in the figure, and for each line in the axes, make the
line viewable in black and white.
"""
for ax in fig.get_axes():
setAxLinesBW(ax)
def setAxLinesBW(ax):
"""
Take each Line2D in the axes, ax, and convert the line style to be
suitable for black and white viewing.
"""
MARKERSIZE = 3
# COLORMAP = {
# 'r': {'marker': None, 'dash': (None,None)},
# 'g': {'marker': None, 'dash': [5,2]},
# 'm': {'marker': None, 'dash': [11,3]},
# 'b': {'marker': None, 'dash': [6,3,2,3]},
# 'c': {'marker': None, 'dash': [1,3]},
# 'y': {'marker': None, 'dash': [5,3,1,2,1,10]},
# 'k': {'marker': 'o', 'dash': (None,None)} #[1,2,1,10]}
# }
COLORMAP = {
'r': {'marker': "None", 'dash': ("None","None")},
'g': {'marker': "None", 'dash': [5,2]},
'm': {'marker': "None", 'dash': [11,3]},
'b': {'marker': "None", 'dash': [6,3,2,3]},
'c': {'marker': "None", 'dash': [1,3]},
'y': {'marker': "None", 'dash': [5,3,1,2,1,10]},
'k': {'marker': 'o', 'dash': ("None","None")} #[1,2,1,10]}
}
for line in ax.get_lines():
origColor = line.get_color()
#line.set_color('black')
line.set_dashes(COLORMAP[origColor]['dash'])
line.set_marker(COLORMAP[origColor]['marker'])
line.set_markersize(MARKERSIZE)
def plotPrecisionRecall(precision, recall, outFileName, Fig=None, drawCol=1, textLabel = None, title = None, fontsize1 = 24, fontsize2 = 20, linewidth = 3):
'''
:param precision:
:param recall:
:param outFileName:
:param Fig:
:param drawCol:
:param textLabel:
:param fontsize1:
:param fontsize2:
:param linewidth:
'''
clearFig = False
if Fig == None:
Fig = pylab.figure()
clearFig = True
#tableString = 'Algo avgprec Fmax prec recall accuracy fpr Q(TonITS)\n'
linecol = ['g','m','b','c']
#if we are evaluating SP, then BL is available
#sectionName = 'Evaluation_'+tag+'PxProb'
#fullEvalFile = os.path.join(eval_dir,evalName)
#Precision,Recall,evalString = readEvaluation(fullEvalFile,sectionName,AlgoLabel)
pylab.plot(100*recall, 100*precision, linewidth=linewidth, color=linecol[drawCol], label=textLabel)
#writing out PrecRecall curves as graphic
setFigLinesBW(Fig)
if textLabel!= None:
pylab.legend(loc='lower left',prop={'size':fontsize2})
if title!= None:
pylab.title(title, fontsize=fontsize1)
#pylab.title(title,fontsize=24)
pylab.ylabel('PRECISION [%]',fontsize=fontsize1)
pylab.xlabel('RECALL [%]',fontsize=fontsize1)
pylab.xlim(0,100)
pylab.xticks( [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
('0','','20','','40','','60','','80','','100'), fontsize=fontsize2 )
pylab.ylim(0,100)
pylab.yticks( [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
('0','','20','','40','','60','','80','','100'), fontsize=fontsize2 )
pylab.grid(True)
#
if type(outFileName) != list:
pylab.savefig( outFileName )
else:
for outFn in outFileName:
pylab.savefig( outFn )
if clearFig:
pylab.close()
Fig.clear()
def saveBEVImageWithAxes(data, outputname, cmap = None, xlabel = 'x [m]', ylabel = 'z [m]', rangeX = [-10, 10], rangeXpx = None, numDeltaX = 5, rangeZ = [7, 62], rangeZpx = None, numDeltaZ = 5, fontSize = 16):
'''
:param data:
:param outputname:
:param cmap:
'''
aspect_ratio = float(data.shape[1])/data.shape[0]
fig = pylab.figure()
Scale = 8
# add +1 to get axis text
fig.set_size_inches(Scale*aspect_ratio+1,Scale*1)
ax = pylab.gca()
#ax.set_axis_off()
#fig.add_axes(ax)
if cmap != None:
pylab.set_cmap(cmap)
#ax.imshow(data, interpolation='nearest', aspect = 'normal')
ax.imshow(data, interpolation='nearest')
if rangeXpx == None:
rangeXpx = (0, data.shape[1])
if rangeZpx == None:
rangeZpx = (0, data.shape[0])
modBev_plot(ax, rangeX, rangeXpx, numDeltaX, rangeZ, rangeZpx, numDeltaZ, fontSize, xlabel = xlabel, ylabel = ylabel)
#plt.savefig(outputname, bbox_inches='tight', dpi = dpi)
pylab.savefig(outputname, dpi = data.shape[0]/Scale)
pylab.close()
fig.clear()
def modBev_plot(ax, rangeX = [-10, 10 ], rangeXpx= [0, 400], numDeltaX = 5, rangeZ= [8,48 ], rangeZpx= [0, 800], numDeltaZ = 9, fontSize = None, xlabel = 'x [m]', ylabel = 'z [m]'):
'''
@param ax:
'''
    #TODO: Configurability would be nice!
if fontSize==None:
fontSize = 8
ax.set_xlabel(xlabel, fontsize=fontSize)
ax.set_ylabel(ylabel, fontsize=fontSize)
zTicksLabels_val = np.linspace(rangeZpx[0], rangeZpx[1], numDeltaZ)
ax.set_yticks(zTicksLabels_val)
#ax.set_yticks([0, 100, 200, 300, 400, 500, 600, 700, 800])
xTicksLabels_val = np.linspace(rangeXpx[0], rangeXpx[1], numDeltaX)
ax.set_xticks(xTicksLabels_val)
xTicksLabels_val = np.linspace(rangeX[0], rangeX[1], numDeltaX)
zTicksLabels = map(lambda x: str(int(x)), xTicksLabels_val)
ax.set_xticklabels(zTicksLabels,fontsize=fontSize)
zTicksLabels_val = np.linspace(rangeZ[1],rangeZ[0], numDeltaZ)
zTicksLabels = map(lambda x: str(int(x)), zTicksLabels_val)
ax.set_yticklabels(zTicksLabels,fontsize=fontSize)
| gpl-3.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/dataset_preproc/data_generators/tokenizer.py | 7 | 6150 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in range(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
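def _example_round_trip():
  """Illustrative sketch only (not used elsewhere in the module): encode() and
  decode() invert each other on ordinary text, since single spaces between
  alphanumeric tokens are dropped on encode and re-inserted on decode."""
  tokens = encode(u"Dude - that's so cool.")
  assert tokens == [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
  assert decode(tokens) == u"Dude - that's so cool."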
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(tf.gfile.Glob(filepattern))
lines_read = 0
for filename in filenames:
with tf.gfile.Open(filename) as f:
if split_on_newlines:
for line in f:
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
else:
if max_lines:
doc = []
for line in f:
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Returns:
a dictionary mapping token to count.
"""
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines):
counts.update(encode(_native_to_unicode(doc)))
return counts
def vocab_token_counts(text_filepattern, max_lines):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
tf.logging.warning("Malformed vocab line #%d '%s'", i, line)
continue
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret
| apache-2.0 |
fishstamp82/loprop | test/h2o_data.py | 2 | 11431 | from ..daltools.util.full import init
Z = [8., 1., 1.]
Rc = init([0.00000000, 0.00000000, 0.48860959])
Dtot = [0, 0, -0.76539388]
Daa = init([
[ 0.00000000, 0.00000000, -0.28357300],
[ 0.15342658, 0.00000000, 0.12734703],
[-0.15342658, 0.00000000, 0.12734703],
])
QUc = init([-7.31176220, 0., 0., -5.43243232, 0., -6.36258665])
QUN = init([4.38968295, 0., 0., 0., 0., 1.75400326])
QUaa = init([
[-3.29253618, 0.00000000, 0.00000000, -4.54316657, 0.00000000, -4.00465380],
[-0.13213704, 0.00000000, 0.24980518, -0.44463288, 0.00000000, -0.26059139],
[-0.13213704, 0.00000000,-0.24980518, -0.44463288, 0.00000000, -0.26059139]
])
Fab = init([
[-0.11E-03, 0.55E-04, 0.55E-04],
[ 0.55E-04, -0.55E-04, 0.16E-30],
[ 0.55E-04, 0.16E-30, -0.55E-04]
])
Lab = init([
[0.11E-03, 0.28E-03, 0.28E-03],
[0.28E-03, 0.17E-03, 0.22E-03],
[0.28E-03, 0.22E-03, 0.17E-03]
])
la = init([
[0.0392366,-27.2474016 , 27.2081650],
[0.0358964, 27.2214515 ,-27.2573479],
[0.01211180, -0.04775576, 0.03564396],
[0.01210615, -0.00594030, -0.00616584],
[10.69975088, -5.34987556, -5.34987532],
[-10.6565582, 5.3282791 , 5.3282791]
])
O = [
0.76145382,
-0.00001648, 1.75278523,
-0.00007538, 0.00035773, 1.39756345
]
H1O = [
3.11619527,
0.00019911, 1.25132346,
2.11363325, 0.00111442, 2.12790474
]
H1 = [
0.57935224,
0.00018083, 0.43312326,
0.11495546, 0.00004222, 0.45770123
]
H2O = [
3.11568759,
0.00019821, 1.25132443,
-2.11327482, -0.00142746, 2.12790473
]
H2H1 = [
0.04078206,
-0.00008380, -0.01712262,
-0.00000098, 0.00000084, -0.00200285
]
H2 = [
0.57930522,
0.00018221, 0.43312149,
-0.11493635, -0.00016407, 0.45770123
]
Aab = init([O, H1O, H1, H2O, H2H1, H2])
Aa = init([
[ 3.87739525, 0.00018217, 3.00410918, 0.00010384, 0.00020122, 3.52546819 ],
[ 2.15784091, 0.00023848, 1.05022368, 1.17177159, 0.00059985, 1.52065218 ],
[ 2.15754005, 0.00023941, 1.05022240, -1.17157425, -0.00087738, 1.52065217 ]
])
ff = 0.001
rMP = init([
#O
[
[-8.70343886, 0.00000000, 0.00000000, -0.39827574, -3.68114747, 0.00000000, 0.00000000, -4.58632761, 0.00000000, -4.24741556],
[-8.70343235, 0.00076124, 0.00000000, -0.39827535, -3.68114147, 0.00000000, 0.00193493, -4.58631888, 0.00000000, -4.24741290],
[-8.70343291,-0.00076166, 0.00000000, -0.39827505, -3.68114128, 0.00000000, -0.00193603, -4.58631789, 0.00000000, -4.24741229],
[-8.70343685,-0.00000006, 0.00175241, -0.39827457, -3.68114516, 0.00000000, 0.00000161, -4.58632717, 0.00053363, -4.24741642],
[-8.70343685, 0.00000000, -0.00175316, -0.39827456, -3.68114514, 0.00000000, 0.00000000, -4.58632711, -0.00053592, -4.24741639],
[-8.70166502, 0.00000000, 0.00000144, -0.39688042, -3.67884999, 0.00000000, 0.00000000, -4.58395384, 0.00000080, -4.24349307],
[-8.70520554, 0.00000000, 0.00000000, -0.39967554, -3.68344246, 0.00000000, 0.00000000, -4.58868836, 0.00000000, -4.25134640],
],
#H1O
[
[ 0.00000000, 0.10023328, 0.00000000, 0.11470275, 0.53710687, 0.00000000, 0.43066796, 0.04316104, 0.00000000, 0.36285790],
[ 0.00150789, 0.10111974, 0.00000000, 0.11541803, 0.53753360, 0.00000000, 0.43120945, 0.04333774, 0.00000000, 0.36314215],
[-0.00150230, 0.09934695, 0.00000000, 0.11398581, 0.53667861, 0.00000000, 0.43012612, 0.04298361, 0.00000000, 0.36257249],
[ 0.00000331, 0.10023328, 0.00125017, 0.11470067, 0.53710812, -0.00006107, 0.43066944, 0.04316020, 0.00015952, 0.36285848],
[ 0.00000100, 0.10023249, -0.00125247, 0.11470042, 0.53710716, 0.00006135, 0.43066837, 0.04316018, -0.00015966, 0.36285788],
[ 0.00088692, 0.10059268, -0.00000064, 0.11590322, 0.53754715, -0.00000006, 0.43071206, 0.04334198, -0.00000015, 0.36330053],
[-0.00088334, 0.09987383, 0.00000000, 0.11350091, 0.53666602, 0.00000000, 0.43062352, 0.04297910, 0.00000000, 0.36241326],
],
#H1
[
[-0.64828057, 0.10330994, 0.00000000, 0.07188960, -0.47568174, 0.00000000, -0.03144252, -0.46920879, 0.00000000, -0.50818752],
[-0.64978846, 0.10389186, 0.00000000, 0.07204462, -0.47729337, 0.00000000, -0.03154159, -0.47074619, 0.00000000, -0.50963693],
[-0.64677827, 0.10273316, 0.00000000, 0.07173584, -0.47408263, 0.00000000, -0.03134407, -0.46768337, 0.00000000, -0.50674873],
[-0.64828388, 0.10331167, 0.00043314, 0.07189029, -0.47568875, -0.00023642, -0.03144270, -0.46921635, -0.00021728, -0.50819386],
[-0.64828157, 0.10331095, -0.00043311, 0.07188988, -0.47568608, 0.00023641, -0.03144256, -0.46921346, 0.00021729, -0.50819095],
[-0.64916749, 0.10338629, -0.00000024, 0.07234862, -0.47634698, 0.00000013, -0.03159569, -0.47003679, 0.00000011, -0.50936853],
[-0.64739723, 0.10323524, 0.00000000, 0.07143322, -0.47502412, 0.00000000, -0.03129003, -0.46838912, 0.00000000, -0.50701656],
],
#H2O
[
[ 0.00000000,-0.10023328, 0.00000000, 0.11470275, 0.53710687, 0.00000000, -0.43066796, 0.04316104, 0.00000000, 0.36285790],
[-0.00150139,-0.09934749, 0.00000000, 0.11398482, 0.53667874, 0.00000000, -0.43012670, 0.04298387, 0.00000000, 0.36257240],
[ 0.00150826,-0.10112008, 0.00000000, 0.11541676, 0.53753350, 0.00000000, -0.43120982, 0.04333795, 0.00000000, 0.36314186],
[-0.00000130,-0.10023170, 0.00125018, 0.11470018, 0.53710620, 0.00006107, -0.43066732, 0.04316017, 0.00015952, 0.36285728],
[ 0.00000101,-0.10023249, -0.00125247, 0.11470042, 0.53710716, -0.00006135, -0.43066838, 0.04316018, -0.00015966, 0.36285788],
[ 0.00088692,-0.10059268, -0.00000064, 0.11590322, 0.53754715, 0.00000006, -0.43071206, 0.04334198, -0.00000015, 0.36330053],
[-0.00088334,-0.09987383, 0.00000000, 0.11350091, 0.53666602, 0.00000000, -0.43062352, 0.04297910, 0.00000000, 0.36241326],
],
#H2H1
[
[ 0.00000000, 0.00000000, 0.00000000, -0.00378789, 0.00148694, 0.00000000, 0.00000000, 0.00599079, 0.00000000, 0.01223822],
[ 0.00000000, 0.00004089, 0.00000000, -0.00378786, 0.00148338, 0.00000000, -0.00004858, 0.00599281, 0.00000000, 0.01224094],
[ 0.00000000,-0.00004067, 0.00000000, -0.00378785, 0.00148341, 0.00000000, 0.00004861, 0.00599277, 0.00000000, 0.01224093],
[ 0.00000000,-0.00000033, -0.00001707, -0.00378763, 0.00149017, 0.00000000, 0.00000001, 0.00599114, -0.00001229, 0.01223979],
[ 0.00000000, 0.00000000, 0.00001717, -0.00378763, 0.00149019, 0.00000000, 0.00000000, 0.00599114, 0.00001242, 0.01223980],
[ 0.00000000, 0.00000000, 0.00000000, -0.00378978, 0.00141897, 0.00000000, 0.00000000, 0.00590445, 0.00000002, 0.01210376],
[ 0.00000000, 0.00000000, 0.00000000, -0.00378577, 0.00155694, 0.00000000, 0.00000000, 0.00607799, 0.00000000, 0.01237393],
],
#H2
[
[-0.64828057,-0.10330994, 0.00000000, 0.07188960, -0.47568174, 0.00000000, 0.03144252, -0.46920879, 0.00000000, -0.50818752],
[-0.64677918,-0.10273369, 0.00000000, 0.07173576, -0.47408411, 0.00000000, 0.03134408, -0.46768486, 0.00000000, -0.50674986],
[-0.64978883,-0.10389230, 0.00000000, 0.07204446, -0.47729439, 0.00000000, 0.03154159, -0.47074717, 0.00000000, -0.50963754],
[-0.64827927,-0.10331022, 0.00043313, 0.07188947, -0.47568340, 0.00023642, 0.03144242, -0.46921057, -0.00021727, -0.50818804],
[-0.64828158,-0.10331095, -0.00043311, 0.07188988, -0.47568609, -0.00023641, 0.03144256, -0.46921348, 0.00021729, -0.50819097],
[-0.64916749,-0.10338629, -0.00000024, 0.07234862, -0.47634698, -0.00000013, 0.03159569, -0.47003679, 0.00000011, -0.50936853],
[-0.64739723,-0.10323524, 0.00000000, 0.07143322, -0.47502412, 0.00000000, 0.03129003, -0.46838912, 0.00000000, -0.50701656]
]
])
Am = init([
[8.186766009140, 0., 0.],
[0., 5.102747935447, 0.],
[0., 0., 6.565131856389]
])
Amw = init([
[11.98694996213, 0., 0.],
[0., 4.403583657738, 0.],
[0., 0., 2.835142058626]
])
R = [
[ 0.00000, 0.00000, 0.69801],
[-1.48150, 0.00000, -0.34901],
[ 1.48150, 0.00000, -0.34901]
]
Qtot = -10.0
Q = rMP[0, 0, (0, 2, 5)]
D = rMP[1:4, 0, :]
QU = rMP[4:, 0, :]
dQa = rMP[0, :, (0,2,5)]
dQab = rMP[0, :, (1, 3, 4)]
#These are string data for testing the potential file
PAn0 = """AU
3 -1 0 1
1 0.000 0.000 0.698
1 -1.481 0.000 -0.349
1 1.481 0.000 -0.349
"""
PA00 = """AU
3 0 0 1
1 0.000 0.000 0.698 -0.703
1 -1.481 0.000 -0.349 0.352
1 1.481 0.000 -0.349 0.352
"""
PA10 = """AU
3 1 0 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127
"""
PA20 = """AU
3 2 0 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261
"""
PA21 = """AU
3 2 1 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005 3.466
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261 1.576
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261 1.576
"""
PA22 = """AU
3 2 2 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005 3.875 -0.000 -0.000 3.000 -0.000 3.524
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261 2.156 -0.000 1.106 1.051 -0.000 1.520
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261 2.156 -0.000 -1.106 1.051 -0.000 1.520
"""
OUTPUT_n0_1 = """\
---------------
Atomic domain 1
---------------
Domain center: 0.00000 0.00000 0.69801
"""
OUTPUT_00_1 = OUTPUT_n0_1 + """\
Nuclear charge: 8.00000
Electronic charge: -8.70344
Total charge: -0.70344
"""
OUTPUT_10_1 = OUTPUT_00_1 + """\
Electronic dipole -0.00000 0.00000 -0.28357
"""
OUTPUT_20_1 = OUTPUT_10_1 + """\
Electronic quadrupole -3.29254 0.00000 -0.00000 -4.54317 0.00000 -4.00466
"""
OUTPUT_01_1 = OUTPUT_00_1 + """\
Isotropic polarizablity (w=0) 3.46639
"""
OUTPUT_02_1 = OUTPUT_00_1 + """\
Electronic polarizability (w=0) 3.87468 -0.00000 3.00027 -0.00000 -0.00000 3.52422
"""
| gpl-3.0 |
linkedin/Zopkio | examples/zookeeper/zookeeper_example.py | 4 | 1235 | # Copyright 2015 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
test = {
"deployment_code": os.path.join(os.path.dirname(os.path.abspath(__file__)), "deployment.py"),
"test_code": [
os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_suites/zookeeper_basic.py")],
"perf_code": os.path.join(os.path.dirname(os.path.abspath(__file__)), "log_naarad.py"),
"configs_directory": os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/")
}
| apache-2.0 |
nwokeo/supysonic | venv/lib/python2.7/site-packages/psycopg2/errorcodes.py | 7 | 13246 | """Error codes for PostgreSQL
This module contains symbolic names for all PostgreSQL error codes.
"""
# psycopg2/errorcodes.py - PostgreSQL error codes
#
# Copyright (C) 2006-2010 Johan Dahlin <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# Based on:
#
# http://www.postgresql.org/docs/current/static/errcodes-appendix.html
#
def lookup(code, _cache={}):
"""Lookup an error code or class code and return its symbolic name.
Raise `KeyError` if the code is not found.
"""
if _cache:
return _cache[code]
# Generate the lookup map at first usage.
tmp = {}
for k, v in globals().iteritems():
if isinstance(v, str) and len(v) in (2, 5):
tmp[v] = k
assert tmp
# Atomic update, to avoid race condition on import (bug #382)
_cache.update(tmp)
return _cache[code]
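def _lookup_example():
    """Illustrative only: both full five-character SQLSTATE codes and
    two-character class codes resolve to the symbolic names defined below."""
    assert lookup('42P01') == 'UNDEFINED_TABLE'
    assert lookup('42') == 'CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION'
    # unknown codes raise KeyError, as documented in lookup()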
# autogenerated data: do not edit below this point.
# Error classes
CLASS_SUCCESSFUL_COMPLETION = '00'
CLASS_WARNING = '01'
CLASS_NO_DATA = '02'
CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
CLASS_CONNECTION_EXCEPTION = '08'
CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
CLASS_FEATURE_NOT_SUPPORTED = '0A'
CLASS_INVALID_TRANSACTION_INITIATION = '0B'
CLASS_LOCATOR_EXCEPTION = '0F'
CLASS_INVALID_GRANTOR = '0L'
CLASS_INVALID_ROLE_SPECIFICATION = '0P'
CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
CLASS_CASE_NOT_FOUND = '20'
CLASS_CARDINALITY_VIOLATION = '21'
CLASS_DATA_EXCEPTION = '22'
CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
CLASS_INVALID_CURSOR_STATE = '24'
CLASS_INVALID_TRANSACTION_STATE = '25'
CLASS_INVALID_SQL_STATEMENT_NAME = '26'
CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
CLASS_SQL_ROUTINE_EXCEPTION = '2F'
CLASS_INVALID_CURSOR_NAME = '34'
CLASS_EXTERNAL_ROUTINE_EXCEPTION = '38'
CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39'
CLASS_SAVEPOINT_EXCEPTION = '3B'
CLASS_INVALID_CATALOG_NAME = '3D'
CLASS_INVALID_SCHEMA_NAME = '3F'
CLASS_TRANSACTION_ROLLBACK = '40'
CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42'
CLASS_WITH_CHECK_OPTION_VIOLATION = '44'
CLASS_INSUFFICIENT_RESOURCES = '53'
CLASS_PROGRAM_LIMIT_EXCEEDED = '54'
CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55'
CLASS_OPERATOR_INTERVENTION = '57'
CLASS_SYSTEM_ERROR = '58'
CLASS_CONFIGURATION_FILE_ERROR = 'F0'
CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV'
CLASS_PL_PGSQL_ERROR = 'P0'
CLASS_INTERNAL_ERROR = 'XX'
# Class 00 - Successful Completion
SUCCESSFUL_COMPLETION = '00000'
# Class 01 - Warning
WARNING = '01000'
NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
STRING_DATA_RIGHT_TRUNCATION = '01004'
PRIVILEGE_NOT_REVOKED = '01006'
PRIVILEGE_NOT_GRANTED = '01007'
IMPLICIT_ZERO_BIT_PADDING = '01008'
DYNAMIC_RESULT_SETS_RETURNED = '0100C'
DEPRECATED_FEATURE = '01P01'
# Class 02 - No Data (this is also a warning class per the SQL standard)
NO_DATA = '02000'
NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001'
# Class 03 - SQL Statement Not Yet Complete
SQL_STATEMENT_NOT_YET_COMPLETE = '03000'
# Class 08 - Connection Exception
CONNECTION_EXCEPTION = '08000'
SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001'
CONNECTION_DOES_NOT_EXIST = '08003'
SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004'
CONNECTION_FAILURE = '08006'
TRANSACTION_RESOLUTION_UNKNOWN = '08007'
PROTOCOL_VIOLATION = '08P01'
# Class 09 - Triggered Action Exception
TRIGGERED_ACTION_EXCEPTION = '09000'
# Class 0A - Feature Not Supported
FEATURE_NOT_SUPPORTED = '0A000'
# Class 0B - Invalid Transaction Initiation
INVALID_TRANSACTION_INITIATION = '0B000'
# Class 0F - Locator Exception
LOCATOR_EXCEPTION = '0F000'
INVALID_LOCATOR_SPECIFICATION = '0F001'
# Class 0L - Invalid Grantor
INVALID_GRANTOR = '0L000'
INVALID_GRANT_OPERATION = '0LP01'
# Class 0P - Invalid Role Specification
INVALID_ROLE_SPECIFICATION = '0P000'
# Class 0Z - Diagnostics Exception
DIAGNOSTICS_EXCEPTION = '0Z000'
STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
# Class 20 - Case Not Found
CASE_NOT_FOUND = '20000'
# Class 21 - Cardinality Violation
CARDINALITY_VIOLATION = '21000'
# Class 22 - Data Exception
DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
ESCAPE_CHARACTER_CONFLICT = '2200B'
INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
INVALID_ESCAPE_OCTET = '2200D'
ZERO_LENGTH_CHARACTER_STRING = '2200F'
MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
NOT_AN_XML_DOCUMENT = '2200L'
INVALID_XML_DOCUMENT = '2200M'
INVALID_XML_CONTENT = '2200N'
INVALID_XML_COMMENT = '2200S'
INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
INVALID_INDICATOR_PARAMETER_VALUE = '22010'
SUBSTRING_ERROR = '22011'
DIVISION_BY_ZERO = '22012'
INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
INTERVAL_FIELD_OVERFLOW = '22015'
INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
INVALID_ESCAPE_CHARACTER = '22019'
INVALID_REGULAR_EXPRESSION = '2201B'
INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
INVALID_LIMIT_VALUE = '22020'
CHARACTER_NOT_IN_REPERTOIRE = '22021'
INDICATOR_OVERFLOW = '22022'
INVALID_PARAMETER_VALUE = '22023'
UNTERMINATED_C_STRING = '22024'
INVALID_ESCAPE_SEQUENCE = '22025'
STRING_DATA_LENGTH_MISMATCH = '22026'
TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
BAD_COPY_FILE_FORMAT = '22P04'
UNTRANSLATABLE_CHARACTER = '22P05'
NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
# Class 23 - Integrity Constraint Violation
INTEGRITY_CONSTRAINT_VIOLATION = '23000'
RESTRICT_VIOLATION = '23001'
NOT_NULL_VIOLATION = '23502'
FOREIGN_KEY_VIOLATION = '23503'
UNIQUE_VIOLATION = '23505'
CHECK_VIOLATION = '23514'
EXCLUSION_VIOLATION = '23P01'
# Class 24 - Invalid Cursor State
INVALID_CURSOR_STATE = '24000'
# Class 25 - Invalid Transaction State
INVALID_TRANSACTION_STATE = '25000'
ACTIVE_SQL_TRANSACTION = '25001'
BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
READ_ONLY_SQL_TRANSACTION = '25006'
SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
# Class 27 - Triggered Data Change Violation
TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
# Class 28 - Invalid Authorization Specification
INVALID_AUTHORIZATION_SPECIFICATION = '28000'
INVALID_PASSWORD = '28P01'
# Class 2B - Dependent Privilege Descriptors Still Exist
DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
# Class 2D - Invalid Transaction Termination
INVALID_TRANSACTION_TERMINATION = '2D000'
# Class 2F - SQL Routine Exception
SQL_ROUTINE_EXCEPTION = '2F000'
MODIFYING_SQL_DATA_NOT_PERMITTED = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '2F003'
READING_SQL_DATA_NOT_PERMITTED = '2F004'
FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
# Class 34 - Invalid Cursor Name
INVALID_CURSOR_NAME = '34000'
# Class 38 - External Routine Exception
EXTERNAL_ROUTINE_EXCEPTION = '38000'
CONTAINING_SQL_NOT_PERMITTED = '38001'
MODIFYING_SQL_DATA_NOT_PERMITTED = '38002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003'
READING_SQL_DATA_NOT_PERMITTED = '38004'
# Class 39 - External Routine Invocation Exception
EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000'
INVALID_SQLSTATE_RETURNED = '39001'
NULL_VALUE_NOT_ALLOWED = '39004'
TRIGGER_PROTOCOL_VIOLATED = '39P01'
SRF_PROTOCOL_VIOLATED = '39P02'
EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03'
# Class 3B - Savepoint Exception
SAVEPOINT_EXCEPTION = '3B000'
INVALID_SAVEPOINT_SPECIFICATION = '3B001'
# Class 3D - Invalid Catalog Name
INVALID_CATALOG_NAME = '3D000'
# Class 3F - Invalid Schema Name
INVALID_SCHEMA_NAME = '3F000'
# Class 40 - Transaction Rollback
TRANSACTION_ROLLBACK = '40000'
SERIALIZATION_FAILURE = '40001'
TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002'
STATEMENT_COMPLETION_UNKNOWN = '40003'
DEADLOCK_DETECTED = '40P01'
# Class 42 - Syntax Error or Access Rule Violation
SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000'
INSUFFICIENT_PRIVILEGE = '42501'
SYNTAX_ERROR = '42601'
INVALID_NAME = '42602'
INVALID_COLUMN_DEFINITION = '42611'
NAME_TOO_LONG = '42622'
DUPLICATE_COLUMN = '42701'
AMBIGUOUS_COLUMN = '42702'
UNDEFINED_COLUMN = '42703'
UNDEFINED_OBJECT = '42704'
DUPLICATE_OBJECT = '42710'
DUPLICATE_ALIAS = '42712'
DUPLICATE_FUNCTION = '42723'
AMBIGUOUS_FUNCTION = '42725'
GROUPING_ERROR = '42803'
DATATYPE_MISMATCH = '42804'
WRONG_OBJECT_TYPE = '42809'
INVALID_FOREIGN_KEY = '42830'
CANNOT_COERCE = '42846'
UNDEFINED_FUNCTION = '42883'
RESERVED_NAME = '42939'
UNDEFINED_TABLE = '42P01'
UNDEFINED_PARAMETER = '42P02'
DUPLICATE_CURSOR = '42P03'
DUPLICATE_DATABASE = '42P04'
DUPLICATE_PREPARED_STATEMENT = '42P05'
DUPLICATE_SCHEMA = '42P06'
DUPLICATE_TABLE = '42P07'
AMBIGUOUS_PARAMETER = '42P08'
AMBIGUOUS_ALIAS = '42P09'
INVALID_COLUMN_REFERENCE = '42P10'
INVALID_CURSOR_DEFINITION = '42P11'
INVALID_DATABASE_DEFINITION = '42P12'
INVALID_FUNCTION_DEFINITION = '42P13'
INVALID_PREPARED_STATEMENT_DEFINITION = '42P14'
INVALID_SCHEMA_DEFINITION = '42P15'
INVALID_TABLE_DEFINITION = '42P16'
INVALID_OBJECT_DEFINITION = '42P17'
INDETERMINATE_DATATYPE = '42P18'
INVALID_RECURSION = '42P19'
WINDOWING_ERROR = '42P20'
COLLATION_MISMATCH = '42P21'
INDETERMINATE_COLLATION = '42P22'
# Class 44 - WITH CHECK OPTION Violation
WITH_CHECK_OPTION_VIOLATION = '44000'
# Class 53 - Insufficient Resources
INSUFFICIENT_RESOURCES = '53000'
DISK_FULL = '53100'
OUT_OF_MEMORY = '53200'
TOO_MANY_CONNECTIONS = '53300'
CONFIGURATION_LIMIT_EXCEEDED = '53400'
# Class 54 - Program Limit Exceeded
PROGRAM_LIMIT_EXCEEDED = '54000'
STATEMENT_TOO_COMPLEX = '54001'
TOO_MANY_COLUMNS = '54011'
TOO_MANY_ARGUMENTS = '54023'
# Class 55 - Object Not In Prerequisite State
OBJECT_NOT_IN_PREREQUISITE_STATE = '55000'
OBJECT_IN_USE = '55006'
CANT_CHANGE_RUNTIME_PARAM = '55P02'
LOCK_NOT_AVAILABLE = '55P03'
# Class 57 - Operator Intervention
OPERATOR_INTERVENTION = '57000'
QUERY_CANCELED = '57014'
ADMIN_SHUTDOWN = '57P01'
CRASH_SHUTDOWN = '57P02'
CANNOT_CONNECT_NOW = '57P03'
DATABASE_DROPPED = '57P04'
# Class 58 - System Error (errors external to PostgreSQL itself)
SYSTEM_ERROR = '58000'
IO_ERROR = '58030'
UNDEFINED_FILE = '58P01'
DUPLICATE_FILE = '58P02'
# Class F0 - Configuration File Error
CONFIG_FILE_ERROR = 'F0000'
LOCK_FILE_EXISTS = 'F0001'
# Class HV - Foreign Data Wrapper Error (SQL/MED)
FDW_ERROR = 'HV000'
FDW_OUT_OF_MEMORY = 'HV001'
FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002'
FDW_INVALID_DATA_TYPE = 'HV004'
FDW_COLUMN_NAME_NOT_FOUND = 'HV005'
FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006'
FDW_INVALID_COLUMN_NAME = 'HV007'
FDW_INVALID_COLUMN_NUMBER = 'HV008'
FDW_INVALID_USE_OF_NULL_POINTER = 'HV009'
FDW_INVALID_STRING_FORMAT = 'HV00A'
FDW_INVALID_HANDLE = 'HV00B'
FDW_INVALID_OPTION_INDEX = 'HV00C'
FDW_INVALID_OPTION_NAME = 'HV00D'
FDW_OPTION_NAME_NOT_FOUND = 'HV00J'
FDW_REPLY_HANDLE = 'HV00K'
FDW_UNABLE_TO_CREATE_EXECUTION = 'HV00L'
FDW_UNABLE_TO_CREATE_REPLY = 'HV00M'
FDW_UNABLE_TO_ESTABLISH_CONNECTION = 'HV00N'
FDW_NO_SCHEMAS = 'HV00P'
FDW_SCHEMA_NOT_FOUND = 'HV00Q'
FDW_TABLE_NOT_FOUND = 'HV00R'
FDW_FUNCTION_SEQUENCE_ERROR = 'HV010'
FDW_TOO_MANY_HANDLES = 'HV014'
FDW_INCONSISTENT_DESCRIPTOR_INFORMATION = 'HV021'
FDW_INVALID_ATTRIBUTE_VALUE = 'HV024'
FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH = 'HV090'
FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER = 'HV091'
# Class P0 - PL/pgSQL Error
PLPGSQL_ERROR = 'P0000'
RAISE_EXCEPTION = 'P0001'
NO_DATA_FOUND = 'P0002'
TOO_MANY_ROWS = 'P0003'
ASSERT_FAILURE = 'P0004'
# Class XX - Internal Error
INTERNAL_ERROR = 'XX000'
DATA_CORRUPTED = 'XX001'
INDEX_CORRUPTED = 'XX002'
| agpl-3.0 |
dipeshbh/nodebbdb | node_modules/sitemap/env/lib/node_modules/npm/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 270 | 8338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
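# GypTestFormat() above is invoked once per generator format; GypBuild() below
# drives it, e.g.:
#   retcode += GypTestFormat('ninja')
#   retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')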
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| gpl-3.0 |
samuelhavron/heroku-buildpack-python | Python-3.4.3/Lib/test/test_itertools.py | 8 | 89289 | import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
# module-level functions (rather than lambdas) so the pickling tests can pickle them
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
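# picklecopiers holds one pickle round-trip function per supported protocol;
# the tests below mix them with copy.copy/copy.deepcopy when checking that
# iterators can be copied mid-iteration, e.g.:
#   for op in [lambda a: a] + picklecopiers:
#       list(op(chain('abc', 'def')))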
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it, protocol)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass #in case there is less data than 'take'
dump = pickle.dumps(i3, protocol)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
            self.assertEqual(a, c)
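    # pickletest() is exercised throughout this class with each supported
    # pickle protocol, e.g.:
    #   for proto in range(pickle.HIGHEST_PROTOCOL + 1):
    #       self.pickletest(proto, accumulate(range(10)))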
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, accumulate(range(10))) # test pickling
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_chain_reducible(self):
for oper in [copy.deepcopy] + picklecopiers:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
                self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
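        # The two assertions above rely on a CPython optimization: the
        # combinations iterator may reuse the same result tuple when nothing
        # else holds a reference to it (so every id() is identical), whereas
        # materializing the results with list() keeps each tuple alive and
        # forces distinct objects.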
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
            # number of items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
        self.assertRaises(TypeError, compress, range(6), range(6), range(6)) # too many args
# check copy, deepcopy, pickle
for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
        # check proper internal error handling for large "step" sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
        self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
        # comparison (__eq__) failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.pickletest(proto, c)
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This is rather silly now that builtin zip() calls zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
                self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
        # Formerly, this code would fail in debug mode
        # with an "Undetected Error" and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, product(*args))
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = map(tupleize, 'abc', count())
self.pickletest(proto, c)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(proto, c)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice's source iterator in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, islice(range(100), *args))
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
support.gc_collect()
self.assertIsNone(wr())
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, takewhile(underten, data))
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, dropwhile(underten, data))
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, iter(tee('abc')))
a, b = tee('abc')
self.pickletest(proto, a, compare=ans)
self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
any(forward) # exhaust the iterator
del backward
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
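# TestExamples re-checks the short usage examples shown in the itertools
# documentation, one small assertion per tool.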
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
it = accumulate(data)
self.assertEqual(next(it), 1)
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
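# TestGC builds a reference cycle through each itertools object (makecycle()
# appends the iterator to a container that the iterator itself consumes) to
# verify that such cycles remain collectable.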
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
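# The helpers above feed TestVariousIteratorArgs: R, G, I, Ig and L wrap a
# sequence in progressively different (but conforming) iteration protocols,
# while S stops immediately, X lacks __iter__, N lacks __next__, and E raises
# ZeroDivisionError, so each itertools entry point is exercised against both
# well-behaved and broken iterables.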
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.ssize_t = struct.calcsize('n')
check_sizeof = support.check_sizeof
def test_product_sizeof(self):
basesize = support.calcobjsize('3Pi')
check = self.check_sizeof
check(product('ab', '12'), basesize + 2 * self.ssize_t)
check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)
def test_combinations_sizeof(self):
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
check(combinations(range(10), 4), basesize + 4 * self.ssize_t)
def test_combinations_with_replacement_sizeof(self):
cwr = combinations_with_replacement
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
check(cwr(range(10), 4), basesize + 4 * self.ssize_t)
def test_permutations_sizeof(self):
basesize = support.calcobjsize('4Pni')
check = self.check_sizeof
check(permutations('abcd'),
basesize + 4 * self.ssize_t + 4 * self.ssize_t)
check(permutations('abcd', 3),
basesize + 4 * self.ssize_t + 3 * self.ssize_t)
check(permutations('abcde', 3),
basesize + 5 * self.ssize_t + 3 * self.ssize_t)
check(permutations(range(10), 4),
basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
SizeofTest)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctest the examples in the library reference
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
| mit |
elmgone/coligui | vendor.etc/json-to-elm/generate.py | 5 | 3247 | from __future__ import print_function, unicode_literals
import json
import sys
from type_alias import create_type_alias, find_type_aliases, find_union_types
from decoder import create_decoder, create_encoder, create_union_type_decoder, create_union_type_encoder
from helpers import *
def test():
testJson = """
{ "name" : "Noah"
, "age" : 23
, "location" :
{ "name" : "sweden"
, "days" : 45
}
}
"""
stuff = json.loads(testJson)
print('Creating type alias')
aliases = create_type_alias(stuff, type_alias_name='Assignment')
print('\n'.join(aliases))
print('Creating decoder')
decoders = []
for alias in aliases:
decoder = create_decoder(alias, has_snakecase=True, prefix='decode')
decoders.append(decoder)
print('\n'.join(decoders))
print_everything(
"""
{ "name" : "Noah"
, "age" : 23
, "location" :
{ "name" : "sweden"
, "days" : 45
}
}
"""
, alias_name = "Person")
print(create_union_type_decoder('type Action = Run | Hide | Noop'))
print(create_union_type_encoder('type Action = Run | Hide | Noop'))
def print_everything(some_json, alias_name):
stuff = json.loads(some_json)
aliases = create_type_alias(stuff, type_alias_name=alias_name)
decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases ]
encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases ]
print('\n'.join(aliases))
print('\n'.join(decoders))
print('\n'.join(encoders))
def from_elm_file(file_text):
aliases = find_type_aliases(file_text)
unions = find_union_types(file_text)
decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases ]
decoders.extend(create_union_type_decoder(union_type) for union_type in unions)
encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases ]
encoders.extend(create_union_type_encoder(union_type) for union_type in unions)
print('\n'.join(decoders))
print('\n'.join(encoders))
text ="""
type Action =
Run | Hide
type alias Location =
{ name : String
, days : Int
}
type alias Person =
{ age : Int
, name : String
, location : Location
}
location =
{ name = "Noah"
, days = 45
}
view =
div [] []
"""
def message(*args):
print(*args, file=sys.stderr)
def main():
if len(sys.argv) < 2:
message('Give me some elm file names and I\'ll give you decoders and encoders')
message('Or give me some json files and I\'ll give you a type alias and decoders and encoders')
return
for arg in sys.argv[1:]:
if arg.endswith('.elm'):
message('Generating decoders and encoders for {name}'.format(name=arg))
with open(arg) as f:
from_elm_file(f.read())
if arg.endswith('.json'):
message('Generating type alias, decoders and encoders from {name}'.format(name=arg))
with open(arg) as f:
arg = arg.split('/')[-1]
name = arg.split('.')[0].capitalize()
print_everything(f.read(), name)
if __name__ == '__main__':
main()
| apache-2.0 |
chirilo/kuma | vendor/packages/setuptools/command/test.py | 363 | 6471 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import unittest
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
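# A minimal sketch (not part of the original file) of how a project could opt
# into this loader from its setup.py; "myproject" is a placeholder name:
#
#     setup(
#         name="myproject",
#         test_suite="myproject.tests",
#         test_loader="setuptools.command.test:ScanningLoader",
#     )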
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0, '--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, [unittest.__file__] + self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| mpl-2.0 |
uchida/selenium | py/selenium/webdriver/ie/service.py | 8 | 2235 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
"""
Object that manages the starting and stopping of the IEDriver
"""
def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the IEDriver
- port : Port the service is running on
- host : IP address the service port is bound
- log_level : Level of logging of service, may be "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE".
Default is "FATAL".
- log_file : Target of logging of service, may be "stdout", "stderr" or file path.
Default is "stdout"."""
self.service_args = []
if host is not None:
self.service_args.append("--host=%s" % host)
if log_level is not None:
self.service_args.append("--log-level=%s" % log_level)
if log_file is not None:
self.service_args.append("--log-file=%s" % log_file)
service.Service.__init__(self, executable_path, port=port,
start_error_message="Please download from http://selenium-release.storage.googleapis.com/index.html and read up at https://github.com/SeleniumHQ/selenium/wiki/InternetExplorerDriver")
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
| apache-2.0 |
Adjective-Object/ligament | ligament/__init__.py | 1 | 2250 | """
A python task automator, with a focus on task composition
"""
import ligament
import helpers
import buildtarget
import buildcontext
import buildcontextfseventhandler
import exceptions
import sys
import getopt
from helpers import pout
def print_helptext():
""" print helptext for command-line interface """
pout("usage: %s [flags] [tasks]\n"
" Execute tasks in order. if no tasks are specified, tries to\n"
" execute task 'default'\n"
" flags:\n"
" -w / --watch watch files/folders for changes and update\n"
" -h / --help display this helptext" % sys.argv[0])
def main():
""" parse command line opts and run a skeleton file
when called from the command line, ligament looks in the current
working directory for a file called `skeleton.py`. Tasks specified from
the command line are then executed in order, and if the -w flag was
specified, ligament then watches the filesystem for changes to
    prompt task re-execution.
"""
options = None
try:
options, args = getopt.gnu_getopt(
sys.argv[1:],
"whqv",
["watch", "help", "query", "verbose"])
except getopt.GetoptError as e:
print e
print_helptext()
exit(1)
should_watch = False
query_skeleton = False
verbose = False
for opt, arg in options:
if opt == "--watch" or opt == '-w':
should_watch = True
elif opt == "--query" or opt == '-q':
query_skeleton = True
elif opt == "--help" or opt == '-h':
print_helptext()
exit(0)
elif opt == "--verbose" or opt == '-v':
verbose = True
else:
print "option '%s' not recognized" % opt
print_helptext()
exit(1)
if verbose:
helpers.set_verbosity(".*")
if query_skeleton:
print " ".join(ligament.query_skeleton("./skeleton.py"))
helpers.set_verbosity()
else:
helpers.add_verbosity_groups("build_task")
ligament.run_skeleton(
"./skeleton.py",
["default"] if len(args) == 0 else args,
watch=should_watch)
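# Illustrative command-line invocations (not part of the original file), assuming
# the console entry point is named "ligament" and skeleton.py defines tasks
# called "build" and "serve":
#
#     $ ligament build serve     # run the two tasks in order
#     $ ligament -w build        # run "build", then watch files and re-run it
#     $ ligament -q              # print the task names defined in skeleton.py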
| apache-2.0 |
edx/edx-load-tests | loadtests/ecommerce_deadlock/authentication.py | 1 | 2577 | import uuid
from locust import TaskSet
from helpers import settings
LMS_HOST = settings.data['LMS_HOST'].rstrip('\/')
class AuthenticationMixin(TaskSet):
"""Mixin that gives tasks different authentication methods."""
def __init__(self, parent):
super().__init__(parent)
self._setup_basic_auth()
def on_start(self):
"""Raise an exception. This TaskSet is not meant to be run on its own."""
raise Exception("This TaskSet is not meant to be run")
def auto_auth_login(self):
"""Create a new account with given credentials and log in.
This requires that the test server has the feature flag
`AUTOMATIC_AUTH_FOR_TESTING` set to True.
"""
auto_auth_username = "ecom_deadlock"
auto_auth_email_url = "simulator.amazonses.com"
default_password = "test"
del self.client.cookies["sessionid"]
username = "{0}{1}".format(auto_auth_username, uuid.uuid4().hex[:20])
full_email = "{0}@{1}".format(username, auto_auth_email_url)
params = {
'password': default_password,
'email': full_email,
'username': username,
}
self.client.get("/auto_auth", params=params, verify=False, name="/auto_auth")
def login(self, email, password):
"""
Login to the LMS for the given credentials
Args:
email (str): User email
password (str): User's password
"""
signin_url = '{}/login'.format(LMS_HOST)
headers = self._get_csrf(signin_url)
login_url = '{}/login_ajax'.format(LMS_HOST)
response = self.client.post(login_url, {
'email': email,
'password': password,
'honor_code': 'true'
}, headers=headers).json()
if not response['success']:
raise Exception(str(response))
print('Login successful {}'.format(email))
def _get_csrf(self, url):
try:
response = self.client.get(url)
csrf = response.cookies['csrftoken']
return {'X-CSRFToken': csrf, 'Referer': url}
except Exception as error: # pylint: disable=W0703
print("Error when retrieving csrf token.", error)
def _setup_basic_auth(self):
"""Sets up basic authentication if available"""
if settings.data.get('BASIC_AUTH') is not None:
self.client.auth = (
settings.data['BASIC_AUTH']['BASIC_AUTH_USER'],
settings.data['BASIC_AUTH']['BASIC_AUTH_PASS'],
)
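# A minimal sketch (not part of the original file) of mixing these helpers into
# a concrete locust TaskSet; the task body and URL are placeholders:
#
#     from locust import task
#
#     class EcommerceTasks(AuthenticationMixin):
#         def on_start(self):
#             self.auto_auth_login()
#
#         @task
#         def view_basket(self):
#             self.client.get("/basket/", name="/basket/")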
| apache-2.0 |
henriquejensen/marolo | controllers/default.py | 2 | 3682 | # -*- coding: utf-8 -*-
def index():
pagina = request.args(0, cast=int, default=1)
itens_por_pagina = 5
total = db(db.noticias).count()
paginas = total / itens_por_pagina
if total % itens_por_pagina:
paginas += 1
limites = (itens_por_pagina * (pagina - 1), (itens_por_pagina * pagina))
noticias = db(db.noticias.status == "publicado").select(
orderby=~db.noticias.created_on | ~db.noticias.modified_on,
limitby=limites
)
carousel = db(db.carousel.status == 'ativo').select(
orderby=~db.carousel.id,
limitby=(0, 4),
)
return {
'noticias': noticias,
'pagina': pagina,
'paginas': paginas,
'carousel': carousel
}
def noticias():
permalink = request.args(0, otherwise='/')
noticia = db.noticias(permalink=permalink)
if not noticia:
session.flash = T('Desculpe, não existem noticias cadastradas ainda')
redirect('/')
return dict(noticia=noticia)
def membros():
membros = db(db.membros).select().as_list()
if not membros:
session.flash = T('Desculpe, não existem membros cadastrados ainda')
redirect('/')
linhas = (len(membros) // 3) + 1
matriz = []
for _ in range(linhas):
aux = []
cont = 0
while cont < 3 and membros:
aux.append(membros.pop(0))
cont += 1
matriz.append(aux)
return {
'matriz': matriz
}
def associacao():
return {}
def projeto():
return {}
def eventos():
parametro_ano = request.args(0, cast=int, default=request.now.year)
ano_atual = request.now.year
eventos = db(db.eventos.dia.year() == parametro_ano).select()
if not eventos:
session.flash = T('Desculpe, não existem eventos cadastrados ainda')
redirect('/')
minimo = db.eventos.dia.min()
maximo = db.eventos.dia.max()
extremos = db().select(maximo, minimo).first()
menor_ano = extremos[minimo].year
maior_ano = extremos[maximo].year
anos_anteriores = []
anos_posteriores = []
for incremento in range(3, 0, -1):
ano_posterior = parametro_ano + incremento
if (ano_posterior <= maior_ano) or (ano_posterior <= ano_atual):
anos_posteriores.append(ano_posterior)
for subtrator in range(1, 4):
ano_anterior = parametro_ano - subtrator
if ano_anterior >= menor_ano:
anos_anteriores.append(ano_anterior)
return {
'eventos': eventos,
'ano': parametro_ano,
'anos_anteriores': anos_anteriores,
'anos_posteriores': anos_posteriores,
}
def contato():
    # 'form' was undefined in this excerpt; a minimal SQLFORM.factory with the
    # fields used below is assumed here so the action can run.
    form = SQLFORM.factory(
        Field('email', requires=IS_EMAIL()),
        Field('assunto', requires=IS_NOT_EMPTY()),
        Field('mensagem', 'text', requires=IS_NOT_EMPTY()),
    )
    if form.validate():
        campos = form.vars
        mail.send(
            to=mail.settings.sender,
            subject=campos['assunto'],
            message=campos['mensagem'],
            sender=campos['email']
        )
    return dict(form=form)
def produtos():
pagina = request.args(0, cast=int, default=1)
itens_por_pagina = 9
total = db(db.produtos).count()
if not total:
session.flash = T('Desculpe, não existem produtos cadastrados ainda')
redirect('/')
paginas = total / itens_por_pagina
if total % itens_por_pagina:
paginas += 1
limites = (itens_por_pagina * (pagina - 1), (itens_por_pagina * pagina))
produtos = db(db.produtos).select(
limitby=limites
)
return {
'produtos': produtos,
'pagina': pagina,
'paginas': paginas
}
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
| mit |
CloudWareChile/OpenChile | openerp/addons/account_analytic_plans/account_analytic_plans.py | 6 | 24178 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from osv import fields, osv
import tools
from tools.translate import _
class one2many_mod2(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if context is None:
context = {}
res = {}
for id in ids:
res[id] = []
ids2 = None
if 'journal_id' in context:
journal = obj.pool.get('account.journal').browse(cr, user, context['journal_id'], context=context)
pnum = int(name[7]) -1
plan = journal.plan_id
if plan and len(plan.plan_ids) > pnum:
acc_id = plan.plan_ids[pnum].root_analytic_id.id
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids),('analytic_account_id','child_of',[acc_id])], limit=self._limit)
if ids2 is None:
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[r[self._fields_id]].append( r['id'] )
return res
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
def _get_amount(self, cr, uid, ids, name, args, context=None):
res = {}
for id in ids:
res.setdefault(id, 0.0)
for line in self.browse(cr, uid, ids, context=context):
amount = line.move_id and line.move_id.amount_currency * (line.percentage / 100) or 0.0
res[line.id] = amount
return res
_columns = {
'amount_currency': fields.function(_get_amount, string="Amount Currency", type="float", store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
'percentage': fields.float('Percentage')
}
account_analytic_line()
class account_analytic_plan(osv.osv):
_name = "account.analytic.plan"
_description = "Analytic Plan"
_columns = {
'name': fields.char('Analytic Plan', size=64, required=True, select=True),
'plan_ids': fields.one2many('account.analytic.plan.line', 'plan_id', 'Analytic Plans'),
}
account_analytic_plan()
class account_analytic_plan_line(osv.osv):
_name = "account.analytic.plan.line"
_description = "Analytic Plan Line"
_order = "sequence, id"
_columns = {
'plan_id': fields.many2one('account.analytic.plan','Analytic Plan'),
'name': fields.char('Plan Name', size=64, required=True, select=True),
'sequence': fields.integer('Sequence'),
'root_analytic_id': fields.many2one('account.analytic.account', 'Root Account', help="Root account of this plan.", required=False),
'min_required': fields.float('Minimum Allowed (%)'),
'max_required': fields.float('Maximum Allowed (%)'),
}
_defaults = {
'min_required': 100.0,
'max_required': 100.0,
}
account_analytic_plan_line()
class account_analytic_plan_instance(osv.osv):
_name = "account.analytic.plan.instance"
_description = "Analytic Plan Instance"
_columns = {
'name': fields.char('Analytic Distribution', size=64),
'code': fields.char('Distribution Code', size=16),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal' ),
'account_ids': fields.one2many('account.analytic.plan.instance.line', 'plan_id', 'Account Id'),
'account1_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account1 Id'),
'account2_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account2 Id'),
'account3_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account3 Id'),
'account4_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account4 Id'),
'account5_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account5 Id'),
'account6_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account6 Id'),
'plan_id': fields.many2one('account.analytic.plan', "Model's Plan"),
}
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.get('journal_id', False):
journal = journal_obj.browse(cr, user, [context['journal_id']], context=context)[0]
analytic_journal = journal.analytic_journal_id and journal.analytic_journal_id.id or False
args.append('|')
args.append(('journal_id', '=', analytic_journal))
args.append(('journal_id', '=', False))
res = super(account_analytic_plan_instance, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
return res
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'account1_ids':False, 'account2_ids':False, 'account3_ids':False,
'account4_ids':False, 'account5_ids':False, 'account6_ids':False})
return super(account_analytic_plan_instance, self).copy(cr, uid, id, default, context=context)
def _default_journal(self, cr, uid, context=None):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.has_key('journal_id') and context['journal_id']:
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
if journal.analytic_journal_id:
return journal.analytic_journal_id.id
return False
_defaults = {
'plan_id': False,
'journal_id': _default_journal,
}
def name_get(self, cr, uid, ids, context=None):
res = []
for inst in self.browse(cr, uid, ids, context=context):
name = inst.name or '/'
if name and inst.code:
name=name+' ('+inst.code+')'
res.append((inst.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
if name:
ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context or {})
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context or {})
else:
ids = self.search(cr, uid, args, limit=limit, context=context or {})
return self.name_get(cr, uid, ids, context or {})
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
wiz_id = self.pool.get('ir.actions.act_window').search(cr, uid, [("name","=","analytic.plan.create.model.action")], context=context)
res = super(account_analytic_plan_instance,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
journal_obj = self.pool.get('account.journal')
analytic_plan_obj = self.pool.get('account.analytic.plan')
if (res['type']=='form'):
plan_id = False
if context.get('journal_id', False):
plan_id = journal_obj.browse(cr, uid, int(context['journal_id']), context=context).plan_id
elif context.get('plan_id', False):
plan_id = analytic_plan_obj.browse(cr, uid, int(context['plan_id']), context=context)
if plan_id:
i=1
res['arch'] = """<form string="%s">
<field name="name"/>
<field name="code"/>
<field name="journal_id"/>
<button name="%d" string="Save This Distribution as a Model" type="action" colspan="2"/>
"""% (tools.to_xml(plan_id.name), wiz_id[0])
for line in plan_id.plan_ids:
res['arch']+="""
<field name="account%d_ids" string="%s" nolabel="1" colspan="4">
<tree string="%s" editable="bottom">
<field name="rate"/>
<field name="analytic_account_id" domain="[('parent_id','child_of',[%d])]" groups="base.group_extended"/>
</tree>
</field>
<newline/>"""%(i,tools.to_xml(line.name),tools.to_xml(line.name),line.root_analytic_id and line.root_analytic_id.id or 0)
i+=1
res['arch'] += "</form>"
doc = etree.fromstring(res['arch'].encode('utf8'))
xarch, xfields = self._view_look_dom_arch(cr, uid, doc, view_id, context=context)
res['arch'] = xarch
res['fields'] = xfields
return res
else:
return res
def create(self, cr, uid, vals, context=None):
journal_obj = self.pool.get('account.journal')
ana_plan_instance_obj = self.pool.get('account.analytic.plan.instance')
acct_anal_acct = self.pool.get('account.analytic.account')
acct_anal_plan_line_obj = self.pool.get('account.analytic.plan.line')
if context and 'journal_id' in context:
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
pids = ana_plan_instance_obj.search(cr, uid, [('name','=',vals['name']), ('code','=',vals['code']), ('plan_id','<>',False)], context=context)
if pids:
raise osv.except_osv(_('Error'), _('A model having this name and code already exists !'))
res = acct_anal_plan_line_obj.search(cr, uid, [('plan_id','=',journal.plan_id.id)], context=context)
for i in res:
total_per_plan = 0
item = acct_anal_plan_line_obj.browse(cr, uid, i, context=context)
temp_list = ['account1_ids','account2_ids','account3_ids','account4_ids','account5_ids','account6_ids']
for l in temp_list:
if vals.has_key(l):
for tempo in vals[l]:
if acct_anal_acct.search(cr, uid, [('parent_id', 'child_of', [item.root_analytic_id.id]), ('id', '=', tempo[2]['analytic_account_id'])], context=context):
total_per_plan += tempo[2]['rate']
if total_per_plan < item.min_required or total_per_plan > item.max_required:
raise osv.except_osv(_('Value Error'),_('The Total Should be Between %s and %s') % (str(item.min_required), str(item.max_required)))
return super(account_analytic_plan_instance, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0], context=context)
invoice_line_obj = self.pool.get('account.invoice.line')
if this.plan_id and not vals.has_key('plan_id'):
#this instance is a model, so we have to create a new plan instance instead of modifying it
#copy the existing model
temp_id = self.copy(cr, uid, this.id, None, context=context)
#get the list of the invoice line that were linked to the model
lists = invoice_line_obj.search(cr, uid, [('analytics_id','=',this.id)], context=context)
#make them link to the copy
invoice_line_obj.write(cr, uid, lists, {'analytics_id':temp_id}, context=context)
#and finally modify the old model to be not a model anymore
vals['plan_id'] = False
if not vals.has_key('name'):
vals['name'] = this.name and (str(this.name)+'*') or "*"
if not vals.has_key('code'):
vals['code'] = this.code and (str(this.code)+'*') or "*"
return super(account_analytic_plan_instance, self).write(cr, uid, ids, vals, context=context)
account_analytic_plan_instance()
class account_analytic_plan_instance_line(osv.osv):
_name = "account.analytic.plan.instance.line"
_description = "Analytic Instance Line"
_columns = {
'plan_id': fields.many2one('account.analytic.plan.instance', 'Plan Id'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, domain=[('type','<>','view')]),
'rate': fields.float('Rate (%)', required=True),
}
_defaults = {
'rate': 100.0
}
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['analytic_account_id'], context=context)
res = []
for record in reads:
res.append((record['id'], record['analytic_account_id']))
return res
account_analytic_plan_instance_line()
class account_journal(osv.osv):
_inherit = "account.journal"
_name = "account.journal"
_columns = {
'plan_id': fields.many2one('account.analytic.plan', 'Analytic Plans'),
}
account_journal()
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_name = "account.invoice.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def create(self, cr, uid, vals, context=None):
if 'analytics_id' in vals and isinstance(vals['analytics_id'], tuple):
vals['analytics_id'] = vals['analytics_id'][0]
return super(account_invoice_line, self).create(cr, uid, vals, context=context)
def move_line_get_item(self, cr, uid, line, context=None):
res = super(account_invoice_line, self).move_line_get_item(cr, uid, line, context=context)
res ['analytics_id'] = line.analytics_id and line.analytics_id.id or False
return res
def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, company_id=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context=context, company_id=company_id)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
if rec and rec.analytics_id:
res_prod['value'].update({'analytics_id': rec.analytics_id.id})
return res_prod
account_invoice_line()
class account_move_line(osv.osv):
_inherit = "account.move.line"
_name = "account.move.line"
_columns = {
'analytics_id':fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def _default_get_move_form_hook(self, cursor, user, data):
data = super(account_move_line, self)._default_get_move_form_hook(cursor, user, data)
if data.has_key('analytics_id'):
del(data['analytics_id'])
return data
def create_analytic_lines(self, cr, uid, ids, context=None):
if context is None:
context = {}
super(account_move_line, self).create_analytic_lines(cr, uid, ids, context=context)
analytic_line_obj = self.pool.get('account.analytic.line')
for line in self.browse(cr, uid, ids, context=context):
if line.analytics_id:
if not line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (line.journal_id.name,))
toremove = analytic_line_obj.search(cr, uid, [('move_id','=',line.id)], context=context)
if toremove:
analytic_line_obj.unlink(cr, uid, toremove, context=context)
for line2 in line.analytics_id.account_ids:
val = (line.credit or 0.0) - (line.debit or 0.0)
amt=val * (line2.rate/100)
al_vals={
'name': line.name,
'date': line.date,
'account_id': line2.analytic_account_id.id,
'unit_amount': line.quantity,
'product_id': line.product_id and line.product_id.id or False,
'product_uom_id': line.product_uom_id and line.product_uom_id.id or False,
'amount': amt,
'general_account_id': line.account_id.id,
'move_id': line.id,
'journal_id': line.journal_id.analytic_journal_id.id,
'ref': line.ref,
'percentage': line2.rate
}
analytic_line_obj.create(cr, uid, al_vals, context=context)
return True
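    # Illustrative example (not part of the original file): for a move line with
    # a 100.00 credit and a distribution instance holding two plan lines rated
    # 60% and 40%, the loop above posts two analytic lines of 60.00 and 40.00 on
    # the respective analytic accounts, each carrying its percentage.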
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
return result
account_move_line()
class account_invoice(osv.osv):
_name = "account.invoice"
_inherit = "account.invoice"
def line_get_convert(self, cr, uid, x, part, date, context=None):
res=super(account_invoice,self).line_get_convert(cr, uid, x, part, date, context=context)
res['analytics_id'] = x.get('analytics_id', False)
return res
def _get_analytic_lines(self, cr, uid, id, context=None):
inv = self.browse(cr, uid, [id])[0]
cur_obj = self.pool.get('res.currency')
invoice_line_obj = self.pool.get('account.invoice.line')
acct_ins_obj = self.pool.get('account.analytic.plan.instance')
company_currency = inv.company_id.currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = invoice_line_obj.move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il.get('analytics_id', False):
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
obj_move_line = acct_ins_obj.browse(cr, uid, il['analytics_id'], context=context)
ctx = context.copy()
ctx.update({'date': inv.date_invoice})
amount_calc = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context=ctx) * sign
qty = il['quantity']
il['analytic_lines'] = []
for line2 in obj_move_line.account_ids:
amt = amount_calc * (line2.rate/100)
qtty = qty* (line2.rate/100)
al_vals = {
'name': il['name'],
'date': inv['date_invoice'],
'unit_amount': qtty,
'product_id': il['product_id'],
'account_id': line2.analytic_account_id.id,
'amount': amt,
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self._get_journal_analytic(cr, uid, inv.type),
'ref': ref,
}
il['analytic_lines'].append((0, 0, al_vals))
return iml
account_invoice()
class account_analytic_plan(osv.osv):
_inherit = "account.analytic.plan"
_columns = {
'default_instance_id': fields.many2one('account.analytic.plan.instance', 'Default Entries'),
}
account_analytic_plan()
class analytic_default(osv.osv):
_inherit = "account.analytic.default"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
analytic_default()
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line,self).invoice_line_create(cr, uid, ids, context=context)
inv_line_obj = self.pool.get('account.invoice.line')
acct_anal_def_obj = self.pool.get('account.analytic.default')
if ids:
sale_line = self.browse(cr, uid, ids[0], context=context)
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = acct_anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, uid, time.strftime('%Y-%m-%d'), context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'analytics_id': rec.analytics_id.id}, context=context)
return create_ids
sale_order_line()
class account_bank_statement(osv.osv):
_inherit = "account.bank.statement"
_name = "account.bank.statement"
def create_move_from_st_line(self, cr, uid, st_line_id, company_currency_id, st_line_number, context=None):
account_move_line_pool = self.pool.get('account.move.line')
account_bank_statement_line_pool = self.pool.get('account.bank.statement.line')
st_line = account_bank_statement_line_pool.browse(cr, uid, st_line_id, context=context)
result = super(account_bank_statement,self).create_move_from_st_line(cr, uid, st_line_id, company_currency_id, st_line_number, context=context)
move = st_line.move_ids and st_line.move_ids[0] or False
if move:
for line in move.line_id:
account_move_line_pool.write(cr, uid, [line.id], {'analytics_id':st_line.analytics_id.id}, context=context)
return result
def button_confirm_bank(self, cr, uid, ids, context=None):
super(account_bank_statement,self).button_confirm_bank(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
for st_line in st.line_ids:
if st_line.analytics_id:
if not st.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (st.journal_id.name,))
if not st_line.amount:
continue
return True
account_bank_statement()
class account_bank_statement_line(osv.osv):
_inherit = "account.bank.statement.line"
_name = "account.bank.statement.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
account_bank_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jonathanverner/brython | www/src/Lib/test/test_imp.py | 23 | 19284 | import imp
import importlib
import os
import os.path
import shutil
import sys
from test import support
from test.test_importlib import util
import unittest
import warnings
class LockTests(unittest.TestCase):
"""Very basic test of import lock functions."""
def verify_lock_state(self, expected):
self.assertEqual(imp.lock_held(), expected,
"expected imp.lock_held() to be %r" % expected)
def testLock(self):
LOOPS = 50
# The import lock may already be held, e.g. if the test suite is run
# via "import test.autotest".
lock_held_at_start = imp.lock_held()
self.verify_lock_state(lock_held_at_start)
for i in range(LOOPS):
imp.acquire_lock()
self.verify_lock_state(True)
for i in range(LOOPS):
imp.release_lock()
# The original state should be restored now.
self.verify_lock_state(lock_held_at_start)
if not lock_held_at_start:
try:
imp.release_lock()
except RuntimeError:
pass
else:
self.fail("release_lock() without lock should raise "
"RuntimeError")
class ImportTests(unittest.TestCase):
def setUp(self):
mod = importlib.import_module('test.encoded_modules')
self.test_strings = mod.test_strings
self.test_path = mod.__path__
def test_import_encoded_module(self):
for modname, encoding, teststr in self.test_strings:
mod = importlib.import_module('test.encoded_modules.'
'module_' + modname)
self.assertEqual(teststr, mod.test)
def test_find_module_encoding(self):
for mod, encoding, _ in self.test_strings:
with imp.find_module('module_' + mod, self.test_path)[0] as fd:
self.assertEqual(fd.encoding, encoding)
path = [os.path.dirname(__file__)]
with self.assertRaises(SyntaxError):
imp.find_module('badsyntax_pep3120', path)
def test_issue1267(self):
for mod, encoding, _ in self.test_strings:
fp, filename, info = imp.find_module('module_' + mod,
self.test_path)
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, encoding)
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(), '# test %s encoding\n'
% encoding)
fp, filename, info = imp.find_module("tokenize")
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, "utf-8")
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(),
'"""Tokenization help for Python programs.\n')
def test_issue3594(self):
temp_mod_name = 'test_imp_helper'
sys.path.insert(0, '.')
try:
with open(temp_mod_name + '.py', 'w') as file:
file.write("# coding: cp1252\nu = 'test.test_imp'\n")
file, filename, info = imp.find_module(temp_mod_name)
file.close()
self.assertEqual(file.encoding, 'cp1252')
finally:
del sys.path[0]
support.unlink(temp_mod_name + '.py')
support.unlink(temp_mod_name + '.pyc')
support.unlink(temp_mod_name + '.pyo')
def test_issue5604(self):
        # This test cannot cover the imp.load_compiled function.
        # Martin von Loewis notes that a shared library cannot have a non-ascii
        # character in its name, because the init_xxx function could not be
        # compiled, so the issue never happens for dynamic modules.
        # But the sources were modified to follow the generic way of processing paths.
# the return encoding could be uppercase or None
fs_encoding = sys.getfilesystemencoding()
# covers utf-8 and Windows ANSI code pages
# one non-space symbol from every page
# (http://en.wikipedia.org/wiki/Code_page)
known_locales = {
'utf-8' : b'\xc3\xa4',
'cp1250' : b'\x8C',
'cp1251' : b'\xc0',
'cp1252' : b'\xc0',
'cp1253' : b'\xc1',
'cp1254' : b'\xc0',
'cp1255' : b'\xe0',
'cp1256' : b'\xe0',
'cp1257' : b'\xc0',
'cp1258' : b'\xc0',
}
if sys.platform == 'darwin':
self.assertEqual(fs_encoding, 'utf-8')
# Mac OS X uses the Normal Form D decomposition
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
special_char = b'a\xcc\x88'
else:
special_char = known_locales.get(fs_encoding)
if not special_char:
self.skipTest("can't run this test with %s as filesystem encoding"
% fs_encoding)
decoded_char = special_char.decode(fs_encoding)
temp_mod_name = 'test_imp_helper_' + decoded_char
test_package_name = 'test_imp_helper_package_' + decoded_char
init_file_name = os.path.join(test_package_name, '__init__.py')
try:
# if the curdir is not in sys.path the test fails when run with
# ./python ./Lib/test/regrtest.py test_imp
sys.path.insert(0, os.curdir)
with open(temp_mod_name + '.py', 'w') as file:
file.write('a = 1\n')
file, filename, info = imp.find_module(temp_mod_name)
with file:
self.assertIsNotNone(file)
self.assertTrue(filename[:-3].endswith(temp_mod_name))
self.assertEqual(info[0], '.py')
self.assertEqual(info[1], 'U')
self.assertEqual(info[2], imp.PY_SOURCE)
mod = imp.load_module(temp_mod_name, file, filename, info)
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mod = imp.load_source(temp_mod_name, temp_mod_name + '.py')
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if not sys.dont_write_bytecode:
mod = imp.load_compiled(
temp_mod_name,
imp.cache_from_source(temp_mod_name + '.py'))
self.assertEqual(mod.a, 1)
if not os.path.exists(test_package_name):
os.mkdir(test_package_name)
with open(init_file_name, 'w') as file:
file.write('b = 2\n')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
package = imp.load_package(test_package_name, test_package_name)
self.assertEqual(package.b, 2)
finally:
del sys.path[0]
for ext in ('.py', '.pyc', '.pyo'):
support.unlink(temp_mod_name + ext)
support.unlink(init_file_name + ext)
support.rmtree(test_package_name)
def test_issue9319(self):
path = os.path.dirname(__file__)
self.assertRaises(SyntaxError,
imp.find_module, "badsyntax_pep3120", [path])
def test_load_from_source(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with support.EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_issue15828_load_extensions(self):
# Issue 15828 picked up that the adapter between the old imp API
# and importlib couldn't handle C extensions
example = "_heapq"
x = imp.find_module(example)
file_ = x[0]
if file_ is not None:
self.addCleanup(file_.close)
mod = imp.load_module(example, *x)
self.assertEqual(mod.__name__, example)
def test_load_dynamic_ImportError_path(self):
# Issue #1559549 added `name` and `path` attributes to ImportError
# in order to provide better detail. Issue #10854 implemented those
# attributes on import failures of extensions on Windows.
path = 'bogus file path'
name = 'extension'
with self.assertRaises(ImportError) as err:
imp.load_dynamic(name, path)
self.assertIn(path, err.exception.path)
self.assertEqual(name, err.exception.name)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_load_module_extension_file_is_None(self):
# When loading an extension module and the file is None, open one
# on the behalf of imp.load_dynamic().
# Issue #15902
name = '_heapq'
found = imp.find_module(name)
if found[0] is not None:
found[0].close()
if found[2][2] != imp.C_EXTENSION:
return
imp.load_module(name, None, *found[1:])
def test_multiple_calls_to_get_data(self):
# Issue #18755: make sure multiple calls to get_data() can succeed.
loader = imp._LoadSourceCompatibility('imp', imp.__file__,
open(imp.__file__))
loader.get_data(imp.__file__) # File should be closed
loader.get_data(imp.__file__) # Will need to create a newly opened file
class ReloadTests(unittest.TestCase):
"""Very basic tests to make sure that imp.reload() operates just like
reload()."""
def test_source(self):
# XXX (ncoghlan): It would be nice to use test.support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
with support.EnvironmentVarGuard():
import os
imp.reload(os)
def test_extension(self):
with support.CleanImport('time'):
import time
imp.reload(time)
def test_builtin(self):
with support.CleanImport('marshal'):
import marshal
imp.reload(marshal)
def test_with_deleted_parent(self):
# see #18681
from html import parser
html = sys.modules.pop('html')
def cleanup():
sys.modules['html'] = html
self.addCleanup(cleanup)
with self.assertRaisesRegex(ImportError, 'html'):
imp.reload(parser)
def test_module_replaced(self):
# see #18698
def code():
module = type(sys)('top_level')
module.spam = 3
sys.modules['top_level'] = module
mock = util.mock_modules('top_level',
module_code={'top_level': code})
with mock:
with util.import_state(meta_path=[mock]):
module = importlib.import_module('top_level')
reloaded = imp.reload(module)
actual = sys.modules['top_level']
self.assertEqual(actual.spam, 3)
self.assertEqual(reloaded.spam, 3)
class PEP3147Tests(unittest.TestCase):
"""Tests of PEP 3147."""
tag = imp.get_tag()
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_no_cache_tag(self):
        # No cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_optimized(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyo file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyo'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, False), expect)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
partial_expect = os.path.join('foo', 'bar', '__pycache__',
'baz.{}.py'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, []), partial_expect + 'o')
self.assertEqual(imp.cache_from_source(path, [17]),
partial_expect + 'c')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with self.assertRaises(RuntimeError):
imp.cache_from_source('/foo/bar/baz.py', Bearish())
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
imp.cache_from_source('\\foo\\bar\\baz/qux.py', True),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(imp.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
# If sys.implementation.cache_tag is None, raise NotImplementedError.
path = os.path.join('blah', '__pycache__', 'whatever.pyc')
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.source_from_cache(path)
def test_source_from_cache_bad_path(self):
# When the path to a pyc file is not in PEP 3147 format, a ValueError
# is raised.
self.assertRaises(
ValueError, imp.source_from_cache, '/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
# No slashes at all in path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, 'foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
# Too few dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, '__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
# Too many dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
# Another problem with the path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'/foo/bar/foo.cpython-32.foo.pyc')
def test_package___file__(self):
try:
m = __import__('pep3147')
except ImportError:
pass
else:
self.fail("pep3147 module already exists: %r" % (m,))
# Test that a package's __file__ points to the right source directory.
os.mkdir('pep3147')
sys.path.insert(0, os.curdir)
def cleanup():
if sys.path[0] == os.curdir:
del sys.path[0]
shutil.rmtree('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py file.
support.create_empty_file('pep3147/__init__.py')
importlib.invalidate_caches()
expected___file__ = os.sep.join(('.', 'pep3147', '__init__.py'))
m = __import__('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
# Ensure we load the pyc file.
support.unload('pep3147')
m = __import__('pep3147')
support.unload('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
class NullImporterTests(unittest.TestCase):
@unittest.skipIf(support.TESTFN_UNENCODABLE is None,
"Need an undecodeable filename")
def test_unencodeable(self):
name = support.TESTFN_UNENCODABLE
os.mkdir(name)
try:
self.assertRaises(ImportError, imp.NullImporter, name)
finally:
os.rmdir(name)
def test_main():
tests = [
ImportTests,
PEP3147Tests,
ReloadTests,
NullImporterTests,
]
try:
import _thread
except ImportError:
pass
else:
tests.append(LockTests)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
Distrotech/qtcreator | tests/system/tools/findUnusedObjects.py | 6 | 5941 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import os
import sys
import tokenize
from optparse import OptionParser
from toolfunctions import checkDirectory
from toolfunctions import getFileContent
objMap = None
lastToken = [None, None]
stopTokens = ('OP', 'NAME', 'NUMBER', 'ENDMARKER')
def parseCommandLine():
global directory, onlyRemovable
parser = OptionParser("\n%prog [OPTIONS] [DIRECTORY]")
parser.add_option("-o", "--only-removable", dest="onlyRemovable",
action="store_true", default=False,
help="list removable objects only")
(options, args) = parser.parse_args()
if len(args) == 0:
directory = os.path.abspath(".")
elif len(args) == 1:
directory = os.path.abspath(args[0])
else:
print "\nERROR: Too many arguments\n"
parser.print_help()
sys.exit(1)
onlyRemovable = options.onlyRemovable
def collectObjects():
global objMap
data = getFileContent(objMap)
return map(lambda x: x.strip().split("\t", 1)[0], data.strip().splitlines())
def handleStringsWithTrailingBackSlash(origStr):
try:
while True:
index = origStr.index("\\\n")
origStr = origStr[:index] + origStr[index+2:].lstrip()
except:
return origStr
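# Hedged example, added for illustration and not part of the original tool: the helper
# above joins string tokens that were split with a trailing backslash, dropping the
# continuation and the indentation that follows it.
def _exampleHandleContinuation():
    joined = handleStringsWithTrailingBackSlash("'objA \\\n          objB'")
    assert joined == "'objA objB'"
    return joined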
def handle_token(tokenType, token, (startRow, startCol), (endRow, endCol), line):
global useCounts, lastToken, stopTokens
if tokenize.tok_name[tokenType] == 'STRING':
# concatenate strings followed directly by other strings
if lastToken[0] == 'STRING':
token = "'" + lastToken[1][1:-1] + str(token)[1:-1] + "'"
# store the new string as lastToken after removing potential trailing backslashes
# (including their following indentation)
lastToken = ['STRING' , handleStringsWithTrailingBackSlash(str(token))]
# if a stop token occurs check the potential string before it
elif tokenize.tok_name[tokenType] in stopTokens:
if lastToken[0] == 'STRING':
for obj in useCounts:
useCounts[obj] += lastToken[1].count("'%s'" % obj)
useCounts[obj] += lastToken[1].count('"%s"' % obj)
# store the stop token as lastToken
lastToken = [tokenize.tok_name[tokenType], str(token)]
def handleDataFiles(openFile, separator):
global useCounts
# ignore header line
openFile.readline()
for line in openFile:
currentTokens = line.split(separator)
for token in currentTokens:
stripped = token.strip().strip('"')
if stripped in useCounts:
useCounts[stripped] = useCounts[stripped] + 1
def findUsages():
global directory, objMap
suffixes = (".py", ".csv", ".tsv")
for root, dirnames, filenames in os.walk(directory):
for filename in filter(lambda x: x.endswith(suffixes), filenames):
currentFile = open(os.path.join(root, filename))
if filename.endswith(".py"):
tokenize.tokenize(currentFile.readline, handle_token)
elif filename.endswith(".csv"):
handleDataFiles(currentFile, ",")
elif filename.endswith(".tsv"):
handleDataFiles(currentFile, "\t")
currentFile.close()
currentFile = open(objMap)
tokenize.tokenize(currentFile.readline, handle_token)
currentFile.close()
def printResult():
global useCounts, onlyRemovable
print
if onlyRemovable:
if min(useCounts.values()) > 0:
print "All objects are used once at least.\n"
return False
print "Unused objects:\n"
for obj in filter(lambda x: useCounts[x] == 0, useCounts):
print "%s" % obj
return True
else:
length = max(map(len, useCounts.keys()))
outFormat = "%%%ds %%3d" % length
for obj,useCount in useCounts.iteritems():
print outFormat % (obj, useCount)
print
return None
def main():
global useCounts, objMap
objMap = checkDirectory(directory)
useCounts = dict.fromkeys(collectObjects(), 0)
findUsages()
atLeastOneRemovable = printResult()
if atLeastOneRemovable:
print "\nAfter removing the listed objects you should re-run this tool"
print "to find objects that might have been used only by these objects.\n"
return 0
if __name__ == '__main__':
parseCommandLine()
sys.exit(main())
| lgpl-2.1 |
pescobar/easybuild-framework | easybuild/toolchains/mpi/__init__.py | 2 | 1244 | ##
# Copyright 2012-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declares toolchains.mpi namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
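# Hedged note, not part of the original file: extend_path() lets other distributions
# that ship a 'toolchains.mpi' package contribute modules to this namespace, so MPI
# toolchain definitions installed elsewhere on sys.path are importable alongside these.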
| gpl-2.0 |
mengqiy/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 24 | 55854 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import shutil
import subprocess
import time
import yaml
from charms.leadership import leader_get, leader_set
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# migrate to new flags
if is_state('kubernetes-worker.restarted-for-cloud'):
remove_state('kubernetes-worker.restarted-for-cloud')
set_state('kubernetes-worker.cloud.ready')
if is_state('kubernetes-worker.cloud-request-sent'):
# minor change, just for consistency
remove_state('kubernetes-worker.cloud-request-sent')
set_state('kubernetes-worker.cloud.request-sent')
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
migrate_resource_checksums()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
if is_state('kubernetes-worker.gpu.enabled'):
remove_state('kubernetes-worker.gpu.enabled')
try:
disable_gpu()
except ApplyNodeLabelFailed:
# Removing node label failed. Probably the master is unavailable.
# Proceed with the upgrade in hope GPUs will still be there.
hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def get_resource_checksum_db_key(resource):
''' Convert a resource name to a resource checksum database key. '''
return 'kubernetes-worker.resource-checksums.' + resource
def calculate_resource_checksum(resource):
''' Calculate a checksum for a resource '''
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
def migrate_resource_checksums():
''' Migrate resource checksums from the old schema to the new one '''
for resource in snap_resources:
new_key = get_resource_checksum_db_key(resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = 'reactive.files_changed.' + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
set_upgrade_needed()
def calculate_and_store_resource_checksums():
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
calculate_and_store_resource_checksums()
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', get_node_name())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
    # Handle null resource publication: we check if the filesize is < 1 MB
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
''' Set the snapd refresh timer on the leader so all cluster members
(present and future) will refresh near the same time. '''
# Get the current snapd refresh timer; we know layer-snap has set this
# when the 'snap.refresh.set' flag is present.
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
# The first time through, data_changed will be true. Subsequent calls
# should only update leader data if something changed.
if data_changed('worker_snapd_refresh', timer):
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
leader_set({'snapd_refresh': timer})
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
''' Set the snapd refresh.timer on non-leader cluster members. '''
# NB: This method should only be run when 'snap.refresh.set' is present.
# Layer-snap will always set a core refresh.timer, which may not be the
# same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
# has finished and we are free to set our config to the leader's timer.
timer = leader_get('snapd_refresh')
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
snap.set_refresh_timer(timer)
@hookenv.atexit
def charm_status():
'''Update the status message with the current status of kubelet.'''
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
cloud_blocked = is_state('kubernetes-worker.cloud.blocked')
if vsphere_joined and cloud_blocked:
hookenv.status_set('blocked',
'vSphere integration requires K8s 1.12 or greater')
return
if azure_joined and cloud_blocked:
hookenv.status_set('blocked',
'Azure integration requires K8s 1.11 or greater')
return
if is_state('kubernetes-worker.cloud.pending'):
hookenv.status_set('waiting', 'Waiting for cloud integration')
return
if not is_state('kube-control.dns.available'):
        # During deployment the worker has to start kubelet without cluster dns
        # configured. This happens when it is the first unit online in a service
        # pool waiting to self-host the dns pod; it will later configure itself
        # to query the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
return
if is_state('kubernetes-worker.snaps.upgrade-specified'):
hookenv.status_set('waiting', 'Upgrade pending')
return
if is_state('kubernetes-worker.snaps.upgrade-needed'):
hookenv.status_set('blocked',
'Needs manual upgrade, run the upgrade action')
return
if is_state('kubernetes-worker.snaps.installed'):
update_kubelet_status()
return
else:
pass # will have been set by snap layer or other handler
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
@when_not('kubernetes-worker.cloud.pending',
'kubernetes-worker.cloud.blocked')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress
load balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args',
'config.changed.kubelet-extra-config')
def config_changed_requires_restart():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
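# Hedged sketch, not part of the original charm: restates the parsing rule used by
# parse_extra_args() above on a literal string, so the resulting dict shape is visible
# without charm config -- "key=value" pairs keep their value, bare flags become 'true'.
def _example_extra_args_parsing(raw='v=3 authorization-mode=Webhook debug'):
    args = {}
    for element in raw.split():
        if '=' in element:
            key, _, value = element.partition('=')
            args[key] = value
        else:
            args[element] = 'true'
    return args  # {'v': '3', 'authorization-mode': 'Webhook', 'debug': 'true'}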
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
def merge_kubelet_extra_config(config, extra_config):
''' Updates config to include the contents of extra_config. This is done
recursively to allow deeply nested dictionaries to be merged.
This is destructive: it modifies the config dict that is passed in.
'''
for k, extra_config_value in extra_config.items():
if isinstance(extra_config_value, dict):
config_value = config.setdefault(k, {})
merge_kubelet_extra_config(config_value, extra_config_value)
else:
config[k] = extra_config_value
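# Hedged example, not part of the original charm: shows the recursive merge performed
# by merge_kubelet_extra_config() -- nested dicts are merged key by key, and scalar
# values from extra_config overwrite those already in config.
def _example_merge_kubelet_extra_config():
    config = {'evictionHard': {'memory.available': '100Mi'}, 'failSwapOn': False}
    extra = {'evictionHard': {'nodefs.available': '10%'}, 'failSwapOn': True}
    merge_kubelet_extra_config(config, extra)
    # config is now {'evictionHard': {'memory.available': '100Mi',
    #                                 'nodefs.available': '10%'},
    #                'failSwapOn': True}
    return config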
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['node-ip'] = ingress_ip
kubelet_opts['allow-privileged'] = set_privileged()
if is_state('endpoint.aws.ready'):
kubelet_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'gce'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.openstack.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'openstack'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.vsphere.joined'):
# vsphere just needs to be joined on the worker (vs 'ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'vsphere'
# NB: vsphere maps node product-id to its uuid (no config file needed).
uuid_file = '/sys/class/dmi/id/product_uuid'
with open(uuid_file, 'r') as f:
uuid = f.read().strip()
kubelet_opts['provider-id'] = 'vsphere://{}'.format(uuid)
elif is_state('endpoint.azure.ready'):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'azure'
kubelet_opts['cloud-config'] = str(cloud_config_path)
kubelet_opts['provider-id'] = azure.vm_id
if get_version('kubelet') >= (1, 10):
# Put together the KubeletConfiguration data
kubelet_config = {
'apiVersion': 'kubelet.config.k8s.io/v1beta1',
'kind': 'KubeletConfiguration',
'address': '0.0.0.0',
'authentication': {
'anonymous': {
'enabled': False
},
'x509': {
'clientCAFile': ca_cert_path
}
},
'clusterDomain': dns['domain'],
'failSwapOn': False,
'port': 10250,
'tlsCertFile': server_cert_path,
'tlsPrivateKeyFile': server_key_path
}
if dns['enable-kube-dns']:
kubelet_config['clusterDNS'] = [dns['sdn-ip']]
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_config['featureGates'] = {
'DevicePlugins': True
}
# Add kubelet-extra-config. This needs to happen last so that it
# overrides any config provided by the charm.
kubelet_extra_config = hookenv.config('kubelet-extra-config')
kubelet_extra_config = yaml.load(kubelet_extra_config)
merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)
# Render the file and configure Kubelet to use it
os.makedirs('/root/cdk/kubelet', exist_ok=True)
with open('/root/cdk/kubelet/config.yaml', 'w') as f:
f.write('# Generated by kubernetes-worker charm, do not edit\n')
yaml.dump(kubelet_config, f)
kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
else:
# NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
# this whole block and the parent if statement.
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['port'] = '10250'
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
if dns['enable-kube-dns']:
kubelet_opts['cluster-dns'] = dns['sdn-ip']
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_opts['feature-gates'] = 'DevicePlugins=true'
if get_version('kubelet') >= (1, 11):
kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
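# Hedged usage sketch, not part of the original charm: create_kubeconfig() is called
# with either a token or a key/certificate pair; every path and value below is
# illustrative only.
def _example_create_kubeconfig():
    create_kubeconfig('/root/cdk/kubeconfig',       # file to write
                      'https://192.168.0.10:6443',  # API server URL
                      '/root/cdk/ca.crt',           # CA certificate to embed
                      token='example-token',        # token auth (exclusive with password)
                      user='kubelet')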
@when_any('config.changed.default-backend-image',
'config.changed.ingress-ssl-chain-completion',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.5"
elif context['arch'] == 'arm64':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-arm64:1.5"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-amd64:1.5"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ssl_chain_completion'] = config.get(
'ingress-ssl-chain-completion')
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1', # noqa
'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1', # noqa
's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1', # noqa
'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1', # noqa
}
context['ingress_image'] = images.get(context['arch'], images['amd64'])
if get_version('kubelet') < (1, 9):
context['daemonset_api_version'] = 'extensions/v1beta1'
else:
context['daemonset_api_version'] = 'apps/v1'
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Return 'true' if privileged containers are needed.
This is when a) the user requested them
b) user does not care (auto) and GPUs are available in a pre
1.9 era
"""
privileged = hookenv.config('allow-privileged').lower()
gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
get_version('kubelet') < (1, 9))
if privileged == 'auto':
privileged = 'true' if gpu_needs_privileged else 'false'
if privileged == 'false' and gpu_needs_privileged:
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
# No need to restart kubernetes (set the restart-needed state)
# because set-privileged is already in the restart path
return privileged
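# Hedged summary, not part of the original charm, of the decision made by
# set_privileged() above:
#   allow-privileged   GPU needs privileged (pre-1.9)   result
#   'true'             either                           'true'
#   'false'            yes                              'false' (GPU support is disabled)
#   'false'            no                               'false'
#   'auto'             yes                              'true'
#   'auto'             no                               'false'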
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
if get_version('kubelet') < (1, 9):
hookenv.status_set(
'active',
'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
"""Cuda departed, probably due to the docker layer switching to a
non nvidia-docker."""
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(get_node_name().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(get_node_name().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
try:
goal_state = hookenv.goal_state()
except NotImplementedError:
goal_state = {}
if 'kube-control' in goal_state.get('relations', {}):
hookenv.status_set(
'waiting',
'Waiting for kubernetes-master to become ready')
else:
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
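    # Work out the name kubelet registers the node under: honour an explicit
    # cloud-provider in kubelet-extra-args, override it from the joined cloud
    # endpoint, then use the FQDN on AWS and the plain hostname elsewhere.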
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if is_state('endpoint.aws.ready'):
cloud_provider = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_provider = 'gce'
elif is_state('endpoint.openstack.ready'):
cloud_provider = 'openstack'
elif is_state('endpoint.vsphere.ready'):
cloud_provider = 'vsphere'
elif is_state('endpoint.azure.ready'):
cloud_provider = 'azure'
if cloud_provider == 'aws':
return getfqdn().lower()
else:
return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
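    # Retry `cmd` for up to 180 seconds, logging `retry_message` between
    # attempts; returns True on a zero exit code, False once the deadline passes.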
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
def set_label(label, value):
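    # Apply (or overwrite) a node label on this worker via kubectl, retrying
    # with persistent_call; raises ApplyNodeLabelFailed if it never succeeds.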
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
@when_not('kubernetes-worker.cloud.ready')
def set_cloud_pending():
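    # Block cloud integration if the joined cloud needs a newer Kubernetes
    # than is installed (vSphere needs >= 1.12, Azure >= 1.11); otherwise
    # clear any block. In both cases flag the integration as pending.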
k8s_version = get_version('kubelet')
k8s_1_11 = k8s_version >= (1, 11)
k8s_1_12 = k8s_version >= (1, 12)
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
set_state('kubernetes-worker.cloud.blocked')
else:
remove_state('kubernetes-worker.cloud.blocked')
set_state('kubernetes-worker.cloud.pending')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.azure.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud.request-sent')
def request_integration():
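    # Ask the cloud integrator to tag/label this instance with the cluster tag
    # and to enable the extras each provider needs (object storage management,
    # and on Azure also instance inspection and DNS management).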
hookenv.status_set('maintenance', 'requesting cloud integration')
kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
cluster_tag = kube_control.get_cluster_tag()
if is_state('endpoint.aws.joined'):
cloud = endpoint_from_flag('endpoint.aws.joined')
cloud.tag_instance({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_security_group({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_subnet({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.enable_object_storage_management(['kubernetes-*'])
elif is_state('endpoint.gcp.joined'):
cloud = endpoint_from_flag('endpoint.gcp.joined')
cloud.label_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
elif is_state('endpoint.azure.joined'):
cloud = endpoint_from_flag('endpoint.azure.joined')
cloud.tag_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
cloud.enable_instance_inspection()
cloud.enable_dns_management()
set_state('kubernetes-worker.cloud.request-sent')
hookenv.status_set('waiting', 'Waiting for cloud integration')
@when_none('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
def clear_cloud_flags():
remove_state('kubernetes-worker.cloud.pending')
remove_state('kubernetes-worker.cloud.request-sent')
remove_state('kubernetes-worker.cloud.blocked')
remove_state('kubernetes-worker.cloud.ready')
@when_any('endpoint.aws.ready',
'endpoint.gcp.ready',
'endpoint.openstack.ready',
'endpoint.vsphere.ready',
'endpoint.azure.ready')
@when_not('kubernetes-worker.cloud.blocked',
'kubernetes-worker.cloud.ready')
def cloud_ready():
remove_state('kubernetes-worker.cloud.pending')
if is_state('endpoint.gcp.ready'):
_write_gcp_snap_config('kubelet')
elif is_state('endpoint.openstack.ready'):
_write_openstack_snap_config('kubelet')
elif is_state('endpoint.azure.ready'):
_write_azure_snap_config('kubelet')
set_state('kubernetes-worker.cloud.ready')
set_state('kubernetes-worker.restart-needed') # force restart
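# Helpers for the snap's per-component 'common' directory, where the cloud
# config and credentials consumed by the kubelet snap are rendered.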
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
# openstack requires additional credentials setup
openstack = endpoint_from_flag('endpoint.openstack.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('\n'.join([
'[Global]',
'auth-url = {}'.format(openstack.auth_url),
'username = {}'.format(openstack.username),
'password = {}'.format(openstack.password),
'tenant-name = {}'.format(openstack.project_name),
'domain-name = {}'.format(openstack.user_domain_name),
]))
def _write_azure_snap_config(component):
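    # Render the Azure cloud-config JSON for the kubelet snap, using instance
    # metadata / managed identity plus the values from the azure endpoint.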
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text(json.dumps({
'useInstanceMetadata': True,
'useManagedIdentityExtension': True,
'subscriptionId': azure.subscription_id,
'resourceGroup': azure.resource_group,
'location': azure.resource_group_location,
'vnetName': azure.vnet_name,
'vnetResourceGroup': azure.vnet_resource_group,
'subnetName': azure.subnet_name,
'securityGroupName': azure.security_group_name,
}))
def get_first_mount(mount_relation):
mount_relation_list = mount_relation.mounts()
if mount_relation_list and len(mount_relation_list) > 0:
# mount relation list is a list of the mount layer relations
# for now we just use the first one that is nfs
for mount in mount_relation_list:
# for now we just check the first mount and use that.
# the nfs charm only supports one for now.
if ('mounts' in mount and
mount['mounts'][0]['fstype'] == 'nfs'):
return mount['mounts'][0]
return None
@when('nfs.available')
def nfs_state_control(mount):
    ''' If the nfs relation data has changed, remove the state that gates the
    nfs-relation-changed handling so that the nfs configs are re-rendered
    and re-applied. '''
mount_data = get_first_mount(mount)
if mount_data:
nfs_relation_data = {
'options': mount_data['options'],
'host': mount_data['hostname'],
'mountpoint': mount_data['mountpoint'],
'fstype': mount_data['fstype']
}
# Re-execute the rendering if the data has changed.
if data_changed('nfs-config', nfs_relation_data):
hookenv.log('reconfiguring nfs')
remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
'''NFS on kubernetes requires nfs config rendered into a deployment of
the nfs client provisioner. That will handle the persistent volume claims
with no persistent volume to back them.'''
mount_data = get_first_mount(mount)
if not mount_data:
return
addon_path = '/root/cdk/addons/{}'
# Render the NFS deployment
manifest = addon_path.format('nfs-provisioner.yaml')
render('nfs-provisioner.yaml', manifest, mount_data)
hookenv.log('Creating the nfs provisioner.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create nfs provisioner. Will attempt again next update.') # noqa
return
set_state('nfs.configured')
| apache-2.0 |
savoirfairelinux/odoo | addons/hr_expense/hr_expense.py | 22 | 25312 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
def _employee_get(obj, cr, uid, context=None):
if context is None:
context = {}
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
class hr_expense_expense(osv.osv):
def _amount(self, cr, uid, ids, field_name, arg, context=None):
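        # Functional-field helper: the total of each expense sheet is the sum
        # of unit_amount * unit_quantity over its lines.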
res= {}
for expense in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in expense.line_ids:
total += line.unit_amount * line.unit_quantity
res[expense.id] = total
return res
def _get_expense_from_line(self, cr, uid, ids, context=None):
return [line.expense_id.id for line in self.pool.get('hr.expense.line').browse(cr, uid, ids, context=context)]
def _get_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]
return user.company_id.currency_id.id
_name = "hr.expense.expense"
_inherit = ['mail.thread']
_description = "Expense"
_order = "id desc"
_track = {
'state': {
'hr_expense.mt_expense_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'accepted',
'hr_expense.mt_expense_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'cancelled',
'hr_expense.mt_expense_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
_columns = {
'name': fields.char('Description', size=128, required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'id': fields.integer('Sheet ID', readonly=True),
'date': fields.date('Date', select=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'journal_id': fields.many2one('account.journal', 'Force Journal', help = "The journal used when the expense is done."),
'employee_id': fields.many2one('hr.employee', "Employee", required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'User', required=True),
'date_confirm': fields.date('Confirmation Date', select=True, help="Date of the confirmation of the sheet expense. It's filled when the button Confirm is pressed."),
'date_valid': fields.date('Validation Date', select=True, help="Date of the acceptation of the sheet expense. It's filled when the button Accept is pressed."),
'user_valid': fields.many2one('res.users', 'Validation By', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'account_move_id': fields.many2one('account.move', 'Ledger Posting'),
'line_ids': fields.one2many('hr.expense.line', 'expense_id', 'Expense Lines', readonly=True, states={'draft':[('readonly',False)]} ),
'note': fields.text('Note'),
'amount': fields.function(_amount, string='Total Amount', digits_compute=dp.get_precision('Account'),
store={
'hr.expense.line': (_get_expense_from_line, ['unit_amount','unit_quantity'], 10)
}),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'department_id':fields.many2one('hr.department','Department', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True),
'state': fields.selection([
('draft', 'New'),
('cancelled', 'Refused'),
('confirm', 'Waiting Approval'),
('accepted', 'Approved'),
('done', 'Waiting Payment'),
('paid', 'Paid'),
],
'Status', readonly=True, track_visibility='onchange',
            help='When the expense request is created the status is \'Draft\'.\n When it is confirmed by the user and the request is sent to the admin, the status is \'Waiting Confirmation\'.\
\nIf the admin accepts it, the status is \'Accepted\'.\n If the accounting entries are made for the expense request, the status is \'Waiting Payment\'.'),
}
_defaults = {
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'hr.employee', context=c),
'date': fields.date.context_today,
'state': 'draft',
'employee_id': _employee_get,
'user_id': lambda cr, uid, id, c={}: id,
'currency_id': _get_currency,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update(
account_move_id=False,
date_confirm=False,
date_valid=False,
user_valid=False)
return super(hr_expense_expense, self).copy(cr, uid, id, default=default, context=context)
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state != 'draft':
raise osv.except_osv(_('Warning!'),_('You can only delete draft expenses!'))
return super(hr_expense_expense, self).unlink(cr, uid, ids, context)
def onchange_currency_id(self, cr, uid, ids, currency_id=False, company_id=False, context=None):
res = {'value': {'journal_id': False}}
journal_ids = self.pool.get('account.journal').search(cr, uid, [('type','=','purchase'), ('currency','=',currency_id), ('company_id', '=', company_id)], context=context)
if journal_ids:
res['value']['journal_id'] = journal_ids[0]
return res
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
emp_obj = self.pool.get('hr.employee')
department_id = False
company_id = False
if employee_id:
employee = emp_obj.browse(cr, uid, employee_id, context=context)
department_id = employee.department_id.id
company_id = employee.company_id.id
return {'value': {'department_id': department_id, 'company_id': company_id}}
def expense_confirm(self, cr, uid, ids, context=None):
for expense in self.browse(cr, uid, ids):
if expense.employee_id and expense.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [expense.id], user_ids=[expense.employee_id.parent_id.user_id.id])
return self.write(cr, uid, ids, {'state': 'confirm', 'date_confirm': time.strftime('%Y-%m-%d')}, context=context)
def expense_accept(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'accepted', 'date_valid': time.strftime('%Y-%m-%d'), 'user_valid': uid}, context=context)
def expense_canceled(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def account_move_get(self, cr, uid, expense_id, context=None):
'''
        This method prepares the creation of the account move related to the given expense.
:param expense_id: Id of expense for which we are creating account_move.
:return: mapping between fieldname and value of account move to create
:rtype: dict
'''
journal_obj = self.pool.get('account.journal')
expense = self.browse(cr, uid, expense_id, context=context)
company_id = expense.company_id.id
date = expense.date_confirm
ref = expense.name
journal_id = False
if expense.journal_id:
journal_id = expense.journal_id.id
else:
journal_id = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', company_id)])
if not journal_id:
raise osv.except_osv(_('Error!'), _("No expense journal found. Please make sure you have a journal with type 'purchase' configured."))
journal_id = journal_id[0]
return self.pool.get('account.move').account_move_prepare(cr, uid, journal_id, date=date, ref=ref, company_id=company_id, context=context)
def line_get_convert(self, cr, uid, x, part, date, context=None):
partner_id = self.pool.get('res.partner')._find_accounting_partner(part).id
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': partner_id,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', False),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
def compute_expense_totals(self, cr, uid, exp, company_currency, ref, account_move_lines, context=None):
'''
internal method used for computation of total amount of an expense in the company currency and
        in the expense currency, given the account_move_lines that will be created. It also does some small
        transformations to these account_move_lines (for multi-currency purposes)
:param account_move_lines: list of dict
:rtype: tuple of 3 elements (a, b ,c)
a: total in company currency
b: total in hr.expense currency
c: account_move_lines potentially modified
'''
cur_obj = self.pool.get('res.currency')
if context is None:
context={}
context.update({'date': exp.date_confirm or time.strftime('%Y-%m-%d')})
total = 0.0
total_currency = 0.0
for i in account_move_lines:
if exp.currency_id.id != company_currency:
i['currency_id'] = exp.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, exp.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, account_move_lines
def action_move_create(self, cr, uid, ids, context=None):
'''
main function that is called when trying to create the accounting entries related to an expense
'''
move_obj = self.pool.get('account.move')
for exp in self.browse(cr, uid, ids, context=context):
if not exp.employee_id.address_home_id:
raise osv.except_osv(_('Error!'), _('The employee must have a home address.'))
if not exp.employee_id.address_home_id.property_account_payable.id:
raise osv.except_osv(_('Error!'), _('The employee must have a payable account set on his home address.'))
company_currency = exp.company_id.currency_id.id
diff_currency_p = exp.currency_id.id <> company_currency
#create the move that will contain the accounting entries
move_id = move_obj.create(cr, uid, self.account_move_get(cr, uid, exp.id, context=context), context=context)
#one account.move.line per expense line (+taxes..)
eml = self.move_line_get(cr, uid, exp.id, context=context)
#create one more move line, a counterline for the total on payable account
total, total_currency, eml = self.compute_expense_totals(cr, uid, exp, company_currency, exp.name, eml, context=context)
acc = exp.employee_id.address_home_id.property_account_payable.id
eml.append({
'type': 'dest',
'name': '/',
'price': total,
'account_id': acc,
'date_maturity': exp.date_confirm,
'amount_currency': diff_currency_p and total_currency or False,
'currency_id': diff_currency_p and exp.currency_id.id or False,
'ref': exp.name
})
#convert eml into an osv-valid format
lines = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, exp.employee_id.address_home_id, exp.date_confirm, context=context)), eml)
journal_id = move_obj.browse(cr, uid, move_id, context).journal_id
# post the journal entry if 'Skip 'Draft' State for Manual Entries' is checked
if journal_id.entry_posted:
move_obj.button_validate(cr, uid, [move_id], context)
move_obj.write(cr, uid, [move_id], {'line_id': lines}, context=context)
self.write(cr, uid, ids, {'account_move_id': move_id, 'state': 'done'}, context=context)
return True
def move_line_get(self, cr, uid, expense_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
exp = self.browse(cr, uid, expense_id, context=context)
company_currency = exp.company_id.currency_id.id
for line in exp.line_ids:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
res.append(mres)
tax_code_found= False
#Calculate tax according to default tax on product
taxes = []
#Taken from product_id_onchange in account.invoice
if line.product_id:
fposition_id = False
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
product = line.product_id
taxes = product.supplier_taxes_id
#If taxes are not related to the product, maybe they are in the account
if not taxes:
                    a = product.property_account_expense.id  # Why isn't there a check here?
if not a:
a = product.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
taxes = a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if not taxes:
continue
#Calculating tax on the line and creating move?
for tax in tax_obj.compute_all(cr, uid, taxes,
line.unit_amount ,
line.unit_quantity, line.product_id,
exp.user_id.partner_id)['taxes']:
tax_code_id = tax['base_code_id']
tax_amount = line.total_amount * tax['base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, exp.currency_id.id, company_currency, tax_amount, context={'date': exp.date_confirm})
##
is_price_include = tax_obj.read(cr,uid,tax['id'],['price_include'],context)['price_include']
if is_price_include:
## We need to deduce the price for the tax
res[-1]['price'] = res[-1]['price'] - (tax['amount'] * tax['base_sign'] or 0.0)
assoc_tax = {
'type':'tax',
'name':tax['name'],
'price_unit': tax['price_unit'],
'quantity': 1,
'price': tax['amount'] * tax['base_sign'] or 0.0,
'account_id': tax['account_collected_id'] or mres['account_id'],
'tax_code_id': tax['tax_code_id'],
'tax_amount': tax['amount'] * tax['base_sign'],
}
res.append(assoc_tax)
return res
def move_line_get_item(self, cr, uid, line, context=None):
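        # Build the 'src' move-line dict for one expense line, resolving the
        # expense account from the product, then its category, then the
        # company-level default property.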
company = line.expense_id.company_id
property_obj = self.pool.get('ir.property')
if line.product_id:
acc = line.product_id.property_account_expense
if not acc:
acc = line.product_id.categ_id.property_account_expense_categ
if not acc:
                    raise osv.except_osv(_('Error!'), _('No purchase account found for the product %s (or for its category), please configure one.') % (line.product_id.name))
else:
acc = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context={'force_company': company.id})
if not acc:
raise osv.except_osv(_('Error!'), _('Please configure Default Expense account for Product purchase: `property_account_expense_categ`.'))
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.unit_amount,
'quantity':line.unit_quantity,
'price':line.total_amount,
'account_id':acc.id,
'product_id':line.product_id.id,
'uos_id':line.uom_id.id,
'account_analytic_id':line.analytic_account.id,
}
def action_view_move(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing account.move of given expense ids.
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
expense = self.browse(cr, uid, ids[0], context=context)
assert expense.account_move_id
try:
dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'view_move_form')
except ValueError, e:
view_id = False
result = {
'name': _('Expense Account Move'),
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'res_model': 'account.move',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': expense.account_move_id.id,
}
return result
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'hr_expense_ok': fields.boolean('Can be Expensed', help="Specify if the product can be selected in an HR expense line."),
}
class hr_expense_line(osv.osv):
_name = "hr.expense.line"
_description = "Expense Line"
def _amount(self, cr, uid, ids, field_name, arg, context=None):
if not ids:
return {}
cr.execute("SELECT l.id,COALESCE(SUM(l.unit_amount*l.unit_quantity),0) AS amount FROM hr_expense_line l WHERE id IN %s GROUP BY l.id ",(tuple(ids),))
res = dict(cr.fetchall())
return res
def _get_uom_id(self, cr, uid, context=None):
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result and result[1] or False
_columns = {
'name': fields.char('Expense Note', size=128, required=True),
'date_value': fields.date('Date', required=True),
'expense_id': fields.many2one('hr.expense.expense', 'Expense', ondelete='cascade', select=True),
'total_amount': fields.function(_amount, string='Total', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Unit Price', digits_compute=dp.get_precision('Product Price')),
'unit_quantity': fields.float('Quantities', digits_compute= dp.get_precision('Product Unit of Measure')),
'product_id': fields.many2one('product.product', 'Product', domain=[('hr_expense_ok','=',True)]),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
'description': fields.text('Description'),
'analytic_account': fields.many2one('account.analytic.account','Analytic account'),
'ref': fields.char('Reference', size=32),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of expense lines."),
}
_defaults = {
'unit_quantity': 1,
'date_value': lambda *a: time.strftime('%Y-%m-%d'),
'uom_id': _get_uom_id,
}
_order = "sequence, date_value desc"
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
res = {}
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['name'] = product.name
amount_unit = product.price_get('standard_price')[product.id]
res['unit_amount'] = amount_unit
res['uom_id'] = product.uom_id.id
return {'value': res}
def onchange_uom(self, cr, uid, ids, product_id, uom_id, context=None):
res = {'value':{}}
if not uom_id or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, uom_id, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure')}
res['value'].update({'uom_id': product.uom_id.id})
return res
class account_move_line(osv.osv):
_inherit = "account.move.line"
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
res = super(account_move_line, self).reconcile(cr, uid, ids, type=type, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id, context=context)
#when making a full reconciliation of account move lines 'ids', we may need to recompute the state of some hr.expense
account_move_ids = [aml.move_id.id for aml in self.browse(cr, uid, ids, context=context)]
expense_obj = self.pool.get('hr.expense.expense')
currency_obj = self.pool.get('res.currency')
if account_move_ids:
expense_ids = expense_obj.search(cr, uid, [('account_move_id', 'in', account_move_ids)], context=context)
for expense in expense_obj.browse(cr, uid, expense_ids, context=context):
if expense.state == 'done':
                    # assume it should be marked as paid, then try to disprove that
new_status_is_paid = True
for aml in expense.account_move_id.line_id:
if aml.account_id.type == 'payable' and not currency_obj.is_zero(cr, uid, expense.company_id.currency_id, aml.amount_residual):
new_status_is_paid = False
if new_status_is_paid:
expense_obj.write(cr, uid, [expense.id], {'state': 'paid'}, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
geometalab/Vector-Tiles-Reader-QGIS-Plugin | ext-libs/google/protobuf/internal/text_encoding_test.py | 126 | 2903 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.text_encoding."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import text_encoding
TEST_VALUES = [
("foo\\rbar\\nbaz\\t",
"foo\\rbar\\nbaz\\t",
b"foo\rbar\nbaz\t"),
("\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
b"'full of \"sound\" and \"fury\"'"),
("signi\\\\fying\\\\ nothing\\\\",
"signi\\\\fying\\\\ nothing\\\\",
b"signi\\fying\\ nothing\\"),
("\\010\\t\\n\\013\\014\\r",
"\x08\\t\\n\x0b\x0c\\r",
b"\010\011\012\013\014\015")]
class TextEncodingTestCase(unittest.TestCase):
def testCEscape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(escaped,
text_encoding.CEscape(unescaped, as_utf8=False))
self.assertEqual(escaped_utf8,
text_encoding.CEscape(unescaped, as_utf8=True))
def testCUnescape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(unescaped, text_encoding.CUnescape(escaped))
self.assertEqual(unescaped, text_encoding.CUnescape(escaped_utf8))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
emilio/servo | tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/tests/test_sanitizer.py | 29 | 5540 | from __future__ import absolute_import, division, unicode_literals
from html5lib import constants, parseFragment, serialize
from html5lib.filters import sanitizer
def runSanitizerTest(_, expected, input):
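    # Normalise `expected` through the serializer (without sanitizing) and
    # compare it with the sanitized serialization of `input`.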
parsed = parseFragment(expected)
expected = serialize(parsed,
omit_optional_tags=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
quote_attr_values="always",
quote_char='"',
alphabetical_attributes=True)
assert expected == sanitize_html(input)
def sanitize_html(stream):
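    # Parse the fragment and re-serialize it with the sanitizer filter enabled,
    # using the same serializer options as the expected output above.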
parsed = parseFragment(stream)
serialized = serialize(parsed,
sanitize=True,
omit_optional_tags=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
quote_attr_values="always",
quote_char='"',
alphabetical_attributes=True)
return serialized
def test_should_handle_astral_plane_characters():
sanitized = sanitize_html("<p>𝒵 𝔸</p>")
expected = '<p>\U0001d4b5 \U0001d538</p>'
assert expected == sanitized
def test_should_allow_relative_uris():
sanitized = sanitize_html('<p><a href="/example.com"></a></p>')
expected = '<p><a href="/example.com"></a></p>'
assert expected == sanitized
def test_invalid_data_uri():
sanitized = sanitize_html('<audio controls="" src="data:foobar"></audio>')
expected = '<audio controls></audio>'
assert expected == sanitized
def test_invalid_ipv6_url():
sanitized = sanitize_html('<a href="h://]">')
expected = "<a></a>"
assert expected == sanitized
def test_data_uri_disallowed_type():
sanitized = sanitize_html('<audio controls="" src="data:text/html,<html>"></audio>')
expected = "<audio controls></audio>"
assert expected == sanitized
def test_sanitizer():
for ns, tag_name in sanitizer.allowed_elements:
if ns != constants.namespaces["html"]:
continue
if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td',
'tfoot', 'th', 'thead', 'tr', 'select']:
continue # TODO
if tag_name == 'image':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<img title=\"1\"/>foo <bad>bar</bad> baz",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
elif tag_name == 'br':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
elif tag_name in constants.voidElements:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
else:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
for ns, attribute_name in sanitizer.allowed_attributes:
if ns is not None:
continue
if attribute_name != attribute_name.lower():
continue # TODO
if attribute_name == 'style':
continue
attribute_value = 'foo'
if attribute_name in sanitizer.attr_val_is_uri:
attribute_value = '%s://sub.domain.tld/path/object.ext' % sanitizer.allowed_protocols[0]
yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
"<p %s=\"%s\">foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value),
"<p %s='%s'>foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value))
for protocol in sanitizer.allowed_protocols:
rest_of_uri = '//sub.domain.tld/path/object.ext'
if protocol == 'data':
rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
"""<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
for protocol in sanitizer.allowed_protocols:
rest_of_uri = '//sub.domain.tld/path/object.ext'
if protocol == 'data':
rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
protocol = protocol.upper()
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
"""<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
def test_lowercase_color_codes_in_style():
sanitized = sanitize_html("<p style=\"border: 1px solid #a2a2a2;\"></p>")
expected = '<p style=\"border: 1px solid #a2a2a2;\"></p>'
assert expected == sanitized
def test_uppercase_color_codes_in_style():
sanitized = sanitize_html("<p style=\"border: 1px solid #A2A2A2;\"></p>")
expected = '<p style=\"border: 1px solid #A2A2A2;\"></p>'
assert expected == sanitized
| mpl-2.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/python/profiler/pprof_profiler_test.py | 70 | 5174 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEquals(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEquals(expected_proto, str(profile))
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.test_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEquals(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEquals(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEquals(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
odootr/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Translation.py | 384 | 11372 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.functions import *
from lib.error import ErrorDialog
from LoginTest import *
from lib.rpc import *
database="test_001"
uid = 3
class AddLang(unohelper.Base, XJobExecutor ):
def __init__(self, sVariable="", sFields="", sDisplayName="", bFromModify=False):
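        # Build the "Set Lang Builder" dialog (variable combo box, field list,
        # display-name edit box) and pre-fill it from the current document when
        # an existing setLang field is being modified (bFromModify=True).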
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
global passwd
self.password = passwd
self.win = DBModalDialog(60, 50, 180, 225, "Set Lang Builder")
self.win.addFixedText("lblVariable", 27, 12, 60, 15, "Variable :")
self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True,
itemListenerProc=self.cmbVariable_selected)
self.insVariable = self.win.getControl( "cmbVariable" )
self.win.addFixedText("lblFields", 10, 32, 60, 15, "Variable Fields :")
self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected)
self.insField = self.win.getControl( "lstFields" )
self.win.addFixedText("lblUName", 8, 187, 60, 15, "Displayed name :")
self.win.addEdit("txtUName", 180-120-2, 185, 120, 15,)
self.win.addButton('btnOK',-5 ,-5, 45, 15, 'Ok', actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-5 - 45 - 5 ,-5,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked )
self.sValue=None
self.sObj=None
self.aSectionList=[]
self.sGDisplayName=sDisplayName
self.aItemList=[]
self.aComponentAdd=[]
self.aObjectList=[]
self.aListFields=[]
self.aVariableList=[]
EnumDocument(self.aItemList,self.aComponentAdd)
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.sMyHost= ""
global url
self.sock=RPCSession(url)
if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
self.sMyHost= docinfo.getUserFieldValue(0)
self.count=0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
self.count += 1
getList(self.aObjectList, self.sMyHost,self.count)
cursor = doc.getCurrentController().getViewCursor()
text=cursor.getText()
tcur=text.createTextCursorByRange(cursor)
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == "Objects", self.aObjectList ) )
for i in range(len(self.aItemList)):
anItem = self.aItemList[i][1]
component = self.aComponentAdd[i]
if component == "Document":
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextSection:
getRecersiveSection(tcur.TextSection,self.aSectionList)
if component in self.aSectionList:
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextTable:
if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name:
VariableScope(tcur,self.insVariable,self.aObjectList,self.aComponentAdd,self.aItemList,component)
self.bModify=bFromModify
if self.bModify==True:
sItem=""
for anObject in self.aObjectList:
if anObject[:anObject.find("(")] == sVariable:
sItem = anObject
self.insVariable.setText( sItem )
genTree(sItem[sItem.find("(")+1:sItem.find(")")],self.aListFields, self.insField,self.sMyHost,2,ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
self.sValue= self.win.getListBoxItem("lstFields",self.aListFields.index(sFields))
for var in self.aVariableList:
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
fields=['name','model']
self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
if self.model_res <> []:
self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
else:
self.insVariable.addItem(var ,self.insVariable.getItemCount())
self.win.doModalDialog("lstFields",self.sValue)
else:
ErrorDialog("Please insert user define field Field-1 or Field-4","Just go to File->Properties->User Define \nField-1 E.g. http://localhost:8069 \nOR \nField-4 E.g. account.invoice")
self.win.endExecute()
def lstbox_selected(self, oItemEvent):
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sItem= self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")+1]==sItem[:sItem.find("(")+1]:
sItem = var
sMain=self.aListFields[self.win.getListBoxSelectedItemPos("lstFields")]
t=sMain.rfind('/lang')
if t!=-1:
sObject=self.getRes(self.sock,sItem[sItem.find("(")+1:-1],sMain[1:])
ids = self.sock.execute(database, uid, self.password, sObject , 'search', [])
res = self.sock.execute(database, uid, self.password, sObject , 'read',[ids[0]])
self.win.setEditText("txtUName",res[0][sMain[sMain.rfind("/")+1:]])
else:
ErrorDialog("Please select a language.")
except:
import traceback;traceback.print_exc()
self.win.setEditText("txtUName","TTT")
if self.bModify:
self.win.setEditText("txtUName",self.sGDisplayName)
def getRes(self, sock, sObject, sVar):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
res = sock.execute(database, uid, self.password, sObject , 'fields_get')
key = res.keys()
key.sort()
myval=None
if not sVar.find("/")==-1:
myval=sVar[:sVar.find("/")]
else:
myval=sVar
if myval in key:
if (res[myval]['type'] in ['many2one']):
sObject = res[myval]['relation']
return self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
else:
return sObject
def cmbVariable_selected(self, oItemEvent):
if self.count > 0 :
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
self.aListFields=[]
tempItem = self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")] == tempItem[:tempItem.find("(")]:
sItem=var
genTree(
sItem[ sItem.find("(") + 1:sItem.find(")")],
self.aListFields,
self.insField,
self.sMyHost,
2,
ending_excl=['one2many','many2one','many2many','reference'],
recur=['many2one']
)
except:
import traceback;traceback.print_exc()
def btnOk_clicked(self, oActionEvent):
self.bOkay = True
desktop=getDesktop()
doc = desktop.getCurrentComponent()
cursor = doc.getCurrentController().getViewCursor()
itemSelected = self.win.getListBoxSelectedItem( "lstFields" )
itemSelectedPos = self.win.getListBoxSelectedItemPos( "lstFields" )
txtUName = self.win.getEditText("txtUName")
sKey=u""+ txtUName
if itemSelected != "" and txtUName != "" and self.bModify==True :
oCurObj=cursor.TextField
sObjName=self.insVariable.getText()
sObjName=sObjName[:sObjName.find("(")]
sValue=u"[[ setLang" + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + ")" " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
elif itemSelected != "" and txtUName != "" :
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
sObjName=self.win.getComboBoxText("cmbVariable")
sObjName=sObjName[:sObjName.find("(")]
widget = ( cursor.TextTable and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text )
sValue = u"[[setLang" + "(" + sObjName + self.aListFields[itemSelectedPos].replace("/",".") +")" " ]]"
oInputList.Items = (sKey,sValue)
widget.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in name field \nor select particular value from the list of fields.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
AddLang()
elif __name__=="package":
g_ImplementationHelper.addImplementation( AddLang, "org.openoffice.openerp.report.langtag", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ozzyjohnson/zaproxy | python/api/src/zapv2/users.py | 15 | 2918 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class users(object):
def __init__(self, zap):
self.zap = zap
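    # Each method below wraps one ZAP 'users' API endpoint: it performs a
    # request against self.zap.base and returns the first value of the reply.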
def users_list(self, contextid=''):
return next(self.zap._request(self.zap.base + 'users/view/usersList/', {'contextId' : contextid}).itervalues())
def get_user_by_id(self, contextid='', userid=''):
return next(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId' : contextid, 'userId' : userid}).itervalues())
def get_authentication_credentials_config_params(self, contextid):
return next(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId' : contextid}).itervalues())
def get_authentication_credentials(self, contextid, userid):
return next(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId' : contextid, 'userId' : userid}).itervalues())
def new_user(self, contextid, name, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId' : contextid, 'name' : name, 'apikey' : apikey}).itervalues())
def remove_user(self, contextid, userid, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId' : contextid, 'userId' : userid, 'apikey' : apikey}).itervalues())
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId' : contextid, 'userId' : userid, 'enabled' : enabled, 'apikey' : apikey}).itervalues())
def set_user_name(self, contextid, userid, name, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId' : contextid, 'userId' : userid, 'name' : name, 'apikey' : apikey}).itervalues())
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams='', apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', {'contextId' : contextid, 'userId' : userid, 'authCredentialsConfigParams' : authcredentialsconfigparams, 'apikey' : apikey}).itervalues())
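# A minimal usage sketch, assuming a running ZAP instance; the context id,
# user name and API key below are placeholders:
#
#   from zapv2 import ZAPv2
#   zap = ZAPv2()
#   user_id = zap.users.new_user(contextid='1', name='test-user', apikey='changeme')
#   zap.users.set_user_enabled('1', user_id, 'true', apikey='changeme')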
| apache-2.0 |
ajnirp/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_unittest.py | 171 | 22302 | from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
def test_simple_unittest(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def testpassing(self):
self.assertEquals('foo', 'foo')
def test_failing(self):
self.assertEquals('foo', 'bar')
""")
reprec = testdir.inline_run(testpath)
assert reprec.matchreport("testpassing").passed
assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
testdir.makepyfile("""
import unittest
class MyTestCaseWithRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
class MyTestCaseWithoutRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
def test_something(self):
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines("""
*MyTestCaseWithRunTest::runTest*
*MyTestCaseWithoutRunTest::test_something*
*2 passed*
""")
def test_isclasscheck_issue53(testdir):
testpath = testdir.makepyfile("""
import unittest
class _E(object):
def __getattr__(self, tag):
pass
E = _E()
""")
result = testdir.runpytest(testpath)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_setup(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def setUp(self):
self.foo = 1
def setup_method(self, method):
self.foo2 = 1
def test_both(self):
self.assertEquals(1, self.foo)
assert self.foo2 == 1
def teardown_method(self, method):
assert 0, "42"
""")
reprec = testdir.inline_run("-s", testpath)
assert reprec.matchreport("test_both", when="call").passed
rep = reprec.matchreport("test_both", when="teardown")
assert rep.failed and '42' in str(rep.longrepr)
def test_setUpModule(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
l.append(1)
def tearDownModule():
del l[0]
def test_hello():
assert l == [1]
def test_world():
assert l == [1]
""")
result = testdir.runpytest(testpath)
result.stdout.fnmatch_lines([
"*2 passed*",
])
def test_setUpModule_failing_no_teardown(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
0/0
def tearDownModule():
l.append(1)
def test_hello():
pass
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=0, failed=1)
call = reprec.getcalls("pytest_runtest_setup")[0]
assert not call.item.module.l
def test_new_instances(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def test_func1(self):
self.x = 2
def test_func2(self):
assert not hasattr(self, 'x')
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_teardown(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
l = []
def test_one(self):
pass
def tearDown(self):
self.l.append(None)
class Second(unittest.TestCase):
def test_check(self):
self.assertEquals(MyTestCase.l, [None])
""")
reprec = testdir.inline_run(testpath)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0, failed
assert passed == 2
assert passed + skipped + failed == 2
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
testpath = testdir.makepyfile("""
import unittest
@unittest.skip("hello")
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
xxx
def test_one(self):
pass
@classmethod
def tearDownClass(self):
xxx
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
testdir.makepyfile("""
import unittest, pytest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
def test_method(self):
assert False, "down2"
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*tearDown*",
"*assert 0*",
"*test_method*",
"*assert False*",
"*1 failed*1 error*",
])
def test_setup_failure_is_shown(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TC(unittest.TestCase):
def setUp(self):
assert 0, "down1"
def test_method(self):
print ("never42")
xyz
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*setUp*",
"*assert 0*down1*",
"*1 failed*",
])
assert 'never42' not in result.stdout.str()
def test_setup_setUpClass(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
@classmethod
def tearDownClass(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
def setup_class(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
def teardown_class(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_adderrorandfailure_defers(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
try:
result.add%s(self, excinfo._excinfo)
except KeyboardInterrupt:
raise
except:
pytest.fail("add%s should not raise")
def test_hello(self):
pass
""" % (type, type))
result = testdir.runpytest()
assert 'should not raise' not in result.stdout.str()
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import py, pytest
import _pytest._code
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
# we fake an incompatible exception info
from _pytest.monkeypatch import monkeypatch
mp = monkeypatch()
def t(*args):
mp.undo()
raise TypeError()
mp.setattr(_pytest._code, 'ExceptionInfo', t)
try:
excinfo = excinfo._excinfo
result.add%(type)s(self, excinfo)
finally:
mp.undo()
def test_hello(self):
pass
""" % locals())
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"NOTE: Incompatible Exception Representation*",
"*ZeroDivisionError*",
"*1 failed*",
])
def test_testcase_totally_incompatible_exception_info(testdir):
item, = testdir.getitems("""
from unittest import TestCase
class MyTestCase(TestCase):
def test_hello(self):
pass
""")
item.addError(None, 42)
excinfo = item._excinfo.pop(0)
assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
pytestmark = pytest.mark.xfail
class MyTestCase(unittest.TestCase):
def test_func1(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
skip = 'dont run'
def test_func(self):
pass
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
pass
test_func.skip = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
todo = 'dont run'
def test_func(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
assert 0
test_func.todo = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
class TestTrialUnittest:
def setup_class(cls):
cls.ut = pytest.importorskip("twisted.trial.unittest")
def test_trial_testcase_runtest_not_collected(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def test_hello(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def runTest(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_trial_exceptions_with_skips(self, testdir):
testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
pytest.skip("skip_in_method")
@pytest.mark.skipif("sys.version_info != 1")
def test_hello2(self):
pass
@pytest.mark.xfail(reason="iwanto")
def test_hello3(self):
assert 0
def test_hello4(self):
pytest.xfail("i2wanto")
def test_trial_skip(self):
pass
test_trial_skip.skip = "trialselfskip"
def test_trial_todo(self):
assert 0
test_trial_todo.todo = "mytodo"
def test_trial_todo_success(self):
pass
test_trial_todo_success.todo = "mytodo"
class TC2(unittest.TestCase):
def setup_class(cls):
pytest.skip("skip_in_setup_class")
def test_method(self):
pass
""")
result = testdir.runpytest("-rxs")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*XFAIL*test_trial_todo*",
"*trialselfskip*",
"*skip_in_setup_class*",
"*iwanto*",
"*i2wanto*",
"*sys.version_info*",
"*skip_in_method*",
"*4 skipped*3 xfail*1 xpass*",
])
def test_trial_error(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet import reactor
class TC(TestCase):
def test_one(self):
crash
def test_two(self):
def f(_):
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
def test_three(self):
def f():
pass # will never get called
reactor.callLater(0.3, f)
# will crash at teardown
def test_four(self):
def f(_):
reactor.callLater(0.3, f)
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
# will crash both at test time and at teardown
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*ERRORS*",
"*DelayedCalls*",
"*test_four*",
"*NameError*crash*",
"*test_one*",
"*NameError*crash*",
"*test_three*",
"*DelayedCalls*",
"*test_two*",
"*crash*",
])
def test_trial_pdb(self, testdir):
p = testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
assert 0, "hellopdb"
""")
child = testdir.spawn_pytest(p)
child.expect("hellopdb")
child.sendeof()
def test_djangolike_testcase(testdir):
# contributed from Morten Breekevold
testdir.makepyfile("""
from unittest import TestCase, main
class DjangoLikeTestCase(TestCase):
def setUp(self):
print ("setUp()")
def test_presetup_has_been_run(self):
print ("test_thing()")
self.assertTrue(hasattr(self, 'was_presetup'))
def tearDown(self):
print ("tearDown()")
def __call__(self, result=None):
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(DjangoLikeTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
print ("_pre_setup()")
self.was_presetup = True
def _post_teardown(self):
print ("_post_teardown()")
""")
result = testdir.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*_pre_setup()*",
"*setUp()*",
"*test_thing()*",
"*tearDown()*",
"*_post_teardown()*",
])
def test_unittest_not_shown_in_traceback(testdir):
testdir.makepyfile("""
import unittest
class t(unittest.TestCase):
def test_hello(self):
x = 3
self.assertEquals(x, 4)
""")
res = testdir.runpytest()
assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
pass
def make_test():
class Test(unittest.TestCase):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
""")
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_unittest_typerror_traceback(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
def test_hello(self, arg1):
pass
""")
result = testdir.runpytest()
assert "TypeError" in result.stdout.str()
assert result.ret == 1
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_func1(self):
assert 0
@unittest.expectedFailure
def test_func2(self):
assert 1
""")
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*XFAIL*MyTestCase*test_func1*",
"*XPASS*MyTestCase*test_func2*",
"*1 xfailed*1 xpass*",
])
@pytest.mark.parametrize('fix_type, stmt', [
('fixture', 'return'),
('yield_fixture', 'yield'),
])
def test_unittest_setup_interaction(testdir, fix_type, stmt):
testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
@pytest.{fix_type}(scope="class", autouse=True)
def perclass(self, request):
request.cls.hello = "world"
{stmt}
@pytest.{fix_type}(scope="function", autouse=True)
def perfunction(self, request):
request.instance.funcname = request.function.__name__
{stmt}
def test_method1(self):
assert self.funcname == "test_method1"
assert self.hello == "world"
def test_method2(self):
assert self.funcname == "test_method2"
def test_classattr(self):
assert self.__class__.hello == "world"
""".format(fix_type=fix_type, stmt=stmt))
result = testdir.runpytest()
result.stdout.fnmatch_lines("*3 passed*")
def test_non_unittest_no_setupclass_support(testdir):
testpath = testdir.makepyfile("""
class TestFoo:
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
def test_method1(self):
assert self.x == 0
@classmethod
def tearDownClass(cls):
cls.x = 1
def test_not_teareddown():
assert TestFoo.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
assert False
def test_func1(self):
cls.x = 10
@classmethod
def tearDownClass(cls):
cls.x = 100
def test_notTornDown():
assert MyTestCase.x == 1
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
testdir.makeconftest("""
def pytest_runtest_call(__multicall__, item):
__multicall__.execute()
assert 0
""")
testdir.makepyfile("""
import unittest
class TestIt(unittest.TestCase):
def test_func(self):
0/0
""")
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
""")
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue1169(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.skip("skipping due to reasons")
def test_skip(self):
self.fail()
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*skipping due to reasons*
*1 skipped*
""")
| mpl-2.0 |
blahgeek/xiami_downloader | lib/track.py | 1 | 2132 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by [email protected] at 2014-04-18
import os
from .decrypt import decrypt
from .http import download
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC
class Track(object):
''' A song '''
def __init__(self, tag):
''' @tag: a BeautifulSoup Tag representing a track'''
keys = ('title', 'album_name', 'artist')
values = map(lambda x: tag.find(name=x).text, keys)
self.id3 = dict(zip(keys, values))
geturl = lambda key: tag.find(name=key).text.strip()
self.url = decrypt(geturl('location'))
self.picurl = geturl('pic')
self.lyricurl = geturl('lyric')
if not self.lyricurl.endswith('.lrc'):
self.lyricurl = None
self.filename = self.id3['artist'] + ' - ' + self.id3['title'] + '.mp3'
def download(self, save_path, progress_bar):
self.filename = download(self.url, save_path, self.filename, progress_bar)
return self.filename
def download_lyric(self, save_path, progress_bar):
if self.lyricurl is None:
progress_bar.msg('No lyric available.\n')
return
progress_bar.msg('Downloading lyric...')
download(self.lyricurl, save_path, self.filename, None)
progress_bar.msg('Done.\n')
def patch_id3(self, save_path, progress_bar):
filepath = os.path.join(save_path, self.filename).encode('utf8')
progress_bar.msg('Patching ID3...')
cover_f, mime_type = download(self.picurl)
easyid3 = EasyID3()
easyid3['title'] = self.id3['title']
easyid3['album'] = self.id3['album_name']
easyid3['artist'] = self.id3['artist'].split(';')
easyid3['performer'] = easyid3['artist'][0]
easyid3.save(filepath)
harid3 = ID3(filepath)
harid3.add(APIC(
encoding = 3,
mime = mime_type,
type = 3,
desc = 'Cover picture from xiami.com fetched by BlahGeek.',
data = cover_f.read()))
harid3.save()
progress_bar.msg('Done.\n')
| mit |
cryptickp/libcloud | libcloud/test/compute/test_types.py | 26 | 1969 | from unittest import TestCase
from libcloud.compute.types import Provider, NodeState, StorageVolumeState, \
VolumeSnapshotState, Type
class TestType(Type):
INUSE = "inuse"
class TestTestType(TestCase):
model = TestType
attribute = TestType.INUSE
def test_provider_tostring(self):
self.assertEqual(Provider.tostring(TestType.INUSE), "INUSE")
def test_provider_fromstring(self):
self.assertEqual(TestType.fromstring("inuse"), TestType.INUSE)
def test_provider_fromstring_caseinsensitive(self):
self.assertEqual(TestType.fromstring("INUSE"), TestType.INUSE)
class TestProvider(TestCase):
def test_provider_tostring(self):
self.assertEqual(Provider.tostring(Provider.RACKSPACE), "RACKSPACE")
def test_provider_fromstring(self):
self.assertEqual(Provider.fromstring("rackspace"), Provider.RACKSPACE)
class TestNodeState(TestCase):
def test_nodestate_tostring(self):
self.assertEqual(NodeState.tostring(NodeState.RUNNING), "RUNNING")
def test_nodestate_fromstring(self):
self.assertEqual(NodeState.fromstring("running"), NodeState.RUNNING)
class TestStorageVolumeState(TestCase):
def test_storagevolumestate_tostring(self):
self.assertEqual(
StorageVolumeState.tostring(StorageVolumeState.AVAILABLE),
"AVAILABLE"
)
def test_storagevolumestate_fromstring(self):
self.assertEqual(
StorageVolumeState.fromstring("available"),
StorageVolumeState.AVAILABLE
)
class TestVolumeSnapshotState(TestCase):
def test_volumesnapshotstate_tostring(self):
self.assertEqual(
VolumeSnapshotState.tostring(VolumeSnapshotState.AVAILABLE),
"AVAILABLE"
)
def test_volumesnapshotstate_fromstring(self):
self.assertEqual(
VolumeSnapshotState.fromstring("available"),
VolumeSnapshotState.AVAILABLE
)
| apache-2.0 |
lmprice/ansible | lib/ansible/modules/web_infrastructure/htpasswd.py | 48 | 8684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
type: bool
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
        if it does not already exist. If set to "no", it will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
extends_documentation_fragment: files
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None, no_log=True),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library")
    # Check file for blank lines in an effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
# If the file gets edited, it returns true, so only edit the file if it has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
[f.write(line) for line in lines if line.strip()]
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
nephila/cmsplugin-filer | cmsplugin_filer_teaser/south_migrations/0002_add_teaser_style_choice.py | 11 | 14099 |
from south.db import db
from django.db import models
from cmsplugin_filer_teaser.models import *
from cmsplugin_filer_utils.migration import rename_tables_new_to_old
class Migration:
cms_plugin_table_mapping = (
# (old_name, new_name),
('cmsplugin_filerteaser', 'cmsplugin_filer_teaser_filerteaser'),
)
def forwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
# Adding field 'FilerTeaser.style'
db.add_column('cmsplugin_filerteaser', 'style', orm['cmsplugin_filer_teaser.filerteaser:style'])
def backwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
# Deleting field 'FilerTeaser.style'
db.delete_column('cmsplugin_filerteaser', 'style')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cmsplugin_filer_teaser.filerteaser': {
'Meta': {'db_table': "'cmsplugin_filerteaser'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_teaser']
| bsd-3-clause |
EKiefer/edge-starter | py34env/Lib/site-packages/pip/utils/outdated.py | 513 | 5455 | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
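# For illustration only (the prefix and versions below are invented): the JSON
# that GlobalSelfCheckState.save() writes to selfcheck.json is keyed by
# sys.prefix and holds the two fields used above, e.g.
#
#   {"/usr": {"last_check": "2016-01-01T00:00:00Z", "pypi_version": "8.0.0"}}
#
# VirtualenvSelfCheckState stores the same two fields, without the sys.prefix
# level, in <sys.prefix>/pip-selfcheck.json.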
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| mit |
JimTheMan/Ngrx-Effect-Chaining-Example | NgrxEffectChaining/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
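# The helper below is illustrative only and is not part of the original gyp
# sources; it exercises the quoting rules documented above on a few made-up
# arguments.
def _example_rsp_quoting():
  """Demonstrates QuoteForRspFile/EncodeRspFileList on sample arguments."""
  assert QuoteForRspFile('simple') == '"simple"'
  assert QuoteForRspFile('50% off') == '"50%% off"'
  assert QuoteForRspFile('say "hi"') == '"say \\"hi\\""'
  assert EncodeRspFileList(['cl.exe', '/nologo', 'my file.cc']) == \
      'cl.exe "/nologo" "my file.cc"'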
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
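# Sketch of the behaviour above with invented data: if one configuration lists
#   ['$(DXSDK_DIR)include', 'third_party\\foo']
# and another lists
#   ['$(DXSDK_DIR)include', 'third_party\\bar'],
# only the (macro-expanded) '$(DXSDK_DIR)include' entry is common to all
# targets; it is stripped from every config and returned, while the
# non-shared entries remain in each target's msvs_system_include_dirs.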
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
    # There are two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
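  # For example (invented config names): with an 'x64' architecture a global
  # config 'Debug' is remapped here to 'Debug_x64', while an 'x86' override
  # on 'Release_x64' maps back to 'Release'.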
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1, we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
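# Illustrative sketch only (not part of the original gyp source); this helper
# is never called by the build, it just shows what _LanguageMatchesForPch
# treats as "the same language" for precompiled-header purposes.
def _ExamplePchLanguageMatch():
  assert _LanguageMatchesForPch('.c', '.c')       # C pairs with C
  assert _LanguageMatchesForPch('.cc', '.cpp')    # any C++ extension pairs up
  assert not _LanguageMatchesForPch('.c', '.cc')  # C never pairs with C++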
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
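# Hypothetical usage sketch (not part of the original file): the expansions
# dict would normally come from MsvsSettings.GetVSMacroEnv(); the values below
# are made up for illustration.
def _ExampleExpandMacros():
  expansions = {'$(OutDir)': 'out\\Release', '$(TargetName)': 'foo'}
  # Returns 'out\\Release\\foo.dll'.
  return ExpandMacros('$(OutDir)\\$(TargetName).dll', expansions)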
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
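# Illustrative sketch only (not in the original source): the `set` dump below
# is invented, but shows which variables survive extraction and that PATH gets
# the running python's directory prepended.
def _ExampleExtractImportantEnvironment():
  sample_set_output = '\n'.join([
      'ALLUSERSPROFILE=C:\\ProgramData',  # dropped: not in envvars_to_save
      'INCLUDE=C:\\VS\\include',
      'LIB=C:\\VS\\lib',
      'PATH=C:\\VS\\bin;C:\\Windows\\system32',
      'SYSTEMROOT=C:\\Windows',
      'TEMP=C:\\Users\\dev\\AppData\\Local\\Temp',
      'TMP=C:\\Users\\dev\\AppData\\Local\\Temp',
  ])
  # Returns a dict keyed by upper-cased names, e.g. env['INCLUDE'].
  return _ExtractImportantEnvironment(sample_set_output)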
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
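# Hypothetical sketch (not in the original source) of the block layout
# _FormatAsEnvironmentBlock produces: each key=value entry is NUL-terminated
# and the whole block ends with an extra NUL (dict ordering may vary), e.g.
# 'SYSTEMROOT=C:\\Windows\x00TMP=C:\\t\x00\x00'.
def _ExampleFormatAsEnvironmentBlock():
  return _FormatAsEnvironmentBlock({'SYSTEMROOT': 'C:\\Windows', 'TMP': 'C:\\t'})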
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
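# Illustrative only (not part of gyp): _ExtractCLPath keeps the first 'LOC:'
# line printed by the `for %i in (cl.exe) do @echo LOC:%~$PATH:i` loop used in
# GenerateEnvironmentFiles below; the sample output here is invented.
def _ExampleExtractCLPath():
  sample_where_output = 'LOC:C:\\VS\\VC\\bin\\cl.exe\nLOC:C:\\other\\cl.exe\n'
  # Returns 'C:\\VS\\VC\\bin\\cl.exe'.
  return _ExtractCLPath(sample_where_output)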
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
  meet your requirements (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| gpl-3.0 |
tyndare/osmose-backend | analysers/analyser_merge_bicycle_rental_FR_bm.py | 1 | 3368 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Merge import Analyser_Merge, Source, SHP, Load, Mapping, Select, Generate
class Analyser_Merge_Bicycle_Rental_FR_bm(Analyser_Merge):
def __init__(self, config, logger = None):
self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") }
self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") }
self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") }
Analyser_Merge.__init__(self, config, logger,
"http://data.bordeaux-metropole.fr/data.php?themes=10",
u"Station VCUB",
SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016",
fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")),
Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154),
Mapping(
select = Select(
types = ["nodes"],
tags = {"amenity": "bicycle_rental"}),
osmRef = "ref",
conflationDistance = 100,
generate = Generate(
static1 = {
"amenity": "bicycle_rental",
"network": "VCUB"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOM",
"ref": "NUMSTAT",
"capacity": "NBSUPPOR",
"vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None,
"description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
| gpl-3.0 |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/sysconfig.py | 13 | 26368 | """Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include': '{base}/include/python{py_version_short}',
'platinclude': '{platbase}/include/python{py_version_short}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{base}/include/python',
'platinclude': '{base}/include/python',
'scripts': '{base}/bin',
'data' : '{base}',
},
'nt': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2_home': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data' : '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'java': {
'stdlib': '{base}/lib/jython',
'platstdlib': '{base}/lib/jython',
'purelib': '{base}/lib/jython',
'platlib': '{base}/lib/jython',
'include': '{base}/include/jython',
'platinclude': '{base}/include/jython',
'scripts': '{base}/bin',
'data' : '{base}',
},
'java_user': {
'stdlib': '{userbase}/lib/jython{py_version_short}',
'platstdlib': '{userbase}/lib/jython{py_version_short}',
'purelib': '{userbase}/lib/jython{py_version_short}/site-packages',
'platlib': '{userbase}/lib/jython{py_version_short}/site-packages',
'include': '{userbase}/include/jython{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
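# Hypothetical usage sketch (not part of the original module): _subst_vars
# fills {placeholders} from local_vars first and falls back to os.environ.
def _example_subst_vars():
    # Returns '/usr/local/lib/python2.7'.
    return _subst_vars('{base}/lib/python{py_version_short}',
                       {'base': '/usr/local', 'py_version_short': '2.7'})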
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt', 'java'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return env_base if env_base else joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
return env_base if env_base else \
joinuser("~", "Library", framework, "%d.%d"
% (sys.version_info[:2]))
return env_base if env_base else joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename) as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
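# Sketch only (not in the original module): shows the name/value pairs
# _parse_makefile recovers, including $(VAR) interpolation; the throwaway
# makefile written below is invented for illustration.
def _example_parse_makefile():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.mk', delete=False)
    tmp.write('prefix=/usr\nbindir=$(prefix)/bin\nOPT=-O2 -g\n')
    tmp.close()
    try:
        # Returns {'prefix': '/usr', 'bindir': '/usr/bin', 'OPT': '-O2 -g'}.
        return _parse_makefile(tmp.name)
    finally:
        os.unlink(tmp.name)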
def _get_makefile_filename():
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
return os.path.join(get_path('platstdlib'), "config", "Makefile")
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = _get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
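# Hypothetical sketch (not part of the module): parse_config_h accepts any
# file-like object with readline(), so an in-memory buffer works for testing.
def _example_parse_config_h():
    import io
    fake_pyconfig = io.StringIO(
        u'#define HAVE_UNISTD_H 1\n'
        u'/* #undef HAVE_FOO */\n'
        u'#define VERSION "2.7"\n')
    # Returns {'HAVE_UNISTD_H': 1, 'HAVE_FOO': 0, 'VERSION': '"2.7"'}.
    return parse_config_h(fake_pyconfig)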
def get_config_h_filename():
"""Returns the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Returns a tuple containing the schemes names."""
schemes = _INSTALL_SCHEMES.keys()
schemes.sort()
return tuple(schemes)
def get_path_names():
"""Returns a tuple containing the paths names."""
return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
if expand:
return _expand_vars(scheme, vars)
else:
return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
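# Illustrative only (not part of the stdlib module): with expand=False the raw
# template of a scheme is returned, so nothing needs to be read from the
# Makefile or pyconfig.h.
def _example_get_paths_unexpanded():
    # Returns '{base}/lib/python{py_version_short}/site-packages'.
    return get_paths('posix_prefix', expand=False)['purelib']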
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
import re
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
                # compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
import re
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because a
# bootstrap problem. We use a dict to get an error
# if some suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxint]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
finally:
f.close()
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
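# Hypothetical sketch (not part of the module): get_platform() and
# get_python_version() are typically combined to name per-platform build
# directories, e.g. something like 'build/lib.linux-x86_64-2.7'.
def _example_build_dir_name():
    return 'build/lib.%s-%s' % (get_platform(), get_python_version())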
| epl-1.0 |
Endika/django | tests/many_to_one/tests.py | 15 | 29129 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
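# For orientation only (not part of the original test module): these tests
# assume roughly the following many-to-one relationship in .models; the exact
# definitions may differ, so this is a commented sketch rather than real code.
#
#   class Reporter(models.Model):
#       first_name = models.CharField(max_length=30)
#       last_name = models.CharField(max_length=30)
#       email = models.EmailField()
#
#   class Article(models.Model):
#       headline = models.CharField(max_length=100)
#       pub_date = models.DateField()
#       reporter = models.ForeignKey(Reporter, models.CASCADE)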
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"]
)
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"deprecated due to the implicit save() that happens. Use "
"article_set.set() instead."
)
with self.assertRaisesMessage(RemovedInDjango20Warning, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Check that implied __exact also works
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
["<Article: John's second story>", "<Article: This is a test>"]
)
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = ["<Reporter: John Smith>"]
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith)
# Reporters can query in the opposite direction of the ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith)
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
john_smith
)
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith)
# Check that implied __exact also works.
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
qs = Article.objects.filter(
reporter=self.r,
).distinct().order_by().values('reporter__first_name', 'reporter__last_name')
self.assertEqual([d], list(qs))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'day')),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'month')),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'year')),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]
)
def test_delete(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(
Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>", "<Reporter: Paul Jones>"]
)
self.r2.delete()
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
["<Article: John's second test>", "<Article: This is a test>"]
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
with self.assertRaises(MultipleObjectsReturned):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
)
def test_deepcopy_and_circular_references(self):
# Regression for #12876 -- Model methods that include queries that
# recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = six.text_type(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list('reporter__notafield')
article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns exactly the same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None does not fail at assignment time, even though Child.parent is null=False.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
with self.assertRaises(ValueError):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name='xyzzy', parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
ny = City.objects.create(name='New York', id=2 ** 33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
with self.assertRaises(ValueError):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
# Test that the <field>_set manager does not join on Null value fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
# The object isn't saved and thus the relation field is null - we won't even
# execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
# Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(School.objects.all(), ["<School: School object>"])
self.assertEqual(public_student.school, public_school)
# Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
School.objects.use_for_related_fields = True
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), 'reporter'))
| bsd-3-clause |
TangibleDisplay/garden-ddd | view.py | 4 | 7639 | from object_renderer import ObjectRenderer
from kivy.core.window import Window # noqa
from kivy.lang import Builder
from kivy.properties import NumericProperty, BooleanProperty
from kivy.clock import Clock
from kivy.animation import Animation
from math import cos, sin, pi
def dist(p1, p2):
return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** .5
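# Illustrative check (not in the original module): dist() is a plain Euclidean
# distance between two 2D points, e.g. dist((0, 0), (3, 4)) == 5.0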
class MultitouchCamera(object):
def __init__(self, **kwargs):
super(MultitouchCamera, self).__init__(**kwargs)
self.touches = []
self.touches_center = []
self.touches_dist = 0
Clock.schedule_interval(self.update_cam, 0)
def on_touch_down(self, touch):
if super(MultitouchCamera, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos):
self.touches.append(touch)
touch.grab(self)
if len(self.touches) > 1:
self.touches_center = self.get_center()
self.touches_dist = self.get_dist(self.touches_center)
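# update_cam runs every frame (scheduled in __init__ with interval 0):
# - one finger pans the camera (dx/dy feed cam_translation x/y),
# - two or more fingers rotate the camera from the movement of the touch
#   center and zoom from the change in mean distance to that center,
#   with the z translation clamped to +/- 0.5 per frame.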
def update_cam(self, dt):
if not self.touches:
return
elif len(self.touches) == 1:
self.cam_translation[0] += self.touches[0].dx / 100.
self.cam_translation[1] += self.touches[0].dy / 100.
else:
c = self.get_center()
d = self.get_dist(c)
self.cam_rotation[1] += (c[0] - self.touches_center[0]) / 5.
self.cam_rotation[0] -= (c[1] - self.touches_center[1]) / 5.
self.cam_translation[2] += min(.5, max(-.5, (d - self.touches_dist) / 10.))
self.touches_center = c
self.touches_dist = d
return True
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.touches.remove(touch)
self.touches_center = self.get_center()
self.touches_dist = self.get_dist(self.touches_center)
else:
return super(MultitouchCamera, self).on_touch_up(touch)
def get_center(self):
return (
sum(t.x for t in self.touches) / float(len(self.touches)),
sum(t.y for t in self.touches) / float(len(self.touches))
) if self.touches else self.center
def get_dist(self, center):
return (sum(
dist(t.pos, center)
for t in self.touches
) / float(len(self.touches))) if self.touches else 0
class MultitouchCenteredCamera(MultitouchCamera):
def setup_scene(self):
super(MultitouchCenteredCamera, self).setup_scene()
# Reset the origin of each camera rotation instruction so the model rotates
# around the scene center (assumes the renderer exposes cam_rot_x/y/z).
self.cam_rot_x.origin = (0, 0, 0)
self.cam_rot_y.origin = (0, 0, 0)
self.cam_rot_z.origin = (0, 0, 0)
def update_cam(self, dt):
if not self.touches:
return
elif len(self.touches) == 1:
self.cam_rotation[1] += self.touches[0].dx / 5.
self.cam_rotation[0] -= self.touches[0].dy / 5.
else:
c = self.get_center()
d = self.get_dist(c)
self.obj_scale += (d - self.touches_dist) / 100.
self.touches_center = c
self.touches_dist = d
return True
class BaseView(ObjectRenderer):
time = NumericProperty(0)
light_radius = NumericProperty(20)
move_light = BooleanProperty(True)
def reset(self, *args):
Animation(
cam_rotation=(20, 0, 0),
cam_translation=(0, -3, -8),
t='in_out_quad',
d=2).start(self)
def update_lights(self, dt):
if not self.move_light:
return
self.time += dt
self.time %= 2 * pi
for i in range(self.nb_lights):
a = self.time + i * 2 * pi / self.nb_lights
self.light_sources[i] = [
cos(a) * self.light_radius, 5, sin(a) * self.light_radius, 1.0]
for k in self.light_sources.keys():
if k >= self.nb_lights:
del(self.light_sources[k])
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
Clock.unschedule(self.reset)
return super(BaseView, self).on_touch_down(touch)
def on_touch_up(self, touch):
if len(self.touches) == 1 and touch in self.touches:
Clock.unschedule(self.reset)
Clock.schedule_once(self.reset, 3)
return super(BaseView, self).on_touch_up(touch)
class View(MultitouchCamera, BaseView):
pass
class CenteredView(MultitouchCenteredCamera, BaseView):
min_scale = NumericProperty(0)
max_scale = NumericProperty(100)
def on_obj_scale(self, *args):
self.obj_scale = max(
self.min_scale, min(
self.max_scale,
self.obj_scale))
super(CenteredView, self).on_obj_scale(*args)
KV = '''
#:import listdir os.listdir
FloatLayout:
View:
canvas.before:
Color:
rgba: .6, .6, .6, 1
Rectangle:
pos: self.pos
size: self.size
id: rendering
scene: 'data/3d/%s' % scene.text
obj_scale: scale.value if scale.value > 0 else 1
display_all: True
ambiant: ambiant.value
diffuse: diffuse.value
specular: specular.value
on_parent: self.reset()
mode: modechoice.text
light_radius: light_radius.value
nb_lights: int(nb_lights.value)
move_light: move_light.active
GridLayout:
size_hint: 1, None
rows: 2
Slider:
min: 0
max: 10
id: scale
step: .1
value: rendering.obj_scale
Spinner:
id: scene
text: 'colorplane.obj'
values:
sorted([x for x in listdir('data/3d/')
if x.lower().endswith(('.3ds', '.obj'))])
Slider:
min: 0
max: 50
step: 1
value: 10
id: light_radius
Slider:
min: 0
max: 50
step: 1
value: 4
id: nb_lights
Spinner:
text: rendering.mode
values: 'triangles', 'points', 'lines'
id: modechoice
Slider:
id: ambiant
min: 0
max: 1
value: .01
step: .01
Slider:
id: diffuse
min: 0
max: 1
value: 1
step: .01
Slider:
id: specular
min: 0
max: 1
value: 1
step: .01
CheckBox:
id: move_light
active: True
Label:
size_hint_y: .1
text: 'scale %s' % scale.value
Label:
size_hint_y: .1
text: 'scene'
Label:
size_hint_y: .1
text: 'light_radius %s' % light_radius.value
Label:
size_hint_y: .1
text: 'nb_lights %s' % nb_lights.value
Label:
text: 'mode'
Label:
size_hint_y: .1
text: 'ambiant %s' % ambiant.value
Label:
size_hint_y: .1
text: 'diffuse %s' % diffuse.value
Label:
size_hint_y: .1
text: 'specular %s' % specular.value
Label:
text: 'move lights'
'''
if __name__ == '__main__':
from kivy.app import App
class App3D(App):
def build(self):
root = Builder.load_string(KV)
Clock.schedule_interval(root.ids.rendering.update_lights, 0)
return root
App3D().run()
| mit |
timeyyy/PyUpdater | pyupdater/utils/keydb.py | 1 | 4569 | # --------------------------------------------------------------------------
# Copyright 2014 Digital Sapphire Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from __future__ import unicode_literals
import logging
import time
from pyupdater import settings
log = logging.getLogger(__name__)
class KeyDB(object):
"""Handles finding, sorting, getting meta-data, moving packages.
Kwargs:
data_dir (str): Path to directory containing key.db
load (bool):
Meaning:
True: Load db on initialization
False: Do not load db on initialization
"""
def __init__(self, db, load=False):
self.db = db
self.data = None
if load is True:
self.load()
def add_key(self, public, private, key_type='ed25519'):
"""Adds key pair to database
Args:
public (str): Public key
private (str): Private key
key_type (str): The type of key pair. Default ed25519
"""
_time = time.time()
if self.data is None:
self.load()
num = len(self.data) + 1
data = {
'date': _time,
'public': public,
'private': private,
'revoked': False,
'key_type': key_type,
}
log.info('Adding public key to db. {}'.format(len(public)))
self.data[num] = data
self.save()
def get_public_keys(self):
"Returns a list of all valid public keys"
return self._get_keys('public')
def get_private_keys(self):
"Returns a list of all valid private keys"
return self._get_keys('private')
def _get_keys(self, key):
# Returns a list of non revoked keys
if self.data is None:
self.load()
order = []
keys = []
for k, v in self.data.items():
if v['revoked'] is False:
order.append(int(k))
else:
log.debug('Revoked key: {}'.format(k))
order = sorted(order)
for o in order:
try:
data = self.data[o]
log.debug('Got key data')
pub_key = data[key]
keys.append(pub_key)
log.debug('Got public key')
except KeyError: # pragma: no cover
log.debug('Key error')
continue
return keys
def get_revoked_key(self):
"Returns most recent revoked key pair"
if self.data is None:
self.load()
keys = []
for k, v in self.data.items():
if v['revoked'] is True:
keys.append(int(k))
if len(keys) >= 1:
key = sorted(keys)[-1]
info = self.data[key]
else:
info = None
return info
def revoke_key(self, count=1):
"""Revokes key pair
Args:
count (int): The number of keys to revoke. Oldest first
"""
if self.data is None:
self.load()
log.debug('Collecting keys')
keys = map(str, self.data.keys())
keys = sorted(keys)
c = 0
for k in keys:
if c >= count:
break
k = int(k)
if self.data.get(k) is None:
count += 1
continue
if self.data[k]['revoked'] is False:
self.data[k]['revoked'] = True
log.debug('Revoked key')
c += 1
self.save()
def load(self):
"Loads data from key.db"
self.data = self.db.load(settings.CONFIG_DB_KEY_KEYS)
if self.data is None:
log.info('Key.db file not found creating new')
self.data = dict()
def save(self):
"Saves data to key.db"
log.debug('Saving keys...')
self.db.save(settings.CONFIG_DB_KEY_KEYS, self.data)
log.debug('Saved keys...')
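# --- Illustrative usage sketch (not part of the original module) ---
# Assumes `storage` is any object exposing load(key)/save(key, data), which is
# what KeyDB expects for its `db` argument; the variable names are made up here.
#
#   key_db = KeyDB(storage, load=True)
#   key_db.add_key(public='...public key...', private='...private key...')
#   print(key_db.get_public_keys())   # all non-revoked public keys, oldest first
#   key_db.revoke_key(count=1)        # revoke the oldest key pair
#   print(key_db.get_revoked_key())   # info dict for the most recent revoked pair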
| bsd-2-clause |
osamak/student-portal | api/serializers.py | 2 | 4168 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate
from rest_framework import serializers, exceptions
from rest_framework.authtoken.serializers import AuthTokenSerializer
from activities.models import Activity, Episode
from activities.models import Category as ActivityCategory
from clubs.models import Club
from media.models import Buzz, BuzzView
from niqati.models import Code, Category, Collection, Order
class ModifiedAuthTokenSerializer(AuthTokenSerializer):
def validate(self, attrs):
"""Allow both usernames and emails."""
identification = attrs.get('username')
password = attrs.get('password')
if identification and password:
if '@' in identification:
username = identification.split('@')[0]
else:
username = identification
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.ValidationError(msg)
# Currently, the app is only available for students.
try:
college = user.common_profile.college
except (ObjectDoesNotExist, AttributeError):
raise exceptions.ValidationError(u"لم تسجل في بوابة إنجاز كطالب!")  # "You have not registered in the Enjaz portal as a student!"
else:
msg = _('Unable to log in with provided credentials.')
raise exceptions.ValidationError(msg)
else:
msg = _('Must include "username" and "password".')
raise exceptions.ValidationError(msg)
attrs['user'] = user
return attrs
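# Illustrative sketch (not part of the original module): this serializer is
# presumably plugged into a DRF token view and accepts either a username or an
# email in the `username` field. The credentials below are made up.
#
#   serializer = ModifiedAuthTokenSerializer(data={'username': '[email protected]',
#                                                  'password': 'secret'})
#   if serializer.is_valid():
#       user = serializer.validated_data['user']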
class EpisodeSerializer(serializers.ModelSerializer):
class Meta:
model = Episode
fields = ('pk', 'start_datetime', 'end_datetime', 'location')
class ClubSerializer(serializers.ModelSerializer):
class Meta:
model = Club
fields = ('pk', 'name', 'english_name', 'email', 'coordinator', 'gender', 'city')
class ActivityCategorySerializer(serializers.ModelSerializer):
class Meta:
model = ActivityCategory
fields = ('pk', 'ar_name')
class ActivitySerializer(serializers.ModelSerializer):
primary_club = ClubSerializer(read_only=True)
category = ActivityCategorySerializer(read_only=True)
secondary_clubs = ClubSerializer(many=True, read_only=True)
episode_set = EpisodeSerializer(many=True, read_only=True)
class Meta:
model = Activity
fields = ('pk', 'name', 'primary_club', 'secondary_clubs', 'public_description', 'episode_set', 'gender', 'category')
class BuzzSerializer(serializers.ModelSerializer):
class Meta:
model = Buzz
fields = ('pk', 'title', 'body', 'image', 'is_push')
class BuzzViewSerializer(serializers.ModelSerializer):
class Meta:
model = BuzzView
fields = ('pk', 'viewer', 'buzz', 'off_date')
class NiqatiActivitySerializer(serializers.ModelSerializer):
class Meta:
model = Activity
fields = ('name',)
class NiqatiEpisodeSerializer(serializers.ModelSerializer):
activity = NiqatiActivitySerializer(read_only=True)
class Meta:
model = Episode
fields = ('activity',)
class OrderSerializer(serializers.ModelSerializer):
episode = NiqatiEpisodeSerializer(read_only=True)
class Meta:
model = Order
fields = ('episode',)
class NiqatiCategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('ar_label',)
class CollectionSerializer(serializers.ModelSerializer):
order = OrderSerializer(read_only=True)
category = NiqatiCategorySerializer(read_only=True)
class Meta:
model = Collection
fields = ('category', 'order')
class CodeSerializer(serializers.ModelSerializer):
collection = CollectionSerializer(read_only=True)
class Meta:
model = Code
fields = ('pk', 'points', 'redeem_date', 'collection')
| agpl-3.0 |
p-l-/miasm | example/jitter/mips32.py | 6 | 2383 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from argparse import ArgumentParser
from miasm2.analysis import debugging, gdbserver
from miasm2.jitter.csts import *
from miasm2.analysis.machine import Machine
from pdb import pm
parser = ArgumentParser(
description="""Sandbox raw binary with mips32 engine
(ex: jit_mips32.py example/mips32_sc_l.bin 0)""")
parser.add_argument("-r", "--log-regs",
help="Log registers value for each instruction",
action="store_true")
parser.add_argument("-m", "--log-mn",
help="Log desassembly conversion for each instruction",
action="store_true")
parser.add_argument("-n", "--log-newbloc",
help="Log basic blocks processed by the Jitter",
action="store_true")
parser.add_argument("-j", "--jitter",
help="Jitter engine. Possible values are : tcc (default), llvm",
default="tcc")
parser.add_argument("-d", "--debugging",
help="Attach a CLI debugguer to the sandboxed programm",
action="store_true")
parser.add_argument("binary",
help="binary to run")
parser.add_argument("addr",
help="start exec on addr")
machine = Machine("mips32l")
def code_sentinelle(jitter):
jitter.run = False
jitter.pc = 0
return True
def jit_mips32_binary(args):
filepath, entryp = args.binary, int(args.addr, 16)
myjit = machine.jitter(jit_type = args.jitter)
myjit.init_stack()
# Log level (if available with jitter engine)
myjit.jit.log_regs = args.log_regs
myjit.jit.log_mn = args.log_mn
myjit.jit.log_newbloc = args.log_newbloc
myjit.vm.add_memory_page(0, PAGE_READ | PAGE_WRITE, open(filepath).read())
myjit.add_breakpoint(0x1337BEEF, code_sentinelle)
# for stack
myjit.vm.add_memory_page(0xF000, PAGE_READ | PAGE_WRITE, "\x00"*0x1000)
myjit.cpu.SP = 0xF800
myjit.cpu.RA = 0x1337BEEF
myjit.init_run(entryp)
# Handle debugging
if args.debugging is True:
dbg = debugging.Debugguer(myjit)
cmd = debugging.DebugCmd(dbg)
cmd.cmdloop()
else:
print(myjit.continue_run())
return myjit
if __name__ == '__main__':
from sys import stderr
args = parser.parse_args()
myjit = jit_mips32_binary(args)
| gpl-2.0 |
chipsCode/cocosbuilder3 | CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>\'"
&
foo'
converted_apos = converted.replace("'", ''')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
cyberark-bizdev/ansible | lib/ansible/modules/remote_management/hpilo/hpilo_boot.py | 56 | 6644 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hpilo_boot
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Boot system using specific media through HP iLO interface
description:
- "This module boots a system through its HP iLO interface. The boot media
can be one of: cdrom, floppy, hdd, network or usb."
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
media:
description:
- The boot media to boot the system from
default: network
choices: [ "cdrom", "floppy", "hdd", "network", "normal", "usb" ]
image:
description:
- The URL of a cdrom, floppy or usb boot media image.
protocol://username:password@hostname:port/filename
- protocol is either 'http' or 'https'
- username:password is optional
- port is optional
state:
description:
- The state of the boot media.
- "no_boot: Do not boot from the device"
- "boot_once: Boot from the device once and then notthereafter"
- "boot_always: Boot from the device each time the serveris rebooted"
- "connect: Connect the virtual media device and set to boot_always"
- "disconnect: Disconnects the virtual media device and set to no_boot"
- "poweroff: Power off the server"
default: boot_once
choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
force:
description:
- Whether to force a reboot (even when the system is already booted).
- As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
default: no
choices: [ "yes", "no" ]
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- To use a USB key image you need to specify floppy as boot media.
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
hpilo_boot:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
media: cdrom
image: http://some-web-server/iso/boot.iso
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- name: Power off a server
hpilo_boot:
host: YOUR_ILO_HOST
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
state: poweroff
delegate_to: localhost
'''
RETURN = '''
# Default return values
'''
import time
import warnings
try:
import hpilo
HAS_HPILO = True
except ImportError:
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', required=True),
login=dict(type='str', default='Administrator'),
password=dict(type='str', default='admin', no_log=True),
media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
image=dict(type='str'),
state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
force=dict(type='bool', default=False),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
)
)
if not HAS_HPILO:
module.fail_json(msg='The hpilo python module is required')
host = module.params['host']
login = module.params['login']
password = module.params['password']
media = module.params['media']
image = module.params['image']
state = module.params['state']
force = module.params['force']
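# The user-facing choice (e.g. 'TLSv1_2') is mapped below to the matching
# PROTOCOL_* constant of the standard ssl module, accessed via hpilo.ssl.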
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
changed = False
status = {}
power_status = 'UNKNOWN'
if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
# Workaround for: Error communicating with iLO: Problem manipulating EV
try:
ilo.set_one_time_boot(media)
except hpilo.IloError:
time.sleep(60)
ilo.set_one_time_boot(media)
# TODO: Verify if image URL exists/works
if image:
ilo.insert_virtual_media(media, image)
changed = True
if media == 'cdrom':
ilo.set_vm_status('cdrom', state, True)
status = ilo.get_vm_status()
changed = True
elif media in ('floppy', 'usb'):
ilo.set_vf_status(state, True)
status = ilo.get_vf_status()
changed = True
# Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
if state in ('boot_once', 'boot_always') or force:
power_status = ilo.get_host_power_status()
if not force and power_status == 'ON':
module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
if power_status == 'ON':
ilo.warm_boot_server()
# ilo.cold_boot_server()
changed = True
else:
ilo.press_pwr_btn()
# ilo.reset_server()
# ilo.set_host_power(host_power=True)
changed = True
elif state == 'poweroff':
power_status = ilo.get_host_power_status()
if not power_status == 'OFF':
ilo.hold_pwr_btn()
# ilo.set_host_power(host_power=False)
changed = True
module.exit_json(changed=changed, power=power_status, **status)
if __name__ == '__main__':
main()
| gpl-3.0 |
landrito/api-client-staging | generated/python/gapic-google-cloud-monitoring-v3/google/cloud/gapic/monitoring/v3/metric_service_client.py | 7 | 30336 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/monitoring/v3/metric_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.monitoring.v3 MetricService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.api import metric_pb2 as api_metric_pb2
from google.cloud.gapic.monitoring.v3 import enums
from google.cloud.proto.monitoring.v3 import common_pb2
from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
from google.cloud.proto.monitoring.v3 import metric_service_pb2
_PageDesc = google.gax.PageDescriptor
class MetricServiceClient(object):
"""
Manages metric descriptors, monitored resource descriptors, and
time series data.
"""
SERVICE_ADDRESS = 'monitoring.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_monitored_resource_descriptors':
_PageDesc('page_token', 'next_page_token', 'resource_descriptors'),
'list_metric_descriptors': _PageDesc('page_token', 'next_page_token',
'metric_descriptors'),
'list_time_series': _PageDesc('page_token', 'next_page_token',
'time_series')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/monitoring.read',
'https://www.googleapis.com/auth/monitoring.write', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_METRIC_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/metricDescriptors/{metric_descriptor=**}')
_MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}'
)
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
@classmethod
def metric_descriptor_path(cls, project, metric_descriptor):
"""Returns a fully-qualified metric_descriptor resource name string."""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'metric_descriptor': metric_descriptor,
})
@classmethod
def monitored_resource_descriptor_path(cls, project,
monitored_resource_descriptor):
"""Returns a fully-qualified monitored_resource_descriptor resource name string."""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'monitored_resource_descriptor': monitored_resource_descriptor,
})
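# For illustration (not part of the generated client), these templates render to
# plain resource name strings; the project and metric ids below are made up:
#   project_path('my-project')
#       -> 'projects/my-project'
#   metric_descriptor_path('my-project', 'custom.googleapis.com/my_metric')
#       -> 'projects/my-project/metricDescriptors/custom.googleapis.com/my_metric'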
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
@classmethod
def match_project_from_metric_descriptor_name(cls, metric_descriptor_name):
"""Parses the project from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('project')
@classmethod
def match_metric_descriptor_from_metric_descriptor_name(
cls, metric_descriptor_name):
"""Parses the metric_descriptor from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the metric_descriptor.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('metric_descriptor')
@classmethod
def match_project_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the project from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get('project')
@classmethod
def match_monitored_resource_descriptor_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the monitored_resource_descriptor from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the monitored_resource_descriptor.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get(
'monitored_resource_descriptor')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A MetricServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-monitoring-v3', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'metric_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.monitoring.v3.MetricService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.metric_service_stub = config.create_stub(
metric_service_pb2.MetricServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._list_monitored_resource_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMonitoredResourceDescriptors,
settings=defaults['list_monitored_resource_descriptors'])
self._get_monitored_resource_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMonitoredResourceDescriptor,
settings=defaults['get_monitored_resource_descriptor'])
self._list_metric_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMetricDescriptors,
settings=defaults['list_metric_descriptors'])
self._get_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMetricDescriptor,
settings=defaults['get_metric_descriptor'])
self._create_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.CreateMetricDescriptor,
settings=defaults['create_metric_descriptor'])
self._delete_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.DeleteMetricDescriptor,
settings=defaults['delete_metric_descriptor'])
self._list_time_series = api_callable.create_api_call(
self.metric_service_stub.ListTimeSeries,
settings=defaults['list_time_series'])
self._create_time_series = api_callable.create_api_call(
self.metric_service_stub.CreateTimeSeries,
settings=defaults['create_time_series'])
# Service calls
def list_monitored_resource_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_monitored_resource_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_monitored_resource_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): An optional `filter <https://cloud.google.com/monitoring/api/v3/filters>`_ describing
the descriptors to be returned. The filter can reference
the descriptor's type and labels. For example, the
following filter returns only Google Compute Engine descriptors
that have an ``id`` label:
::
resource.type = starts_with(\"gce_\") AND resource.label:id
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_monitored_resource_descriptors(request, options)
def get_monitored_resource_descriptor(self, name, options=None):
"""
Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
>>> response = api.get_monitored_resource_descriptor(name)
Args:
name (string): The monitored resource descriptor to get. The format is
``\"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\"``.
The ``{resource_type}`` is a predefined type, such as
``cloudsql_database``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name)
return self._get_monitored_resource_descriptor(request, options)
def list_metric_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_metric_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_metric_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): If this field is empty, all custom and
system-defined metric descriptors are returned.
Otherwise, the `filter <https://cloud.google.com/monitoring/api/v3/filters>`_
specifies which metric descriptors are to be
returned. For example, the following filter matches all
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_:
::
metric.type = starts_with(\"custom.googleapis.com/\")
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.metric_pb2.MetricDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMetricDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_metric_descriptors(request, options)
def get_metric_descriptor(self, name, options=None):
"""
Gets a single metric descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> response = api.get_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example value of ``{metric_id}`` is
``\"compute.googleapis.com/instance/disk/read_bytes_count\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g. timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
return self._get_metric_descriptor(request, options)
def create_metric_descriptor(self, name, metric_descriptor, options=None):
"""
Creates a new metric descriptor.
User-created metric descriptors define
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.api import metric_pb2 as api_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> metric_descriptor = api_metric_pb2.MetricDescriptor()
>>> response = api.create_metric_descriptor(name, metric_descriptor)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
metric_descriptor (:class:`google.api.metric_pb2.MetricDescriptor`): The new `custom metric <https://cloud.google.com/monitoring/custom-metrics>`_
descriptor.
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g. timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor)
return self._create_metric_descriptor(request, options)
def delete_metric_descriptor(self, name, options=None):
"""
Deletes a metric descriptor. Only user-created
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_ can be deleted.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> api.delete_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example of ``{metric_id}`` is:
``\"custom.googleapis.com/my_test_metric\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g. timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
self._delete_metric_descriptor(request, options)
def list_time_series(self,
name,
filter_,
interval,
view,
aggregation=None,
order_by='',
page_size=0,
options=None):
"""
Lists time series that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.gapic.monitoring.v3 import enums
>>> from google.cloud.proto.monitoring.v3 import common_pb2
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> filter_ = ''
>>> interval = common_pb2.TimeInterval()
>>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
>>>
>>> # Iterate over all results
>>> for element in api.list_time_series(name, filter_, interval, view):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_time_series(name, filter_, interval, view, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
\"projects/{project_id_or_number}\".
filter_ (string): A `monitoring filter <https://cloud.google.com/monitoring/api/v3/filters>`_ that specifies which time
series should be returned. The filter must specify a single metric type,
and can additionally specify metric labels and other information. For
example:
::
metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND
metric.label.instance_name = \"my-instance-name\"
interval (:class:`google.cloud.proto.monitoring.v3.common_pb2.TimeInterval`): The time interval for which results should be returned. Only time series
that contain data points in the specified interval are included
in the response.
aggregation (:class:`google.cloud.proto.monitoring.v3.common_pb2.Aggregation`): By default, the raw time series data is returned.
Use this field to combine multiple time series for different
views of the data.
order_by (string): Specifies the order in which the points of the time series should
be returned. By default, results are not ordered. Currently,
this field must be left blank.
view (enum :class:`google.cloud.gapic.monitoring.v3.enums.ListTimeSeriesRequest.TimeSeriesView`): Specifies which information is returned about the time series.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g. timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if aggregation is None:
aggregation = common_pb2.Aggregation()
# Create the request object.
request = metric_service_pb2.ListTimeSeriesRequest(
name=name,
filter=filter_,
interval=interval,
view=view,
aggregation=aggregation,
order_by=order_by,
page_size=page_size)
return self._list_time_series(request, options)
def create_time_series(self, name, time_series, options=None):
"""
Creates or adds data to one or more time series.
The response is empty if all time series in the request were written.
If any time series could not be written, a corresponding failure message is
included in the error response.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> time_series = []
>>> api.create_time_series(name, time_series)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
time_series (list[:class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries`]): The new data to be added to a list of time series.
Adds at most one data point to each of several time series. The new data
point must be more recent than any other point in its time series. Each
``TimeSeries`` value must fully specify a unique time series by supplying
all label values for the metric and the monitored resource.
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g. timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series)
self._create_time_series(request, options)
| bsd-3-clause |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_chartsheet09.py | 8 | 1773 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chartsheet09.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
    def test_create_file(self):
        """Test the worksheet properties of an XlsxWriter chartsheet file with series format properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [49044480, 49055232]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'border': {'color': 'yellow'},
'fill': {'color': 'red'},
})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chartsheet.set_chart(chart)
chartsheet.activate()
workbook.close()
self.assertExcelEqual()
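# Illustrative sketch (not part of the original test module): these comparison
# tests are normally collected by the project's test runner, but the standard
# unittest entry point below also lets the file be run directly.
if __name__ == '__main__':
    import unittest
    unittest.main()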
| bsd-2-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pip/_vendor/distlib/locators.py | 328 | 51013 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
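# Illustrative sketch (not part of distlib): minimal use of the helper above.
# Querying the live index needs network access, so the function is defined but
# never called here.
def _example_get_all_distribution_names():  # pragma: no cover
    names = get_all_distribution_names()
    print(len(names), 'distribution names known to the index')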
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except self.errors.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
        Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
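# Illustrative sketch (not part of distlib): how Page exposes scraped links.
# The HTML fragment and URL below are made up for demonstration purposes.
def _example_page_links():  # pragma: no cover
    html = '<a href="/packages/foo-1.0.tar.gz" rel="download">foo 1.0</a>'
    page = Page(html, 'https://example.invalid/simple/foo/')
    for url, rel in page.links:
        print(url, rel)  # absolute URL plus its "rel" attribute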
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
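# Illustrative sketch (not part of distlib): asking a SimpleScrapingLocator for
# a project's releases.  The index URL mirrors the default used further below;
# the call needs network access, so it is shown but never invoked here.
def _example_scraping_locator():  # pragma: no cover
    locator = SimpleScrapingLocator('https://pypi.python.org/simple/', timeout=3.0)
    versions = locator.get_project('sarge')
    print(sorted(k for k in versions if k not in ('urls', 'digests')))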
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched,
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
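# Illustrative sketch (not part of distlib): pointing a DirectoryLocator at a
# local directory of archives.  The path and requirement are placeholders.
def _example_directory_locator():  # pragma: no cover
    locator = DirectoryLocator('/path/to/wheelhouse', recursive=False)
    dist = locator.locate('foo (== 1.0)')
    print(dist)  # a Distribution instance, or None if nothing matched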
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
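# Illustrative sketch (not part of distlib): the module-level locate() helper
# bound above.  The requirement string is only an example; None is returned
# when no matching distribution can be found.
def _example_locate():  # pragma: no cover
    dist = locate('requests (>= 2.0, < 3.0)')
    if dist is not None:
        print(dist.name_and_version, sorted(dist.download_urls))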
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
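# Illustrative sketch (not part of distlib): driving DependencyFinder.find().
# The requirement is only an example and resolving it needs a working locator
# (typically network access), so the function is defined but never called here.
def _example_dependency_finder():  # pragma: no cover
    finder = DependencyFinder()
    dists, problems = finder.find('sarge (== 0.1)')
    for dist in dists:
        kind = 'build-time only' if dist.build_time_dependency else 'run-time'
        print(dist.name_and_version, kind)
    for problem in problems:
        print('problem:', problem)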
| mit |
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/pymysql/converters.py | 20 | 12487 | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
def escape_item(val, charset, mapping=None):
if mapping is None:
mapping = encoders
encoder = mapping.get(type(val))
# Fallback to default when no encoder found
if not encoder:
try:
encoder = mapping[text_type]
except KeyError:
raise TypeError("no default type converter defined")
if encoder in (escape_dict, escape_sequence):
val = encoder(val, charset, mapping)
else:
val = encoder(val, mapping)
return val
def escape_dict(val, charset, mapping=None):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset, mapping)
n[k] = quoted
return n
def escape_sequence(val, charset, mapping=None):
n = []
for item in val:
quoted = escape_item(item, charset, mapping)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset, mapping=None):
return ','.join([escape_item(x, charset, mapping) for x in val])
def escape_bool(value, mapping=None):
return str(int(value))
def escape_object(value, mapping=None):
return str(value)
def escape_int(value, mapping=None):
return str(value)
def escape_float(value, mapping=None):
return ('%.15g' % value)
_escape_table = [unichr(x) for x in range(128)]
_escape_table[0] = u'\\0'
_escape_table[ord('\\')] = u'\\\\'
_escape_table[ord('\n')] = u'\\n'
_escape_table[ord('\r')] = u'\\r'
_escape_table[ord('\032')] = u'\\Z'
_escape_table[ord('"')] = u'\\"'
_escape_table[ord("'")] = u"\\'"
def _escape_unicode(value, mapping=None):
"""escapes *value* without adding quote.
Value should be unicode
"""
return value.translate(_escape_table)
if PY2:
    def escape_string(value, mapping=None):
        """escape_string escapes *value* but does not surround it with quotes.
Value should be bytes or unicode.
"""
if isinstance(value, unicode):
return _escape_unicode(value)
assert isinstance(value, (bytes, bytearray))
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value
def escape_bytes(value, mapping=None):
assert isinstance(value, (bytes, bytearray))
return b"_binary'%s'" % escape_string(value)
else:
escape_string = _escape_unicode
# On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow.
# (fixed in Python 3.6, http://bugs.python.org/issue24870)
# Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff.
# We can escape special chars and surrogateescape at once.
_escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)]
def escape_bytes(value, mapping=None):
return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table)
def escape_unicode(value, mapping=None):
return u"'%s'" % _escape_unicode(value)
def escape_str(value, mapping=None):
return "'%s'" % escape_string(str(value), mapping)
def escape_None(value, mapping=None):
return 'NULL'
def escape_timedelta(obj, mapping=None):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
if obj.microseconds:
fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'"
else:
fmt = "'{0:02d}:{1:02d}:{2:02d}'"
return fmt.format(hours, minutes, seconds, obj.microseconds)
def escape_time(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_datetime(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_date(obj, mapping=None):
fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'"
return fmt.format(obj)
def escape_struct_time(obj, mapping=None):
return escape_datetime(datetime.datetime(*obj[:6]))
def _convert_second_fraction(s):
if not s:
return 0
# Pad zeros to ensure the fraction length in microseconds
s = s.ljust(6, '0')
return int(s[:6])
DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = DATETIME_RE.match(obj)
if not m:
return convert_date(obj)
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
return datetime.datetime(*[ int(x) for x in groups ])
except ValueError:
return convert_date(obj)
TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIMEDELTA_RE.match(obj)
if not m:
return None
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
negate = -1 if groups[0] else 1
hours, minutes, seconds, microseconds = groups[1:]
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
    set this function as the converter for FIELD_TYPE.TIME.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIME_RE.match(obj)
if not m:
return None
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
hours, minutes, seconds, microseconds = groups
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if not PY2 and isinstance(timestamp, (bytes, bytearray)):
timestamp = timestamp.decode('ascii')
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
if isinstance(s, (bytes, bytearray)):
return set(s.split(b","))
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
frozenset: escape_sequence,
dict: escape_dict,
bytearray: escape_bytes,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: escape_object,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = encoders.copy()
conversions.update(decoders)
Thing2Literal = escape_str
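# A minimal usage sketch of the converters above (assumes a normal module
# import; the values mirror the doctests earlier in this file):
#
#   >>> convert_time('15:06:17')
#   datetime.time(15, 6, 17)
#   >>> convert_date('2007-02-26')
#   datetime.date(2007, 2, 26)
#   >>> convert_mysql_timestamp('20070225223217')
#   datetime.datetime(2007, 2, 25, 22, 32, 17)
#   >>> decoders[FIELD_TYPE.DATE] is convert_date
#   True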
| mit |
rayNymous/nupic | src/nupic/regions/ImageSensorExplorers/Jiggle.py | 17 | 5887 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.regions.ImageSensorExplorers.SpiralSweep import SpiralSweep
DEBUG = 0
class Jiggle(SpiralSweep):
"""
This explorer jiggles the image around each possible location within a specified
radius, being careful not to repeat movements already performed between any
two locations for maximum efficiency.
This explorer can be an efficient means of training a first order temporal
learner for translation shifts within a certain radius from center.
"""
def __init__(self, shift=1, radius=1, *args, **kwargs):
"""
@param shift -- Number of pixels to shift each time.
@param radius -- maximum amount to move away from center. Must be a multiple
of 'shift'
"""
assert(radius >= 1)
SpiralSweep.__init__(self, radius, *args, **kwargs)
if (radius % shift) != 0:
raise RuntimeError("radius must be a multiple of shift")
# Generate the location offsets that we will move to for each image
self.offsets = self._generateOffsets(shift, radius)
self.index = 0
def _generateOffsets(self, shift, radius):
"""
Generate the list of offsets we will visit for each image/filter combination
@param shift - how much each location is separated by
@param radius - maximum radius away from center to move
"""
# Table mapping a jiggle index to the relative location
gJiggleOffsets = [ \
# direction jiggleIndex's
( 0, shift), # up (1,2)
(-shift, shift), # up-right (3,4)
(-shift, 0), # right (5,6)
(-shift,-shift), # down-right (7,8)
( 0, -shift), # down (9,10)
( shift,-shift), # down-left (11,12)
( shift, 0), # left (13,14)
( shift, shift), # up-left (15,16)
]
gJigglesPerformed = []
# ------------------------------------------------------------------------
# Find the next "jiggle" for the current offset
def nextJiggle(location, jiggleIndex, jiggleOffsets, jigglesPerformed):
"""
Find the next jiggle around the current location
@param location - current location
@param jiggleIndex - current jiggle index
@param jiggleOffsets - table of jiggle offsets for each location
@param jigglesPerformed - which jiggles we've performed already between
all possible locations
@retval (jiggleIndex, jiggleTo)
or None if we've already visited all neighbors from this location
"""
#global jigglesPerformed, jiggleOffsets
while True:
jiggleIndex += 1
if jiggleIndex > 16:
return (None, None)
src = tuple(location)
dst = (src[0] + jiggleOffsets[(jiggleIndex-1)/2][0],
src[1] + jiggleOffsets[(jiggleIndex-1)/2][1])
# If the dst is outside our radius, skip it
if max(abs(dst[0]), abs(dst[1])) > radius:
continue
# Going away or coming back?
if (jiggleIndex & 1) == 0:
(src, dst) = (dst, src)
# If we've already peformed this transition between src and dst, skip it
if (jiggleIndex & 1):
awayJiggle = (src, dst)
backJiggle = (dst, src)
if awayJiggle in jigglesPerformed and \
backJiggle in jigglesPerformed:
if DEBUG >= 2: print "already performed jiggle", jiggleIndex, ", skipping"
jiggleIndex += 1
continue
# Add these jiggles to those performed
jigglesPerformed += [awayJiggle, backJiggle]
# Move to dst
if DEBUG >= 2:
print "jiggleIndex:", jiggleIndex, "location:", location,
print "relPosition:", dst
return (jiggleIndex, dst)
# --------------------------------------------------------------------------
# Loop through each loation within the radius and find all the jiggles
# Locations are encoded (x, y) and higher values move towards the top-left
location = [radius, radius] # top-left corner
offsets = [tuple(location)]
while True:
jiggleIndex = 0
# ...............................
# Next jiggle at this location
while True:
(jiggleIndex, offset) = nextJiggle(location, jiggleIndex, gJiggleOffsets,
gJigglesPerformed)
if offset is None:
break
offsets += [offset]
# ...............................
# Next location
# Next location to the right
location[0] -= shift
if location[0] >= -radius:
offsets += [tuple(location)]
continue
# Next row down, on the left
location[0] = radius
location[1] -= shift
if location[1] >= -radius:
offsets += [tuple(location)]
continue
# Exhausted all locations, break out
break
return offsets
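# A minimal sketch (hypothetical arguments; SpiralSweep's remaining parameters
# are assumed to take their defaults) of how the generated offsets are used:
#
#   explorer = Jiggle(shift=1, radius=1)
#   # explorer.offsets now holds the (x, y) positions visited for each image,
#   # starting at the top-left corner (radius, radius) and interleaving the
#   # not-yet-performed away/back jiggle moves for every grid location.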
| agpl-3.0 |
h4r5h1t/django-hauthy | tests/view_tests/tests/test_specials.py | 66 | 1428 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, override_settings
@override_settings(ROOT_URLCONF='view_tests.generic_urls')
class URLHandling(TestCase):
"""
Tests for URL handling in views and responses.
"""
redirect_target = "/%E4%B8%AD%E6%96%87/target/"
def test_combining_redirect(self):
"""
Tests that redirecting to an IRI, requiring encoding before we use it
in an HTTP response, is handled correctly. In this case the arg to
HttpRedirect is ASCII but the current request path contains non-ASCII
characters so this test ensures the creation of the full path with a
base non-ASCII part is handled correctly.
"""
response = self.client.get('/中文/')
self.assertRedirects(response, self.redirect_target)
def test_nonascii_redirect(self):
"""
Tests that a non-ASCII argument to HttpRedirect is handled properly.
"""
response = self.client.get('/nonascii_redirect/')
self.assertRedirects(response, self.redirect_target)
def test_permanent_nonascii_redirect(self):
"""
Tests that a non-ASCII argument to HttpPermanentRedirect is handled
properly.
"""
response = self.client.get('/permanent_nonascii_redirect/')
self.assertRedirects(response, self.redirect_target, status_code=301)
| bsd-3-clause |
40223136/2015cdag1man | static/Brython3.1.0-20150301-090019/Lib/formatter.py | 751 | 14930 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if implementing a writer
interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, font):
size, i, b, tt = font
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print("new_alignment(%r)" % (align,))
def new_font(self, font):
print("new_font(%r)" % (font,))
def new_margin(self, margin, level):
print("new_margin(%r, %d)" % (margin, level))
def new_spacing(self, spacing):
print("new_spacing(%r)" % (spacing,))
def new_styles(self, styles):
print("new_styles(%r)" % (styles,))
def send_paragraph(self, blankline):
print("send_paragraph(%r)" % (blankline,))
def send_line_break(self):
print("send_line_break()")
def send_hor_rule(self, *args, **kw):
print("send_hor_rule()")
def send_label_data(self, data):
print("send_label_data(%r)" % (data,))
def send_flowing_data(self, data):
print("send_flowing_data(%r)" % (data,))
def send_literal_data(self, data):
print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
| gpl-3.0 |
peterheim1/robbie | bin/follow_head.py | 1 | 5817 | #!/usr/bin/env python
# freely inspired by http://www.ros.org/wiki/arbotix_python/follow_controller
import roslib
import rospy, actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from dynamixel_msgs.msg import JointState as JointStateDynamixel
from std_msgs.msg import Float64
class JointSubscriber():
def __init__(self,joint):
rospy.Subscriber(joint + '/state', JointStateDynamixel, self.joint_state_cb)
rospy.loginfo('Subscribing for %s joint state.',joint)
self.joint_name = joint
self.state = JointStateDynamixel()
self.received = False
def joint_state_cb(self,msg):
if self.received is False:
self.received = True
self.state = msg
def get_position(self):
return self.state.current_pos
class JointCommander():
def __init__(self,joint):
self.joint_name = joint
self.pub = rospy.Publisher(joint + '/command',Float64)
def command(self,pos):
rospy.loginfo('publishing, joint ' + self.joint_name + ', value ' + str(pos))
self.pub.publish(pos)
class FollowController():
def __init__(self):
self.ns = 'head_controller'
self.joints = ['head_pan_joint', 'head_tilt_mod_joint']
namespace = rospy.get_namespace()
        rospy.loginfo('Configured for ' + str(len(self.joints)) + ' joints')
self.joint_subs = [JointSubscriber(name) for name in self.joints]
self.joint_pubs = [JointCommander(name) for name in self.joints]
self.joints_names = []
for idx in range(0,len(self.joints)):
self.joints_names.append(self.joints[idx])
# action server
self.name = self.ns + '/head_trajectory'
self.server = actionlib.SimpleActionServer(self.name, FollowJointTrajectoryAction, execute_cb=self.actionCb, auto_start=False)
rospy.loginfo("Started head_Controller ("+self.name+"). Joints: " + str(self.joints))
def startup(self):
self.server.start()
def actionCb(self, goal):
rospy.loginfo(self.name + ": Action goal recieved.")
traj = goal.trajectory
if set(self.joints_names) != set(traj.joint_names):
msg = "Trajectory joint names does not match action controlled joints." + str(traj.joint_names )
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
if not traj.points:
msg = "Trajectory empty."
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
try:
indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
except ValueError as val:
msg = "Trajectory invalid."
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
if self.executeTrajectory(traj):
self.server.set_succeeded()
rospy.loginfo('Executed.')
else:
rospy.logerr('Execution failed.')
self.server.set_aborted(text="Execution failed.")
def executeTrajectory(self, traj):
rospy.loginfo("Executing trajectory with " + str(len(traj.points)) + ' point(s)')
try:
indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
except ValueError as val:
return False
time = rospy.Time.now()
start = traj.header.stamp
#success = True
for point in traj.points:
if self.server.is_preempt_requested():
rospy.loginfo('Stopping head control')
self.server.set_preempted()
#success = False
break
while rospy.Time.now() + rospy.Duration(0.01) < start:
rospy.sleep(0.01)
desired = [ point.positions[k] for k in indexes ]
endtime = start + point.time_from_start
for i in range(0,len(self.joints)):
self.joint_pubs[i].command(desired[i])
while rospy.Time.now() + rospy.Duration(0.01) < endtime:
rospy.sleep(0.01)
return True
#===============================================================================
# def active(self):
# """ Is controller overriding servo internal control? """
# return self.server.is_active() or self.executing
#
# def getDiagnostics(self):
# """ Get a diagnostics status. """
# msg = DiagnosticStatus()
# msg.name = self.name
# msg.level = DiagnosticStatus.OK
# msg.message = "OK"
# if self.active():
# msg.values.append(KeyValue("State", "Active"))
# else:
# msg.values.append(KeyValue("State", "Not Active"))
# return msg
#===============================================================================
if __name__ == '__main__':
rospy.init_node('head_traj_controller', anonymous=True)
rospy.loginfo('head traj action node.')
c = FollowController()
rospy.loginfo('Starting action server')
c.startup()
rospy.loginfo('Spinning')
rospy.spin()
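# A minimal client-side sketch (assumes the topic and joint names used by the
# controller above): send a two-joint trajectory goal to this action server.
#
#   from control_msgs.msg import FollowJointTrajectoryGoal
#   from trajectory_msgs.msg import JointTrajectoryPoint
#   client = actionlib.SimpleActionClient('head_controller/head_trajectory',
#                                         FollowJointTrajectoryAction)
#   client.wait_for_server()
#   goal = FollowJointTrajectoryGoal()
#   goal.trajectory.joint_names = ['head_pan_joint', 'head_tilt_mod_joint']
#   point = JointTrajectoryPoint(positions=[0.0, 0.3],
#                                time_from_start=rospy.Duration(2.0))
#   goal.trajectory.points = [point]
#   client.send_goal(goal)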
| gpl-3.0 |
hospace/ToughRADIUS | toughradius/radiusd/plugins/auth_rate_limit.py | 5 | 2192 | #!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from toughradius.radiusd.settings import *
import datetime
def std_rate(resp,_in,_out):
input_limit = str(_in)
output_limit = str(_out)
_class = input_limit.zfill(8) + input_limit.zfill(8) + output_limit.zfill(8) + output_limit.zfill(8)
resp['Class'] = _class
return resp
def ros_rate(resp,_in,_out):
_irate = _in/1024
_orate = _out/1024
resp['Mikrotik-Rate-Limit'] = '%sk/%sk'%(_irate,_orate)
return resp
def aikuai_rate(resp,_in,_out):
_irate = _in/1024/8
_orate = _out/1024/8
resp['RP-Upstream-Speed-Limit'] = _irate
resp['RP-Downstream-Speed-Limit'] = _orate
return resp
def cisco_rate(resp,_in,_out):
return resp
def radback_rate(resp,_in,_out):
return resp
def h3c_rate(resp,_in,_out):
resp = std_rate(resp, _in, _out)
resp['H3C-Input-Average-Rate'] = _in
resp['H3C-Input-Peak-Rate'] = _in
resp['H3C-Output-Average-Rate'] = _out
resp['H3C-Output-Peak-Rate'] = _out
return resp
def zte_rate(resp,_in,_out):
resp['ZTE-Rate-Ctrl-Scr-Up'] = _in/1024
resp['ZTE-Rate-Ctrl-Scr-Down'] = _out/1024
return resp
def huawei_rate(resp,_in,_out):
resp = std_rate(resp,_in,_out)
resp['Huawei-Input-Average-Rate'] = _in
resp['Huawei-Input-Peak-Rate'] = _in
resp['Huawei-Output-Average-Rate'] = _out
resp['Huawei-Output-Peak-Rate'] = _out
return resp
rate_funcs = {
'0' : std_rate,
'9' : cisco_rate,
'2011' : huawei_rate,
'2352' : radback_rate,
'3902' : zte_rate,
'25506' : h3c_rate,
'14988' : ros_rate,
'10055' : aikuai_rate
}
def process(req=None,resp=None,user=None,radiusd=None,**kwargs):
store = radiusd.store
if store.is_white_roster(req.get_mac_addr()):
return resp
product = store.get_product(user['product_id'])
    if not product: return resp
input_limit = product['input_max_limit']
output_limit = product['output_max_limit']
if input_limit == 0 and output_limit == 0:
return std_rate(resp,0,0)
_vendor = req.vendor_id or '0'
return rate_funcs[_vendor](resp,input_limit,output_limit)
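# A worked example (hypothetical limits) of how std_rate builds the RADIUS
# Class attribute: for an uplink limit of 1048576 bps and a downlink limit of
# 4194304 bps, each value is zero-padded to 8 digits and repeated, giving
# '0104857601048576' followed by '0419430404194304'.
#
#   resp = std_rate({}, 1048576, 4194304)
#   resp['Class']  # '01048576010485760419430404194304'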
| agpl-3.0 |
TeamExodus/external_chromium_org | tools/memory_inspector/memory_inspector/classification/native_heap_classifier.py | 89 | 8377 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module classifies NativeHeap objects filtering their allocations.
Two filters are currently available, 'stacktrace' and 'source_path', which
work as follows:
{'name': 'rule-1', 'stacktrace': 'foo' }
{'name': 'rule-2', 'stacktrace': ['foo', r'bar\s+baz']}
{'name': 'rule-3', 'source_path': 'sk.*allocator'}
{'name': 'rule-4', 'source_path': 'sk', 'stacktrace': 'SkAllocator'}
rule-1 will match any allocation that has 'foo' in one of its stack frames.
rule-2 will match any allocation that has a stack frame matching 'foo'
followed by a stack frame matching 'bar baz'. Note that order matters, so
rule-2 will not match a stacktrace like ['bar baz', 'foo'].
rule-3 will match any allocation in which at least one of the source paths in
its stack frames matches the regex sk.*allocator.
rule-4 will match any allocation which satisfies both conditions.
TODO(primiano): introduce more filters after the first prototype with UI, for
instance, filter by library file name or by allocation size.
"""
import collections
import posixpath
import re
from memory_inspector.classification import results
from memory_inspector.classification import rules
from memory_inspector.core import exceptions
from memory_inspector.core import native_heap
_RESULT_KEYS = ['bytes_allocated', 'bytes_resident']
def LoadRules(content):
"""Loads and parses a native-heap rule tree from a content (string).
Returns:
An instance of |rules.Rule|, which nodes are |_NHeapRule| instances.
"""
return rules.Load(content, _NHeapRule)
def Classify(nativeheap, rule_tree):
"""Creates aggregated results of native heaps using the provided rules.
Args:
nativeheap: the heap dump being processed (a |NativeHeap| instance).
rule_tree: the user-defined rules that define the filtering categories.
Returns:
An instance of |AggreatedResults|.
"""
assert(isinstance(nativeheap, native_heap.NativeHeap))
assert(isinstance(rule_tree, rules.Rule))
res = results.AggreatedResults(rule_tree, _RESULT_KEYS)
for allocation in nativeheap.allocations:
res.AddToMatchingNodes(allocation,
[allocation.size, allocation.resident_size])
return res
def InferHeuristicRulesFromHeap(nheap, max_depth=3, threshold=0.02):
"""Infers the rules tree from a symbolized heap snapshot.
In lack of a specific set of rules, this method can be invoked to infer a
meaningful rule tree starting from a heap snapshot. It will build a compact
radix tree from the source paths of the stack traces, which height is at most
|max_depth|, selecting only those nodes which contribute for at least
|threshold| (1.0 = 100%) w.r.t. the total allocation of the heap snapshot.
"""
assert(isinstance(nheap, native_heap.NativeHeap))
def RadixTreeInsert(node, path):
"""Inserts a string (path) into a radix tree (a python recursive dict).
e.g.: [/a/b/c, /a/b/d, /z/h] -> {'/a/b/': {'c': {}, 'd': {}}, '/z/h': {}}
"""
def GetCommonPrefix(args):
"""Returns the common prefix between two paths (no partial paths).
e.g.: /tmp/bar, /tmp/baz will return /tmp/ (and not /tmp/ba as the dumb
posixpath.commonprefix implementation would do)
"""
parts = posixpath.commonprefix(args).rpartition(posixpath.sep)[0]
return parts + posixpath.sep if parts else ''
for node_path in node.iterkeys():
pfx = GetCommonPrefix([node_path, path])
if not pfx:
continue
if len(pfx) < len(node_path):
node[pfx] = {node_path[len(pfx):] : node[node_path]}
del node[node_path]
if len(path) > len(pfx):
RadixTreeInsert(node[pfx], path[len(pfx):])
return
node[path] = {} # No common prefix, create new child in current node.
# Given an allocation of size N and its stack trace, heuristically determines
# the source directory to be blamed for the N bytes.
# The blamed_dir is the one which appears more times in the top 8 stack frames
# (excluding the first 2, which usually are just the (m|c)alloc call sites).
# At the end, this will generate a *leaderboard* (|blamed_dirs|) which
# associates, to each source path directory, the number of bytes allocated.
blamed_dirs = collections.Counter() # '/s/path' : bytes_from_this_path (int)
total_allocated = 0
for alloc in nheap.allocations:
dir_histogram = collections.Counter()
for frame in alloc.stack_trace.frames[2:10]:
# Compute a histogram (for each allocation) of the top source dirs.
if not frame.symbol or not frame.symbol.source_info:
continue
src_file = frame.symbol.source_info[0].source_file_path
src_dir = posixpath.dirname(src_file.replace('\\', '/')) + '/'
dir_histogram.update([src_dir])
if not dir_histogram:
continue
# Add the blamed dir to the leaderboard.
blamed_dir = dir_histogram.most_common()[0][0]
blamed_dirs.update({blamed_dir : alloc.size})
total_allocated += alloc.size
# Select only the top paths from the leaderboard which contribute for more
# than |threshold| and make a radix tree out of them.
radix_tree = {}
for blamed_dir, alloc_size in blamed_dirs.most_common():
if (1.0 * alloc_size / total_allocated) < threshold:
break
RadixTreeInsert(radix_tree, blamed_dir)
# The final step consists in generating a rule tree from the radix tree. This
# is a pretty straightforward tree-clone operation, they have the same shape.
def GenRulesFromRadixTree(radix_tree_node, max_depth, parent_path=''):
children = []
if max_depth > 0:
for node_path, node_children in radix_tree_node.iteritems():
child_rule = {
'name': node_path[-16:],
'source_path': '^' + re.escape(parent_path + node_path),
'children': GenRulesFromRadixTree(
node_children, max_depth - 1, parent_path + node_path)}
children += [child_rule]
return children
rules_tree = GenRulesFromRadixTree(radix_tree, max_depth)
return LoadRules(str(rules_tree))
class _NHeapRule(rules.Rule):
def __init__(self, name, filters):
super(_NHeapRule, self).__init__(name)
# The 'stacktrace' filter can be either a string (simple case, one regex) or
# a list of strings (complex case, see doc in the header of this file).
stacktrace_regexs = filters.get('stacktrace', [])
if isinstance(stacktrace_regexs, basestring):
stacktrace_regexs = [stacktrace_regexs]
self._stacktrace_regexs = []
for regex in stacktrace_regexs:
try:
self._stacktrace_regexs.append(re.compile(regex))
except re.error, descr:
raise exceptions.MemoryInspectorException(
'Stacktrace regex error "%s" : %s' % (regex, descr))
# The 'source_path' regex, instead, simply matches the source file path.
self._path_regex = None
path_regex = filters.get('source_path')
if path_regex:
try:
self._path_regex = re.compile(path_regex)
except re.error, descr:
raise exceptions.MemoryInspectorException(
'Path regex error "%s" : %s' % (path_regex, descr))
def Match(self, allocation):
# Match the source file path, if the 'source_path' filter is specified.
if self._path_regex:
path_matches = False
for frame in allocation.stack_trace.frames:
if frame.symbol and frame.symbol.source_info:
if self._path_regex.search(
frame.symbol.source_info[0].source_file_path):
path_matches = True
break
if not path_matches:
return False
# Match the stack traces symbols, if the 'stacktrace' filter is specified.
if not self._stacktrace_regexs:
return True
cur_regex_idx = 0
cur_regex = self._stacktrace_regexs[0]
for frame in allocation.stack_trace.frames:
if frame.symbol and cur_regex.search(frame.symbol.name):
# The current regex has been matched.
if cur_regex_idx == len(self._stacktrace_regexs) - 1:
return True # All the provided regexs have been matched, we're happy.
cur_regex_idx += 1
cur_regex = self._stacktrace_regexs[cur_regex_idx]
return False # Not all the provided regexs have been matched. | bsd-3-clause |
cloudwatt/contrail-controller | src/opserver/stats.py | 7 | 5232 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# stats
#
# Query StatsOracle info from analytics
#
import sys
import argparse
import json
import datetime
from opserver_util import OpServerUtils
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames
import sandesh.viz.constants as VizConstants
from pysandesh.gen_py.sandesh.ttypes import SandeshType, SandeshLevel
STAT_TABLE_LIST = [xx.stat_type + "." + xx.stat_attr for xx in VizConstants._STAT_TABLES]
class StatQuerier(object):
def __init__(self):
self._args = None
# end __init__
# Public functions
def parse_args(self):
"""
Eg. python stats.py --analytics-api-ip 127.0.0.1
--analytics-api-port 8081
--table AnalyticsCpuState.cpu_info
--where name=a6s40 cpu_info.module_id=Collector
--select "T=60 SUM(cpu_info.cpu_share)"
--sort "SUM(cpu_info.cpu_share)"
[--start-time now-10m --end-time now] | --last 10m
python stats.py --table AnalyticsCpuState.cpu_info
"""
defaults = {
'analytics_api_ip': '127.0.0.1',
'analytics_api_port': '8081',
'start_time': 'now-10m',
'end_time': 'now',
'select' : [],
'where' : [],
'sort': []
}
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_defaults(**defaults)
parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
parser.add_argument("--analytics-api-port", help="Port of Analytcis API Server")
parser.add_argument(
"--start-time", help="Logs start time (format now-10m, now-1h)")
parser.add_argument("--end-time", help="Logs end time")
parser.add_argument(
"--last", help="Logs from last time period (format 10m, 1d)")
parser.add_argument(
"--table", help="StatTable to query", choices=STAT_TABLE_LIST)
parser.add_argument(
"--dtable", help="Dynamic StatTable to query")
parser.add_argument(
"--select", help="List of Select Terms", nargs='+')
parser.add_argument(
"--where", help="List of Where Terms to be ANDed", nargs='+')
parser.add_argument(
"--sort", help="List of Sort Terms", nargs='+')
self._args = parser.parse_args()
if self._args.table is None and self._args.dtable is None:
return -1
try:
self._start_time, self._end_time = \
OpServerUtils.parse_start_end_time(
start_time = self._args.start_time,
end_time = self._args.end_time,
last = self._args.last)
except:
return -1
return 0
# end parse_args
# Public functions
def query(self):
query_url = OpServerUtils.opserver_query_url(
self._args.analytics_api_ip,
self._args.analytics_api_port)
if self._args.dtable is not None:
rtable = self._args.dtable
else:
rtable = self._args.table
query_dict = OpServerUtils.get_query_dict(
"StatTable." + rtable, str(self._start_time), str(self._end_time),
select_fields = self._args.select,
where_clause = "AND".join(self._args.where),
sort_fields = self._args.sort)
print json.dumps(query_dict)
resp = OpServerUtils.post_url_http(
query_url, json.dumps(query_dict), sync = True)
res = None
if resp is not None:
res = json.loads(resp)
res = res['value']
return res
# end query
def display(self, result):
if result == [] or result is None:
return
for res in result:
print res
# end display
# end class StatQuerier
def main():
querier = StatQuerier()
if querier.parse_args() != 0:
return
if len(querier._args.select)==0 and querier._args.dtable is None:
tab_url = "http://" + querier._args.analytics_api_ip + ":" +\
querier._args.analytics_api_port +\
"/analytics/table/StatTable." + querier._args.table
schematxt = OpServerUtils.get_url_http(tab_url + "/schema")
schema = json.loads(schematxt.text)['columns']
for pp in schema:
if pp.has_key('suffixes') and pp['suffixes']:
des = "%s %s" % (pp['name'],str(pp['suffixes']))
else:
des = "%s" % pp['name']
if pp['index']:
valuetxt = OpServerUtils.get_url_http(tab_url + "/column-values/" + pp['name'])
print "%s : %s %s" % (des,pp['datatype'], valuetxt.text)
else:
print "%s : %s" % (des,pp['datatype'])
else:
result = querier.query()
querier.display(result)
# end main
if __name__ == "__main__":
main()
| apache-2.0 |
JinfengChen/CIRCfinder | script/extract_anchor.py | 1 | 1494 | #!/usr/bin/env python
import sys
import pysam
def main():
if len(sys.argv) != 5:
print('extract_anchor.py sam fa i length')
sys.exit(0)
i = sys.argv[3]
length = int(sys.argv[4]) + 10
flag = ''
fa = pysam.Fastafile(sys.argv[2])
with open(sys.argv[1], 'r') as sam:
for line in sam:
if not line.startswith('@'):
label = line.split()[0] # intron label
if flag != label:
flag = label
read_start = open('read/' + label + '_read_start_' + i + '.fa', 'w')
read_end = open('read/' + label + '_read_end_' + i + '.fa', 'w')
pos = int(line.split()[3])
idx = pos / length * length
cigar = line.split()[1]
if cigar == '0':
offset = pos % length
else:
offset = pos % length + 10
seq = fa.fetch('index', idx, idx + length)
head = '>' + label + '_' + str(idx) + '_' + str(offset)
head += '_' + cigar + '\n'
if cigar == '0':
read_start.write(head + seq[:(offset - 1)] + '\n')
read_end.write(head + seq[(offset - 1):-10] + '\n')
else:
read_end.write(head + seq[:(offset - 1)] + '\n')
read_start.write(head + seq[(offset - 1):-10] + '\n')
if __name__ == '__main__':
main()
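# Example invocation (hypothetical file names): split each SAM alignment into
# anchor-start / anchor-end FASTA records under an existing read/ directory.
# The FASTA must contain a sequence named 'index', and the last argument is
# the junction read length (10 bp of slack is added internally).
#
#   python extract_anchor.py intron_reads.sam index.fa 1 90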
| mit |
Nirvedh/CoarseCoherence | src/mem/slicc/symbols/Event.py | 92 | 1754 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class Event(Symbol):
def __repr__(self):
return "[Event: %s]" % self.ident
__all__ = [ "Event" ]
| bsd-3-clause |
Phrozyn/MozDef | alerts/auditd_sftp.py | 2 | 1625 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch
class AlertSFTPEvent(AlertTask):
def main(self):
search_query = SearchQuery(minutes=5)
search_query.add_must([
TermMatch('category', 'execve'),
TermMatch('processname', 'audisp-json'),
TermMatch('details.processname', 'ssh'),
PhraseMatch('details.parentprocess', 'sftp')
])
self.filtersManual(search_query)
self.searchEventsSimple()
self.walkEvents()
# Set alert properties
def onEvent(self, event):
category = 'execve'
severity = 'NOTICE'
        tags = ['audisp-json', 'audit']
srchost = 'unknown'
username = 'unknown'
directory = 'unknown'
x = event['_source']
if 'hostname' in x:
srchost = x['hostname']
if 'details' in x:
if 'originaluser' in x['details']:
username = x['details']['originaluser']
if 'cwd' in x['details']:
directory = x['details']['cwd']
summary = 'SFTP Event by {0} from host {1} in directory {2}'.format(username, srchost, directory)
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, [event], severity)
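# A sketch of the resulting alert (hypothetical event values): for an execve
# of ssh whose parent process is sftp, the generated summary reads e.g.
#   'SFTP Event by jdoe from host bastion1 in directory /home/jdoe'
# with category 'execve', severity 'NOTICE' and the tags set above.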
| mpl-2.0 |
auferack08/edx-platform | common/lib/xmodule/xmodule/tests/test_editing_module.py | 27 | 2425 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locations import Location
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
""" Testing TabsEditingDescriptor"""
def setUp(self):
super(TabsEditingDescriptorTestCase, self).setUp()
system = get_test_descriptor_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
self.tabs = [
{
'name': "Test_css",
'template': "tabs/codemirror-edit.html",
'current': True,
'css': {
'scss': [resource_string(__name__,
'../../test_files/test_tabseditingdescriptor.scss')],
'css': [resource_string(__name__,
'../../test_files/test_tabseditingdescriptor.css')]
}
},
{
'name': "Subtitles",
'template': "video/subtitles.html",
},
{
'name': "Settings",
'template': "tabs/video-metadata-edit-tab.html"
}
]
TabsEditingDescriptor.tabs = self.tabs
self.descriptor = system.construct_xblock_from_class(
TabsEditingDescriptor,
scope_ids=ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', 'revision')),
field_data=DictFieldData({}),
)
def test_get_css(self):
"""test get_css"""
css = self.descriptor.get_css()
test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
with open(test_css_file) as new_css:
added_css = new_css.read()
self.assertEqual(css['scss'].pop(), added_css)
self.assertEqual(css['css'].pop(), added_css)
def test_get_context(self):
""""test get_context"""
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], self.tabs)
| agpl-3.0 |
nitin-cherian/LifeLongLearning | Python/Experiments/JINJA/RealPython/jinja_env/lib/python3.5/site-packages/flask/sessions.py | 119 | 14394 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from .helpers import total_seconds
from itsdangerous import URLSafeTimedSerializer, BadSignature
class SessionMixin(object):
"""Expands a basic dictionary with an accessors that are expected
by Flask extensions and users for the session.
"""
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
#: this reflects the ``'_permanent'`` key in the dict.
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
#: some session backends can tell you if a session is new, but that is
#: not necessarily guaranteed. Use with caution. The default mixin
#: implementation just hardcodes ``False`` in.
new = False
#: for some backends this will always be ``True``, but some backends will
#: default this to false and detect changes in the dictionary for as
#: long as changes do not happen on mutable structures in the session.
#: The default mixin implementation just hardcodes ``True`` in.
modified = True
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif isinstance(value, bytes):
return {' b': b64encode(value).decode('ascii')}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
from flask.debughelpers import UnexpectedUnicodeError
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
class TaggedJSONSerializer(object):
"""A customized JSON serializer that supports a few extra types that
we take for granted when serializing (tuples, markup objects, datetime).
"""
def dumps(self, value):
return json.dumps(_tag(value), separators=(',', ':'))
LOADS_MAP = {
' t': tuple,
' u': uuid.UUID,
' b': b64decode,
' m': Markup,
' d': parse_date,
}
def loads(self, value):
def object_hook(obj):
if len(obj) != 1:
return obj
the_key, the_value = next(iteritems(obj))
# Check the key for a corresponding function
return_function = self.LOADS_MAP.get(the_key)
if return_function:
# Pass the value to the function
return return_function(the_value)
# Didn't find a function for this object
return obj
return json.loads(value, object_hook=object_hook)
session_json_serializer = TaggedJSONSerializer()
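# A small round-trip sketch of the tagged serialization used for sessions
# (the values follow from _tag() above):
#
#   session_json_serializer.dumps({'pair': (1, 2)})
#   # -> '{"pair":{" t":[1,2]}}'
#   session_json_serializer.loads('{"pair":{" t":[1,2]}}')
#   # -> {'pair': (1, 2)}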
class SecureCookieSession(CallbackDict, SessionMixin):
"""Base class for sessions based on signed cookies."""
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('The session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns ``None`` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by Flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop off the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or ``None`` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def should_set_cookie(self, app, session):
"""Indicates whether a cookie should be set now or not. This is
used by session backends to figure out if they should emit a
set-cookie header or not. The default behavior is controlled by
the ``SESSION_REFRESH_EACH_REQUEST`` config variable. If
it's set to ``False`` then a cookie is only set if the session is
modified, if set to ``True`` it's always set if the session is
permanent.
This check is usually skipped if sessions get deleted.
.. versionadded:: 0.11
"""
if session.modified:
return True
save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
return save_each and session.permanent
def open_session(self, app, request):
"""This method has to be implemented and must either return ``None``
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
# Delete case. If there is no session we bail early.
# If the session was modified to be empty we remove the
# whole cookie.
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain, path=path)
return
# Modification case. There are upsides and downsides to
# emitting a set-cookie header each request. The behavior
# is controlled by the :meth:`should_set_cookie` method
# which performs a quick check to figure out if the cookie
# should be set or not. This is controlled by the
# SESSION_REFRESH_EACH_REQUEST config flag as well as
# the permanent flag on the session itself.
if not self.should_set_cookie(app, session):
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=httponly,
domain=domain, path=path, secure=secure)
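# A minimal customization sketch (hypothetical subclass name): the cookie
# machinery above can be reused while changing only the signing salt.
#
#   class MySessionInterface(SecureCookieSessionInterface):
#       salt = 'my-app-session'
#
#   app.session_interface = MySessionInterface()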
| mit |
BjerknesClimateDataCentre/QuinCe | external_scripts/NRT/configure_nrt.py | 2 | 5398 | import os, logging
import toml, json
import urllib.error
import base64
from tabulate import tabulate
# Local modules
import quince, nrtdb, nrtftp
import RetrieverFactory, PreprocessorFactory
# Extract the list of IDs from a set of instruments
def get_ids(instruments):
result = []
for instrument in instruments:
result.append(instrument["id"])
return result
# Draw the table of instruments
def make_instrument_table(instruments, ids, showType):
table_data = []
if showType:
table_data = [["ID", "Name", "Owner", "Type", "Preprocessor"]]
else:
table_data = [["ID", "Name", "Owner"]]
for instrument in instruments:
draw = True
if ids is not None and instrument["id"] not in ids:
draw = False
if draw:
if showType:
table_data.append([instrument["id"],
instrument["name"],
instrument["owner"],
"None" if instrument["type"] is None
else instrument["type"],
instrument["preprocessor"]])
else:
table_data.append([instrument["id"],
instrument["name"],
instrument["owner"]])
print(tabulate(table_data, headers="firstrow"))
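# Illustrative sketch (added comment, not part of the original script): the
# sync step in main() below derives removals and additions from the two ID
# lists, shown here with hypothetical instrument ids.
#
#   quince_ids = [1, 2, 3]                              # defined in QuinCe
#   nrt_ids    = [2, 3, 4]                              # known to the NRT db
#   orphaned   = list(set(nrt_ids) - set(quince_ids))   # [4] -> remove from NRT
#   new        = list(set(quince_ids) - set(nrt_ids))   # [1] -> add to NRT/FTP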
#######################################################
def main():
ftpconn = None
dbconn = None
try:
# Blank logger - sends everything to /dev/null
logging.basicConfig(filename=os.devnull)
logger = logging.getLogger('configure_nrt')
logger.setLevel(level=10)
with open("config.toml", "r") as config_file:
config = toml.loads(config_file.read())
print("Connecting to FTP servers...")
ftpconn = nrtftp.connect_ftp(config["FTP"])
dbconn = nrtdb.get_db_conn(config["Database"]["location"])
print("Getting QuinCe instruments...")
quince_instruments = quince.get_instruments(config)
print("Getting NRT instruments...")
nrt_instruments = nrtdb.get_instruments(dbconn)
# Check that NRT and QuinCe are in sync
quince_ids = get_ids(quince_instruments)
nrt_ids = get_ids(nrt_instruments)
# Remove old instruments from NRT
orphaned_ids = list(set(nrt_ids) - set(quince_ids))
if len(orphaned_ids) > 0:
print("The following instruments are no longer in QuinCe and will be removed:\n")
make_instrument_table(nrt_instruments, orphaned_ids, False)
go = input("Enter Y to proceed, or anything else to quit: ")
if not go.lower() == "y":
exit()
else:
nrtdb.delete_instruments(dbconn, orphaned_ids)
# Add new instruments from QuinCe
new_ids = list(set(quince_ids) - set(nrt_ids))
if len(new_ids) > 0:
print("The following instruments are new in QuinCe and will be added:\n")
make_instrument_table(quince_instruments, new_ids, False)
go = input("Enter Y to proceed, or anything else to quit: ")
if not go.lower() == "y":
exit()
else:
nrtdb.add_instruments(dbconn, quince_instruments, new_ids)
nrtftp.add_instruments(ftpconn, config["FTP"], new_ids)
# Main configuration loop
quit = False
while not quit:
print()
instruments = nrtdb.get_instruments(dbconn)
make_instrument_table(instruments, None, True)
command = input("\nEnter instrument ID to configure, or Q to quit: ").lower()
if command == "q":
quit = True
else:
instrument_id = None
try:
instrument_id = int(command)
except:
pass
if instrument_id is not None:
instrument = nrtdb.get_instrument(dbconn, instrument_id)
if instrument is not None:
retriever = None
print()
print("Current configuration for instrument %d (%s):" % \
(instrument["id"], instrument["name"]))
print()
print("TYPE: %s" % (instrument["type"]))
if instrument["type"] is not None and instrument["config"] is not None:
retriever = RetrieverFactory.get_instance(instrument["type"], \
instrument["id"], logger, json.loads(instrument["config"]))
retriever.print_configuration()
print()
print("PREPROCESSOR: %s" % instrument["preprocessor"])
print()
change = input("Change configuration (y/n)? ").lower()
if change == "y":
new_type = RetrieverFactory.ask_retriever_type()
if new_type is None:
nrtdb.store_configuration(dbconn, instrument["id"], None)
else:
if new_type != instrument["type"]:
retriever = RetrieverFactory.get_new_instance(new_type)
config_ok = False
while not config_ok:
print()
config_ok = retriever.enter_configuration()
print()
preprocessor = PreprocessorFactory.ask_preprocessor()
nrtdb.store_configuration(dbconn, instrument["id"], retriever, preprocessor)
except urllib.error.URLError as e:
print(e)
except urllib.error.HTTPError as e:
print("%s %s" % (e.code, e.reason))
finally:
if dbconn is not None:
nrtdb.close(dbconn)
if ftpconn is not None:
ftpconn.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
odoousers2014/LibrERP | account_invoice_force_number/invoice.py | 2 | 1305 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class account_invoice(osv.osv):
_inherit = "account.invoice"
_columns = {
'internal_number': fields.char('Invoice Number', size=32, readonly=True, states={'draft':[('readonly',False)]}),
}
account_invoice()
| agpl-3.0 |
paltman/nashvegas | nashvegas/management/commands/syncdb.py | 4 | 1185 | from django.core.management import call_command
from django.core.management.commands.syncdb import Command as SyncDBCommand
from optparse import make_option
class Command(SyncDBCommand):
option_list = SyncDBCommand.option_list + (
make_option('--skip-migrations',
action='store_false',
dest='migrations',
default=True,
help='Skip nashvegas migrations, do traditional syncdb'),
)
def handle_noargs(self, **options):
# Run migrations first
if options.get("database"):
databases = [options.get("database")]
else:
databases = None
migrations = options.get('migrations')
if migrations:
call_command(
"upgradedb",
do_execute=True,
databases=databases,
interactive=options.get("interactive"),
verbosity=options.get("verbosity"),
)
        # Follow up with a syncdb on anything that wasn't included in migrations
# (this catches things like test-only models)
super(Command, self).handle_noargs(**options)
| mit |
Wilee999/panda3d | direct/src/controls/NonPhysicsWalker.py | 11 | 11774 | """
NonPhysicsWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messages that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
class NonPhysicsWalker(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory("NonPhysicsWalker")
wantDebugIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
# Ghost mode overrides this:
slideName = "slide-is-disabled"
# special methods
def __init__(self):
DirectObject.DirectObject.__init__(self)
self.worldVelocity = Vec3.zero()
self.collisionsActive = 0
self.speed=0.0
self.rotationSpeed=0.0
self.slideSpeed=0.0
self.vel=Vec3(0.0, 0.0, 0.0)
self.stopThisFrame = 0
def setWalkSpeed(self, forward, jump, reverse, rotate):
assert self.debugPrint("setWalkSpeed()")
self.avatarControlForwardSpeed=forward
#self.avatarControlJumpForce=jump
self.avatarControlReverseSpeed=reverse
self.avatarControlRotateSpeed=rotate
def getSpeeds(self):
#assert self.debugPrint("getSpeeds()")
return (self.speed, self.rotationSpeed, self.slideSpeed)
def setAvatar(self, avatar):
self.avatar = avatar
if avatar is not None:
pass # setup the avatar
def setAirborneHeightFunc(self, getAirborneHeight):
self.getAirborneHeight = getAirborneHeight
def setWallBitMask(self, bitMask):
self.cSphereBitMask = bitMask
def setFloorBitMask(self, bitMask):
self.cRayBitMask = bitMask
def swapFloorBitMask(self, oldMask, newMask):
self.cRayBitMask = self.cRayBitMask &~ oldMask
self.cRayBitMask |= newMask
if self.cRayNodePath and not self.cRayNodePath.isEmpty():
self.cRayNodePath.node().setFromCollideMask(self.cRayBitMask)
def initializeCollisions(self, collisionTraverser, avatarNodePath,
avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
"""
Set up the avatar for collisions
"""
assert not avatarNodePath.isEmpty()
self.cTrav = collisionTraverser
self.avatarNodePath = avatarNodePath
# Set up the collision sphere
# This is a sphere on the ground to detect barrier collisions
self.cSphere = CollisionSphere(0.0, 0.0, 0.0, avatarRadius)
cSphereNode = CollisionNode('NPW.cSphereNode')
cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = avatarNodePath.attachNewNode(cSphereNode)
cSphereNode.setFromCollideMask(self.cSphereBitMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
        # Set up the collision ray
# This is a ray cast from your head down to detect floor polygons.
# This ray start is arbitrarily high in the air. Feel free to use
# a higher or lower value depending on whether you want an avatar
# that is outside of the world to step up to the floor when they
# get under valid floor:
self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
cRayNode = CollisionNode('NPW.cRayNode')
cRayNode.addSolid(self.cRay)
self.cRayNodePath = avatarNodePath.attachNewNode(cRayNode)
cRayNode.setFromCollideMask(self.cRayBitMask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
# set up wall collision mechanism
self.pusher = CollisionHandlerPusher()
self.pusher.setInPattern("enter%in")
self.pusher.setOutPattern("exit%in")
# set up floor collision mechanism
self.lifter = CollisionHandlerFloor()
self.lifter.setInPattern("on-floor")
self.lifter.setOutPattern("off-floor")
self.lifter.setOffset(floorOffset)
self.lifter.setReach(reach)
# Limit our rate-of-fall with the lifter.
# If this is too low, we actually "fall" off steep stairs
# and float above them as we go down. I increased this
# from 8.0 to 16.0 to prevent this
self.lifter.setMaxVelocity(16.0)
self.pusher.addCollider(self.cSphereNodePath, avatarNodePath)
self.lifter.addCollider(self.cRayNodePath, avatarNodePath)
# activate the collider with the traverser and pusher
self.setCollisionsActive(1)
def deleteCollisions(self):
del self.cTrav
del self.cSphere
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.cRay
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.pusher
del self.lifter
def setTag(self, key, value):
self.cSphereNodePath.setTag(key, value)
def setCollisionsActive(self, active = 1):
assert self.debugPrint("setCollisionsActive(active%s)"%(active,))
if self.collisionsActive != active:
self.collisionsActive = active
if active:
self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
self.cTrav.addCollider(self.cRayNodePath, self.lifter)
else:
self.cTrav.removeCollider(self.cSphereNodePath)
self.cTrav.removeCollider(self.cRayNodePath)
# Now that we have disabled collisions, make one more pass
# right now to ensure we aren't standing in a wall.
self.oneTimeCollide()
def placeOnFloor(self):
"""
        Make a reasonable effort to place the avatar on the ground.
For example, this is useful when switching away from the
current walker.
"""
# With these on, getAirborneHeight is not returning the correct value so
# when we open our book while swimming we pop down underneath the ground
# self.oneTimeCollide()
# self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.getAirborneHeight())
        # Since this is the non physics walker - won't they already be on the ground?
return
def oneTimeCollide(self):
"""
Makes one quick collision pass for the avatar, for instance as
a one-time straighten-things-up operation after collisions
have been disabled.
"""
tempCTrav = CollisionTraverser("oneTimeCollide")
tempCTrav.addCollider(self.cSphereNodePath, self.pusher)
tempCTrav.addCollider(self.cRayNodePath, self.lifter)
tempCTrav.traverse(render)
def addBlastForce(self, vector):
pass
def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("controls", "NonPhysicsWalker")
def _calcSpeeds(self):
# get the button states:
forward = inputState.isSet("forward")
reverse = inputState.isSet("reverse")
turnLeft = inputState.isSet("turnLeft")
turnRight = inputState.isSet("turnRight")
slide = inputState.isSet(self.slideName) or 0
#jump = inputState.isSet("jump")
# Check for Auto-Run
if base.localAvatar.getAutoRun():
forward = 1
reverse = 0
# Determine what the speeds are based on the buttons:
self.speed=(forward and self.avatarControlForwardSpeed or
reverse and -self.avatarControlReverseSpeed)
# Should fSlide be renamed slideButton?
self.slideSpeed=slide and ((reverse and turnLeft and -self.avatarControlReverseSpeed*(0.75)) or
(reverse and turnRight and self.avatarControlReverseSpeed*(0.75)) or
(turnLeft and -self.avatarControlForwardSpeed*(0.75)) or
(turnRight and self.avatarControlForwardSpeed*(0.75)))
self.rotationSpeed=not slide and (
(turnLeft and self.avatarControlRotateSpeed) or
(turnRight and -self.avatarControlRotateSpeed))
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
if not self.lifter.hasContact():
# hack fix for falling through the floor:
messenger.send("walkerIsOutOfWorld", [self.avatarNodePath])
self._calcSpeeds()
if __debug__:
debugRunning = inputState.isSet("debugRunning")
if debugRunning:
self.speed*=4.0
self.slideSpeed*=4.0
self.rotationSpeed*=1.25
if self.wantDebugIndicator:
self.displayDebugInfo()
# How far did we move based on the amount of time elapsed?
dt=ClockObject.getGlobalClock().getDt()
# Check to see if we're moving at all:
if self.speed or self.slideSpeed or self.rotationSpeed:
if self.stopThisFrame:
distance = 0.0
slideDistance = 0.0
rotation = 0.0
self.stopThisFrame = 0
else:
distance = dt * self.speed
slideDistance = dt * self.slideSpeed
rotation = dt * self.rotationSpeed
# Take a step in the direction of our previous heading.
self.vel=Vec3(Vec3.forward() * distance +
Vec3.right() * slideDistance)
if self.vel != Vec3.zero():
# rotMat is the rotation matrix corresponding to
# our previous heading.
rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
step=rotMat.xform(self.vel)
self.avatarNodePath.setFluidPos(Point3(self.avatarNodePath.getPos()+step))
self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
messenger.send("avatarMoving")
else:
self.vel.set(0.0, 0.0, 0.0)
self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
self.__oldDt = dt
try:
self.worldVelocity = self.__oldPosDelta*(1/self.__oldDt)
except:
# divide by zero
self.worldVelocity = 0
return Task.cont
def doDeltaPos(self):
assert self.debugPrint("doDeltaPos()")
def reset(self):
assert self.debugPrint("reset()")
def getVelocity(self):
return self.vel
def enableAvatarControls(self):
"""
Activate the arrow keys, etc.
"""
assert self.debugPrint("enableAvatarControls")
assert self.collisionsActive
taskName = "AvatarControls-%s"%(id(self),)
# remove any old
taskMgr.remove(taskName)
# spawn the new task
taskMgr.add(self.handleAvatarControls, taskName)
def disableAvatarControls(self):
"""
Ignore the arrow keys, etc.
"""
assert self.debugPrint("disableAvatarControls")
taskName = "AvatarControls-%s"%(id(self),)
taskMgr.remove(taskName)
def flushEventHandlers(self):
if hasattr(self, 'cTrav'):
self.pusher.flush()
self.lifter.flush() # not currently defined or needed
if __debug__:
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug(
str(id(self))+' '+message)
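# Illustrative setup sketch (added comment, not part of the original module):
# a typical call order using only methods defined above; the traverser,
# avatar node path and bitmasks are assumed to come from the caller's scene.
#
#     walker = NonPhysicsWalker()
#     walker.setWalkSpeed(forward=16.0, jump=0.0, reverse=8.0, rotate=80.0)
#     walker.setWallBitMask(wallBitMask)       # caller-supplied BitMask32
#     walker.setFloorBitMask(floorBitMask)
#     walker.initializeCollisions(cTrav, avatarNodePath, avatarRadius=1.4)
#     walker.enableAvatarControls()            # spawns the per-frame control task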
| bsd-3-clause |
dkavraal/typecatcher | typecatcher/html_preview.py | 1 | 5738 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Andrew Starr-Bochicchio <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from locale import gettext as _
from gi.repository import Gtk
from typecatcher_lib.helpers import get_media_file
from random import choice
def html_font_view(font=None, text=None):
start_page_icon = get_media_file("typecatcher.svg")
try:
con_icon_name = "nm-no-connection"
con_theme = Gtk.IconTheme.get_default()
con_info = con_theme.lookup_icon(con_icon_name, 64, 0)
con_icon_uri = "file://" + con_info.get_filename()
except AttributeError:
con_icon_uri = get_media_file("no-connection.svg")
try:
installed_icon_name = "gtk-apply"
installed_theme = Gtk.IconTheme.get_default()
installed_info = installed_theme.lookup_icon(installed_icon_name, 64, 0)
installed_icon_uri = "file://" + installed_info.get_filename()
except AttributeError:
installed_icon_uri = get_media_file("installed.svg")
loader = get_media_file("ajax-loader.gif")
text_preview = select_text_preview(text)
html = """
<html>
<head>
<script src="http://ajax.googleapis.com/ajax/libs/webfont/1.4.10/webfont.js"></script>
<style>
body { font-size: 36px; }
#installed { float: right; font-size: 12px; width:50px; text-align:center; display: None; }
textarea { font: inherit; font-size: inherit; border: None; overflow: hidden; outline: none; width: 90%%; height: 100%%; }
#text_preview { display: None; }
#no_connect { text-align: center; display: None; font-size: 18px; }
#start_page { text-align: center; bottom: 0px;}
.wf-loading { height: 100%%; overflow: hidden; background: url(%s) center center no-repeat fixed;}
.wf-loading * { opacity: 0; }
.wf-active body, .wf-inactive body {
-webkit-animation: fade .25s ease;
animation: fade .25s ease;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
}
@-webkit-keyframes fade {
0%% { display: none; opacity: 0; }
1%% { display: block; }
100%%{ opacity: 1; }
}
@keyframes fade {
0%% { display: none; opacity: 0; }
1%% { display: block; }
100%%{ opacity: 1; }
}
</style>
</head>
<body>
<div id="installed">
<img src="%s" width=64 height=64>
<p>%s</p>
</div>
<div id='no_connect'>
<img src="%s" width=64 height=64 > <h1>%s</h1>
<p>%s</p>
</div>
<div id='text_preview'>
%s
</div>
<div id='start_page'>
<img src="%s" width=128 height=128>
<p>TypeCatcher</p>
</div>
</body>
</html>
""" % (loader, installed_icon_uri, _("Installed"),
con_icon_uri, _("Font not available."),
_("Please check your network connection."), text_preview,
start_page_icon)
return html
def select_text_preview(text):
ipsum = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."""
kafka = _("One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections.")
hgg = _("Far out in the uncharted backwaters of the unfashionable end of the Western Spiral arm of the Galaxy lies a small unregarded yellow sun. Orbiting this at a distance of roughly ninety-eight million miles is an utterly insignificant little blue-green planet...")
ggm = _("Many years later, as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.")
ralph = _("I am an invisible man. No, I am not a spook like those who haunted Edgar Allan Poe; nor am I one of your Hollywood-movie ectoplasms. I am a man of substance, of flesh and bone, fiber and liquids — and I might even be said to possess a mind. I am invisible, understand, simply because people refuse to see me.")
jj = _("Stately, plump Buck Mulligan came from the stairhead, bearing a bowl of lather on which a mirror and a razor lay crossed. A yellow dressinggown, ungirdled, was sustained gently behind him on the mild morning air.")
text_pool = [ipsum, kafka, ggm, hgg, ralph, jj]
if text is None or text == "random":
selected_text = choice(text_pool)
return "<p> %s </p>" % selected_text
elif text == "ipsum":
return "<p> %s </p>" % ipsum
elif text == "kafka":
return "<p> %s </p>" % kafka
elif text == "hgg":
return "<p> %s </p>" % hgg
elif text == "ggm":
return "<p> %s </p>" % ggm
elif text == "ralph":
return "<p> %s </p>" % ralph
elif text == "jj":
return "<p> %s </p>" % jj
elif text == "custom":
return "<textarea> %s </textarea>" % (_('Enter text...'))
| gpl-3.0 |
arouzrokh1/jon | synth.py | 1 | 1436 | from pyttsx.drivers import _espeak
import ctypes
import wave
import time
import threading
import StringIO
class Synth(object):
_done = False
def __init__(self):
self.rate = _espeak.Initialize(_espeak.AUDIO_OUTPUT_RETRIEVAL, 1000)
assert self.rate != -1, 'could not initialize espeak'
_espeak.SetSynthCallback(self)
self.lock = threading.Lock()
def __call__(self, wav, numsamples, events):
if self._done:
return 0
data = ctypes.string_at(wav, numsamples*2)
if len(data) == 0:
self._done = True
return 0
self.wav.writeframes(data)
return 0
def say(self, say, out):
with self.lock:
self.wav = wave.open(out, 'w')
self.wav.setnchannels(1)
self.wav.setsampwidth(2)
self.wav.setframerate(self.rate)
self._done = False
_espeak.Synth(say)
while not self._done:
time.sleep(0)
self.wav.close()
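# Illustrative usage sketch (added comment, not part of the original module):
# say() blocks until espeak has streamed all samples into the output wav.
#
#     synth = Synth()
#     synth.say('hello world', 'hello.wav')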
| apache-2.0 |
pincopallino93/rdfendpoints | lib/rdflib/plugins/parsers/structureddata.py | 6 | 12225 | #!/usr/bin/env python
"""
Extraction parsers for structured data embedded into HTML or XML files.
The former may include RDFa or microdata. The syntax and the extraction
procedures are based on:
* The RDFa specifications: http://www.w3.org/TR/#tr_RDFa
* The microdata specification: http://www.w3.org/TR/microdata/
* The specification of the microdata to RDF conversion:
http://www.w3.org/TR/microdata-rdf/
License: W3C Software License,
http://www.w3.org/Consortium/Legal/copyright-software
Author: Ivan Herman
Copyright: W3C
"""
from rdflib.parser import (
Parser, StringInputSource, URLInputSource, FileInputSource)
try:
import html5lib
assert html5lib
html5lib = True
except ImportError:
import warnings
warnings.warn(
'html5lib not found! RDFa and Microdata ' +
'parsers will not be available.')
html5lib = False
def _get_orig_source(source):
"""
A bit of a hack; the RDFa/microdata parsers need more than what the
upper layers of RDFLib provide...
This method returns the original source references.
"""
if isinstance(source, StringInputSource):
orig_source = source.getByteStream()
elif isinstance(source, URLInputSource):
orig_source = source.url
elif isinstance(source, FileInputSource):
orig_source = source.file.name
source.file.close()
else:
orig_source = source.getByteStream()
baseURI = source.getPublicId()
return (baseURI, orig_source)
def _check_error(graph):
from .pyRdfa import RDFA_Error, ns_rdf
from .pyRdfa.options import ns_dc
for (s, p, o) in graph.triples((None, ns_rdf["type"], RDFA_Error)):
for (x, y, msg) in graph.triples((s, ns_dc["description"], None)):
raise Exception("RDFa parsing Error! %s" % msg)
# This is the parser interface as it would look when called from the
# rest of RDFLib
class RDFaParser(Parser):
"""
Wrapper around the RDFa 1.1 parser. For further details on the RDFa 1.1
processing, see the relevant W3C documents at
http://www.w3.org/TR/#tr_RDFa. RDFa 1.1 is defined for XHTML, HTML5, SVG
and, in general, for any XML language.
Note that the parser can also handle RDFa 1.0 if the extra parameter is
used and/or the input source uses RDFa 1.0 specific @version or DTD-s.
"""
def parse(self, source, graph,
pgraph=None,
media_type="",
rdfa_version=None,
embedded_rdf=False,
vocab_expansion=False, vocab_cache=False):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa spec.
parlance
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor graph,
in RDFa spec. parlance. If set to None, these triples are ignored
@type pgraph: RDFLib Graph
@keyword media_type: explicit setting of the preferred media type
(a.k.a. content type) of the the RDFa source. None means the content
type of the HTTP result is used, or a guess is made based on the
suffix of a file
@type media_type: string
@keyword rdfa_version: 1.0 or 1.1. If the value is "", then, by
default, 1.1 is used unless the source has explicit signals to use
1.0 (e.g., using a @version attribute, using a DTD set up for 1.0, etc)
@type rdfa_version: string
@keyword embedded_rdf: some formats allow embedding RDF in other
formats: (X)HTML can contain turtle in a special <script> element,
SVG can have RDF/XML embedded in a <metadata> element. This flag
controls whether those triples should be interpreted and added to
the output graph. Some languages (e.g., SVG) require this, and the
flag is ignored.
@type embedded_rdf: Boolean
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
        @type vocab_cache: Boolean
"""
if html5lib is False:
raise ImportError(
'html5lib is not installed, cannot use ' +
'RDFa and Microdata parsers.')
(baseURI, orig_source) = _get_orig_source(source)
self._process(graph, pgraph, baseURI, orig_source,
media_type=media_type,
rdfa_version=rdfa_version,
embedded_rdf=embedded_rdf,
vocab_expansion=vocab_expansion, vocab_cache=vocab_cache)
def _process(self, graph, pgraph, baseURI, orig_source,
media_type="",
rdfa_version=None,
embedded_rdf=False,
vocab_expansion=False, vocab_cache=False):
from .pyRdfa import pyRdfa, Options
from rdflib import Graph
processor_graph = pgraph if pgraph is not None else Graph()
self.options = Options(output_processor_graph=True,
embedded_rdf=embedded_rdf,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
if media_type is None:
media_type = ""
processor = pyRdfa(self.options,
base=baseURI,
media_type=media_type,
rdfa_version=rdfa_version)
processor.graph_from_source(orig_source, graph=graph, pgraph=processor_graph, rdfOutput=False)
# This may result in an exception if the graph parsing led to an error
_check_error(processor_graph)
class RDFa10Parser(Parser):
"""
This is just a convenience class to wrap around the RDFa 1.0 parser.
"""
def parse(self, source, graph, pgraph=None, media_type=""):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor
graph, in RDFa spec. parlance. If set to None, these triples are
ignored
@type pgraph: RDFLib Graph
@keyword media_type: explicit setting of the preferred media type
(a.k.a. content type) of the the RDFa source. None means the content
type of the HTTP result is used, or a guess is made based on the
suffix of a file
@type media_type: string
        @keyword rdfOutput: whether Exceptions should be caught and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
RDFaParser().parse(source, graph, pgraph=pgraph,
media_type=media_type, rdfa_version="1.0")
class MicrodataParser(Parser):
"""
Wrapper around an HTML5 microdata, extracted and converted into RDF. For
the specification of microdata, see the relevant section of the HTML5
spec: http://www.w3.org/TR/microdata/; for the algorithm used to extract
microdata into RDF, see http://www.w3.org/TR/microdata-rdf/.
"""
def parse(self, source, graph, vocab_expansion=False, vocab_cache=False):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@type graph: RDFLib Graph
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
        @type vocab_cache: Boolean
        @keyword rdfOutput: whether Exceptions should be caught and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
if html5lib is False:
raise ImportError(
'html5lib is not installed, cannot use RDFa ' +
'and Microdata parsers.')
(baseURI, orig_source) = _get_orig_source(source)
self._process(graph, baseURI, orig_source,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
def _process(self, graph, baseURI, orig_source,
vocab_expansion=False, vocab_cache=False):
from .pyMicrodata import pyMicrodata
processor = pyMicrodata(base=baseURI, vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
processor.graph_from_source(
orig_source, graph=graph, rdfOutput=False)
class StructuredDataParser(Parser):
"""
Convenience parser to extract both RDFa (including embedded Turtle)
and microdata from an HTML file.
It is simply a wrapper around the specific parsers.
"""
def parse(self, source, graph,
pgraph=None,
rdfa_version="",
vocab_expansion=False,
vocab_cache=False,
media_type='text/html'
):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@keyword rdfa_version: 1.0 or 1.1. If the value is "", then, by
default, 1.1 is used unless the source has explicit signals to use 1.0
(e.g., using a @version attribute, using a DTD set up for 1.0, etc)
@type rdfa_version: string
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor
graph, in RDFa spec. parlance. If set to None, these triples are
ignored
@type pgraph: RDFLib Graph
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
        @type vocab_cache: Boolean
        @keyword rdfOutput: whether Exceptions should be caught and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
# Note that the media_type argument is ignored, and is here only to avoid an 'unexpected argument' error.
# This parser works for text/html only anyway...
(baseURI, orig_source) = _get_orig_source(source)
if rdfa_version == "" : rdfa_version = "1.1"
RDFaParser()._process(graph, pgraph, baseURI, orig_source,
media_type='text/html',
rdfa_version=rdfa_version,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
MicrodataParser()._process(graph, baseURI, orig_source,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
from .hturtle import HTurtleParser
HTurtleParser()._process(graph, baseURI, orig_source, media_type='text/html')
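# Illustrative usage sketch (added comment, not part of the original module):
# these parsers are registered as rdflib plugins; the plugin name "rdfa" is
# the conventional one, but treat it as an assumption here.
#
#     import rdflib
#     g = rdflib.Graph()
#     g.parse("http://example.org/page.html", format="rdfa")
#     print(g.serialize(format="turtle"))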
| apache-2.0 |
nkalodimas/invenio | modules/websession/lib/websession_web_tests.py | 27 | 12821 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebMessage module web tests."""
from invenio.config import CFG_SITE_SECURE_URL
from invenio.testutils import make_test_suite, \
run_test_suite, \
InvenioWebTestCase
class InvenioWebSessionWebTest(InvenioWebTestCase):
"""WebSession web tests."""
def _delete_messages(self):
"""Delete all messages from users inbox"""
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_xpath_with_timeout("//input[@name='del_all' and @value='Delete All']")
self.browser.find_element_by_xpath("//input[@name='del_all' and @value='Delete All']").click()
self.handle_popup_dialog()
self.find_element_by_xpath_with_timeout("//input[@value='Yes']")
self.browser.find_element_by_xpath("//input[@value='Yes']").click()
def test_create_group(self):
"""websession - web test create a group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="group test")
self.fill_textbox(textbox_name="group_description", text="test")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible and open for new members")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.page_source_test(expected_text='You have successfully created a new group.')
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.page_source_test(expected_text='You have successfully deleted a group.')
self._delete_messages()
self.logout()
def test_message_group(self):
"""websession - web test send a message to any group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_name_with_timeout("del_all")
self.browser.find_element_by_name("del_all").click()
self.handle_popup_dialog()
self.fill_textbox(textbox_name="search_pattern", text="montague-family")
self.find_element_by_name_with_timeout("search_group")
self.browser.find_element_by_name("search_group").click()
self.choose_selectbox_option_by_label(selectbox_name="names_selected", label="montague-family")
self.find_element_by_name_with_timeout("add_group")
self.browser.find_element_by_name("add_group").click()
self.fill_textbox(textbox_name="msg_subject", text="hello everybody")
self.fill_textbox(textbox_name="msg_body", text="hello")
self.find_element_by_name_with_timeout("send_button")
self.browser.find_element_by_name("send_button").click()
self._delete_messages()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_link_text_with_timeout("hello everybody")
self.browser.find_element_by_link_text("hello everybody").click()
self.page_source_test(expected_text='hello')
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.logout()
def test_create_open_group(self):
"""websession - web test create an open group and join it"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.fill_textbox(textbox_name="group_description", text="all my friends")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible and open for new members")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.logout()
# login as hyde
self.login(username="hyde", password="h123yde")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("leave")
self.browser.find_element_by_name("leave").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("leave_button")
self.browser.find_element_by_name("leave_button").click()
self.handle_popup_dialog()
self.find_element_by_name_with_timeout("leave_button")
self.browser.find_element_by_name("leave_button").click()
self.logout()
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self._delete_messages()
self.logout()
# login as hyde
self.login(username="hyde", password="h123yde")
self._delete_messages()
self.logout()
def test_set_group(self):
"""websession - web test set group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.fill_textbox(textbox_name="group_description", text="all my friends")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible but new members need approval")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.logout()
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_link_text_with_timeout("Group my_friends: New membership request")
self.browser.find_element_by_link_text("Group my_friends: New membership request").click()
self.find_element_by_link_text_with_timeout("accept or reject")
self.browser.find_element_by_link_text("accept or reject").click()
self.choose_selectbox_option_by_label(selectbox_name="pending_member_id", label="romeo")
self.find_element_by_name_with_timeout("add_member")
self.browser.find_element_by_name("add_member").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self._delete_messages()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self._delete_messages()
self.logout()
TEST_SUITE = make_test_suite(InvenioWebSessionWebTest, )
if __name__ == '__main__':
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 |
mozilla-metrics/socorro-toolbox | src/main/python/graphics-correlations.py | 1 | 6775 | from optparse import OptionParser
import psycopg2
import os, sys
import csv
import re
from collections import namedtuple
p = OptionParser(usage='usage: %prog [options] outfile')
p.add_option('-V', '--product-version', dest='productversion',
help='Firefox version', default=None)
p.add_option('-s', '--start-date', dest='startdate',
help='Start date (YYYY-MM-DD)', default=None)
p.add_option('-e', '--end-date', dest='enddate',
help='End date (YYYY-MM-DD)', default=None)
p.add_option('-c', '--cutoff', dest='cutoff', type='int',
             help="Minimum cutoff", default=50)
opts, args = p.parse_args()
if len(args) != 1 or opts.productversion is None or opts.startdate is None or opts.enddate is None:
p.error("Required arguments missing")
sys.exit(1)
datere = re.compile(r'\d{4}-\d{2}-\d{2}$')
if datere.match(opts.startdate) is None or datere.match(opts.enddate) is None:
p.print_usage()
sys.exit(1)
outpattern, = args
connectionstring = open(os.path.expanduser('~/socorro.connection'), 'r').read().strip()
conn = psycopg2.connect(connectionstring)
cur = conn.cursor()
cur.execute('''
SELECT product_version_id
FROM product_versions
WHERE product_name = 'Firefox' AND version_string = %s''', (opts.productversion,))
if cur.rowcount != 1:
    print >>sys.stderr, "Expected exactly one build with version '%s' (found %d)" % (opts.productversion, cur.rowcount)
sys.exit(2)
productversion, = cur.fetchone()
# Note: the startdate/enddate are substituted by python into the query string.
# This is required so that the partition optimizer can exclude irrelevant
# partitions. The product version is passed as a bound parameter.
byvendorq = """
WITH BySignatureGraphics AS (
SELECT
signature_id,
substring(r.app_notes from E'AdapterVendorID: (?:0x)?(\\\\w+)') AS AdapterVendorID,
COUNT(*) AS c
FROM
reports r JOIN reports_clean rc ON rc.uuid = r.uuid
WHERE
r.date_processed > '%(startdate)s'
AND r.date_processed < '%(enddate)s'
AND rc.date_processed > '%(startdate)s'
AND rc.date_processed < '%(enddate)s'
AND r.app_notes ~ E'AdapterVendorID: (?:0x)?(\\\\w+)'
AND r.os_name ~ 'Windows NT'
AND r.hangid IS NULL
AND rc.product_version_id = %%(product_version_id)s
GROUP BY signature_id, AdapterVendorID
),
BySignature AS (
SELECT
signature_id,
SUM(c) AS total_by_signature
FROM BySignatureGraphics
GROUP BY signature_id
),
ByGraphics AS (
SELECT
AdapterVendorID,
SUM(c) AS total_by_graphics
FROM BySignatureGraphics
GROUP BY AdapterVendorID
),
AllCrashes AS (
SELECT SUM(c) AS grand_total
FROM BySignatureGraphics
),
Correlations AS (
SELECT
s.signature,
bsg.AdapterVendorID,
bsg.c,
total_by_signature,
total_by_graphics,
(bsg.c::float * grand_total::float) /
(total_by_signature::float * total_by_graphics::float) AS correlation,
grand_total
FROM
BySignatureGraphics AS bsg
JOIN BySignature ON BySignature.signature_id = bsg.signature_id
JOIN ByGraphics ON
ByGraphics.AdapterVendorID = bsg.AdapterVendorID
CROSS JOIN AllCrashes
JOIN signatures AS s
ON bsg.signature_id = s.signature_id
WHERE
bsg.c > 25
AND total_by_signature > %%(cutoff)s
AND bsg.AdapterVendorID != '0000'
),
HighCorrelation AS (
SELECT
signature,
MAX(correlation) AS highcorrelation
FROM Correlations
GROUP BY signature
)
SELECT
Correlations.*
FROM
Correlations
JOIN HighCorrelation ON
Correlations.signature = HighCorrelation.signature
""" % {'startdate': opts.startdate, 'enddate': opts.enddate}
bycpufamilyq = """
WITH BySignatureCPU AS (
SELECT
signature_id,
substring(r.cpu_info FROM E'\\\\A(\\\\w+ family \\\\d+) model \\\\d+ stepping \\\\d+( \\\\| \\\\d+)?\\\\Z') AS cpufamily,
COUNT(*) AS c
FROM
reports r JOIN reports_clean rc on rc.uuid = r.uuid
WHERE
r.date_processed > '%(startdate)s'
AND r.date_processed < '%(enddate)s'
AND rc.date_processed > '%(startdate)s'
AND rc.date_processed < '%(enddate)s'
AND r.os_name ~ 'Windows NT'
AND r.hangid IS NULL
AND rc.product_version_id = %%(product_version_id)s
AND r.cpu_info ~ E'\\\\A(\\\\w+ family \\\\d+) model \\\\d+ stepping \\\\d+( \\\\| \\\\d+)?\\\\Z'
GROUP BY signature_id, cpufamily
),
BySignature AS (
SELECT
signature_id,
SUM(c) AS total_by_signature
FROM BySignatureCPU
GROUP BY signature_id
),
ByCPU AS (
SELECT
cpufamily,
SUM(c) AS total_by_cpu
FROM BySignatureCPU
GROUP BY cpufamily
),
AllCrashes AS (
SELECT SUM(c) AS grand_total
FROM BySignatureCPU
),
Correlations AS (
SELECT
s.signature,
bsg.cpufamily,
bsg.c,
total_by_signature,
total_by_cpu,
(bsg.c::float * grand_total::float) /
(total_by_signature::float * total_by_cpu::float) AS correlation,
grand_total
FROM
BySignatureCPU AS bsg
JOIN BySignature ON BySignature.signature_id = bsg.signature_id
JOIN ByCPU ON
ByCPU.cpufamily = bsg.cpufamily
CROSS JOIN AllCrashes
JOIN signatures AS s
ON bsg.signature_id = s.signature_id
WHERE
total_by_signature > %%(cutoff)s
),
HighCorrelation AS (
SELECT
signature,
MAX(correlation) AS highcorrelation
FROM Correlations
GROUP BY signature
)
SELECT
Correlations.*
FROM
Correlations
JOIN HighCorrelation ON
Correlations.signature = HighCorrelation.signature
WHERE highcorrelation > 2
""" % {'startdate': opts.startdate, 'enddate': opts.enddate}
Result = namedtuple('Result', ('signature', 'vendorid', 'c', 'bysig', 'bygraphics', 'grandtotal', 'correlation'))
def savedata(cur, filename):
results = []
for signature, vendorid, c, bysig, bygraphics, correlation, grandtotal in cur:
# if bygraph < 50:
# continue
# Also, if less than 20 (SWAG!) unique users have experienced this crash,
# the correlation is going to be really noisy and probably meaningless.
# if correlation < 4:
# continue
results.append(Result(signature, vendorid, c, bysig, bygraphics, grandtotal, correlation))
results.sort(key=lambda r: (r.bysig, r.signature, r.correlation), reverse=True)
fd = open(filename, 'w')
w = csv.writer(fd)
w.writerow(('signature', 'vendorid', 'c', 'bysig', 'bygraphics', 'grandtotal', 'correlation'))
for r in results:
correlation = "{0:.2f}".format(r.correlation)
w.writerow((r.signature, r.vendorid, r.c, r.bysig, r.bygraphics, r.grandtotal, correlation))
fd.close()
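# Note (added comment, not in the original script): the "correlation" column
# computed by the queries above is a lift ratio -- the observed co-occurrence
# count divided by the count expected if signature and adapter vendor (or CPU
# family) were independent. A minimal sketch with hypothetical counts:
#
#   c, by_sig, by_gfx, total = 80, 200, 1000, 10000
#   lift = (c * total) / float(by_sig * by_gfx)   # 4.0 -> 4x over-represented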
params = {
'product_version_id': productversion,
'cutoff': opts.cutoff
}
cur.execute(byvendorq, params)
savedata(cur, outpattern + 'byadaptervendor.csv')
cur.execute(bycpufamilyq, params)
savedata(cur, outpattern + 'bycpufamily.csv')
| apache-2.0 |
mdtraj/mdtraj | mdtraj/__init__.py | 6 | 2628 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""The mdtraj package contains tools for loading and saving molecular dynamics
trajectories in a variety of formats, including Gromacs XTC & TRR, CHARMM/NAMD
DCD, AMBER BINPOS, PDB, and HDF5.
"""
import numpy as _ # silence cython related numpy warnings, see github.com/numpy/numpy/pull/432
from .formats.registry import FormatRegistry
from .formats.xtc import load_xtc
from .formats.trr import load_trr
from .formats.hdf5 import load_hdf5
from .formats.lh5 import load_lh5
from .formats.netcdf import load_netcdf
from .formats.mdcrd import load_mdcrd
from .formats.dcd import load_dcd
from .formats.binpos import load_binpos
from .formats.pdb import load_pdb
from .formats.arc import load_arc
from .formats.openmmxml import load_xml
from .formats.prmtop import load_prmtop
from .formats.psf import load_psf
from .formats.mol2 import load_mol2
from .formats.amberrst import load_restrt, load_ncrestrt
from .formats.lammpstrj import load_lammpstrj
from .formats.dtr import load_dtr, load_stk
from .formats.xyzfile import load_xyz
from .formats.hoomdxml import load_hoomdxml
from .formats.tng import load_tng
from .core import element
from ._rmsd import rmsd, rmsf
from ._lprmsd import lprmsd
from .core.topology import Topology, Single, Double, Triple, Amide, Aromatic
from .geometry import *
from .core.trajectory import *
from .nmr import *
from . import reporters
def capi():
import os
import sys
module_path = sys.modules['mdtraj'].__path__[0]
return {
'lib_dir': os.path.join(module_path, 'core', 'lib'),
'include_dir': os.path.join(module_path, 'core', 'lib'),
}
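# Illustrative usage sketch (added comment, not part of the original package):
# capi() exposes the directories needed to compile C extensions against the
# bundled MDTraj library.
#
#     import mdtraj
#     paths = mdtraj.capi()
#     include_dir, lib_dir = paths['include_dir'], paths['lib_dir']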
| lgpl-2.1 |
decebel/dataAtom_alpha | bin/plug/py/external/pattern/text/search.py | 1 | 41884 | #### PATTERN | EN | PATTERN MATCHING ###############################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import itertools
# Following classes emulate those in en.parser.tree:
class Text(list):
def __init__(self, string="", token=["word"]):
list.__init__(self, [Sentence(s+".", token) for s in string.split(".")])
@property
def sentences(self):
return self
class Sentence:
def __init__(self, string="", token=["word"]):
for punctuation in ".,;?!:": # Naive tokenization.
string = string.replace(punctuation, " "+punctuation)
string = re.sub(r"\s+", " ", string)
string = string.split(" ")
self.words = [Word(self, w, index=i) for i,w in enumerate(string)]
@property
def chunks(self):
return []
class Word(object):
def __init__(self, sentence, string, tag=None, index=0):
self.sentence, self.string, self.tag, self.index = sentence, string, tag, index
def __repr__(self):
return "Word(%s)" % repr(self.string)
def _get_type(self):
return self.tag
def _set_type(self, v):
self.tag = v
type = property(_get_type, _set_type)
@property
def chunk(self):
return None
@property
def lemma(self):
return None
#--- STRING MATCHING -------------------------------------------------------------------------------
WILDCARD = "*"
regexp = type(re.compile(r"."))
def _match(string, pattern):
""" Returns True if the pattern is part of the given word string.
The pattern can include a wildcard (*front, back*, *both*, in*side),
or it can be a compiled regular expression.
"""
p = pattern
try:
if p.startswith(WILDCARD) and (p.endswith(WILDCARD) and p[1:-1] in string or string.endswith(p[1:])):
return True
if p.endswith(WILDCARD) and not p.endswith("\\"+WILDCARD) and string.startswith(p[:-1]):
return True
if p == string:
return True
if WILDCARD in p[1:-1]:
p = p.split(WILDCARD)
return string.startswith(p[0]) and string.endswith(p[-1])
except AttributeError:
# For performance (doing isinstance() at the end is 10% faster for regular strings).
if isinstance(p, regexp):
return p.search(string) is not None
return False
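# Illustrative examples (added comment, not part of the original module),
# all of which follow directly from the wildcard handling above:
#
#     _match("teaching", "tea*")               # True (prefix match)
#     _match("teaching", "*ing")               # True (suffix match)
#     _match("teaching", "*each*")             # True (substring match)
#     _match("teaching", "t*ing")              # True (inner wildcard)
#     _match("teaching", re.compile(r"each"))  # True (compiled regular expression)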
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
def unique(list):
""" Returns a list copy in which each item occurs only once.
"""
u=[]; [u.append(item) for item in list if item not in u]
return u
def unique2(list):
""" Returns a list copy in which each item occurs only once.
This is faster than unique(), but list items must be hashable.
"""
u, s = [], {}
for item in list:
if item not in s: u.append(item)
s[item] = True
return u
def find(function, list):
""" Returns the first item in the list for which function(item) is True, None otherwise.
"""
for item in list:
if function(item) is True:
return item
def combinations(list, n):
""" Returns all possible combinations of length n of the given items in the list.
"""
if n == 0: yield [] # Only one possibility, the empty list.
else:
for i in xrange(len(list)):
for c in combinations(list, n-1):
yield [list[i]] + c
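# Illustrative example (added comment, not part of the original module):
# despite its name, combinations() yields ordered selections with repetition
# (the Cartesian product), which is exactly how variations() uses it below:
#
#     list(combinations([True, False], 2))
#     # -> [[True, True], [True, False], [False, True], [False, False]]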
def variations(list, optional=lambda item: False):
""" Returns all possible variations of a list containing optional items.
"""
# Boolean pattern, True where pattern is optional.
# (A) (B) C --> True True False
o = [optional(item) for item in list]
V = []
# All the possible True/False combinations of optionals.
# (A) (B) C --> True True, True False, False True, False False.
for c in combinations([True, False], sum(o)):
# If True in boolean pattern, replace by boolean in current combination.
# (A) (B) C --> True True False, True False False, False True False, False False False.
v = [b and (b and c.pop(0)) for b in o]
# Replace True by pattern at that index.
# --> (A) (B) C, (A) C, (B) C, C.
v = [list[i] for i in range(len(v)) if not v[i]]
if v not in V:
V.append(v)
# Longest-first.
V.sort(lambda a, b: len(b) - len(a))
return V
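# Illustrative example (added comment, not part of the original module):
# with "A" and "B" marked optional, every way of dropping the optional items
# is returned, longest variation first:
#
#     variations(["A", "B", "C"], optional=lambda s: s in ("A", "B"))
#     # -> [["A", "B", "C"], ["B", "C"], ["A", "C"], ["C"]]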
#### TAXONOMY ######################################################################################
#--- ORDERED DICTIONARY ----------------------------------------------------------------------------
class odict(dict):
""" Dictionary with ordered keys.
With reversed=True, the latest keys will be returned first when traversing the dictionary.
Taxonomy (see below) uses ordered dictionaries:
if a taxonomy term has multiple parents, the last parent is the default.
"""
def __init__(self, d=None, reversed=True):
dict.__init__(self)
self._o = [] # The ordered keys.
self._f = reversed and self._insertkey or self._appendkey
if d != None: self.update(dict(d))
@property
def reversed(self):
return self._f == self._insertkey
@classmethod
def fromkeys(cls, k, v=None, reversed=True):
d = cls(reversed=reversed)
for k in k: d.__setitem__(k,v)
return d
def _insertkey(self, k):
if k not in self: self._o.insert(0,k) # Sort newest-first with reversed=True.
def _appendkey(self, k):
if k not in self: self._o.append(k) # Sort oldest-first with reversed=False.
def append(self, (k, v)):
""" Takes a (key, value)-tuple. Sets the given key to the given value.
If the key exists, pushes the updated item to the head (or tail) of the dict.
"""
if k in self: self.__delitem__(k)
self.__setitem__(k,v)
def update(self, d):
for k,v in d.items(): self.__setitem__(k,v)
def setdefault(self, k, v=None):
if not k in self: self.__setitem__(k,v)
return self[k]
def __setitem__(self, k, v):
self._f(k); dict.__setitem__(self, k, v)
def __delitem__(self, k):
dict.__delitem__(self, k); self._o.remove(k)
def pop(self, k):
self._o.remove(k); return dict.pop(self, k)
def clear(self):
dict.clear(self); self._o=[]
def keys(self):
return self._o
def values(self):
return map(self.get, self._o)
def items(self):
return zip(self._o, self.values())
def __iter__(self):
return self._o.__iter__()
def copy(self):
d = self.__class__(reversed=self.reversed)
for k,v in (self.reversed and reversed(self.items()) or self.items()): d[k] = v
return d
def __repr__(self):
return "{%s}" % ", ".join(["%s: %s" % (repr(k), repr(v)) for k, v in self.items()])
#--- TAXONOMY --------------------------------------------------------------------------------------
class Taxonomy(dict):
def __init__(self):
""" Hierarchical tree of words classified by semantic type.
For example: "rose" and "daffodil" can be classified as "flower":
taxonomy.append("rose", type="flower")
taxonomy.append("daffodil", type="flower")
print taxonomy.children("flower")
Taxonomy terms can be used in a Pattern:
FLOWER will match "flower" as well as "rose" and "daffodil".
The taxonomy is case insensitive by default.
"""
self.case_sensitive = False
self._values = {}
self.classifiers = []
def _normalize(self, term):
try:
return not self.case_sensitive and term.lower() or term
except: # Not a string.
return term
def __contains__(self, term):
# Check if the term is in the dictionary.
# If the term is not in the dictionary, check the classifiers.
term = self._normalize(term)
if dict.__contains__(self, term):
return True
for classifier in self.classifiers:
if classifier.parents(term) \
or classifier.children(term):
return True
return False
def append(self, term, type=None, value=None):
""" Appends the given term to the taxonomy and tags it as the given type.
Optionally, a disambiguation value can be supplied.
For example: taxonomy.append("many", "quantity", "50-200")
"""
term = self._normalize(term)
type = self._normalize(type)
self.setdefault(term, (odict(), odict()))[0].append((type, True))
self.setdefault(type, (odict(), odict()))[1].append((term, True))
self._values[term] = value
def classify(self, term, **kwargs):
""" Returns the (most recently added) semantic type for the given term ("many" => "quantity").
If the term is not in the dictionary, try Taxonomy.classifiers.
"""
term = self._normalize(term)
if dict.__contains__(self, term):
return self[term][0].keys()[-1]
# If the term is not in the dictionary, check the classifiers.
# Returns the first term in the list returned by a classifier.
for classifier in self.classifiers:
# **kwargs are useful if the classifier requests extra information,
# for example the part-of-speech tag.
v = classifier.parents(term, **kwargs)
if v:
return v[0]
def parents(self, term, recursive=False, **kwargs):
""" Returns a list of all semantic types for the given term.
If recursive=True, traverses parents up to the root.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][0].keys()
for classifier in self.classifiers:
a.extend(classifier.parents(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique2(dfs(self._normalize(term), recursive, {}, **kwargs))
def children(self, term, recursive=False, **kwargs):
""" Returns all terms of the given semantic type: "quantity" => ["many", "lot", "few", ...]
If recursive=True, traverses children down to the leaves.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][1].keys()
for classifier in self.classifiers:
a.extend(classifier.children(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique2(dfs(self._normalize(term), recursive, {}, **kwargs))
def value(self, term, **kwargs):
""" Returns the value of the given term ("many" => "50-200")
"""
term = self._normalize(term)
if term in self._values:
return self._values[term]
for classifier in self.classifiers:
v = classifier.value(term, **kwargs)
if v is not None:
return v
def remove(self, term):
if dict.__contains__(self, term):
for w in self.parents(term):
self[w][1].pop(term)
dict.pop(self, term)
# Global taxonomy:
TAXONOMY = taxonomy = Taxonomy()
#taxonomy.append("rose", type="flower")
#taxonomy.append("daffodil", type="flower")
#taxonomy.append("flower", type="plant")
#print taxonomy.classify("rose")
#print taxonomy.children("plant", recursive=True)
#c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
#taxonomy.classifiers.append(c)
#print taxonomy.classify("roughness")
#--- TAXONOMY CLASSIFIER ---------------------------------------------------------------------------
class Classifier:
def __init__(self, parents=lambda term: [], children=lambda term: [], value=lambda term: None):
""" A classifier uses a rule-based approach to enrich the taxonomy, for example:
c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
taxonomy.classifiers.append(c)
This tags any word ending in -ness as "quality".
This is much shorter than manually adding "roughness", "sharpness", ...
Other examples of useful classifiers: calling en.wordnet.Synset.hyponyms() or en.number().
"""
self.parents = parents
self.children = children
self.value = value
# Classifier(parents=lambda word: word.endswith("ness") and ["quality"] or [])
# Classifier(parents=lambda word, chunk=None: chunk=="VP" and [ACTION] or [])
class WordNetClassifier(Classifier):
def __init__(self, wordnet=None):
if wordnet is None:
try: from en import wordnet
except:
pass
Classifier.__init__(self, self._parents, self._children)
self.wordnet = wordnet
def _children(self, word, pos="NN"):
try: return [w.senses[0] for w in self.wordnet.synsets(word, pos)[0].hyponyms()]
except KeyError:
pass
def _parents(self, word, pos="NN"):
try: return [w.senses[0] for w in self.wordnet.synsets(word, pos)[0].hypernyms()]
except KeyError:
pass
#from en import wordnet
#taxonomy.classifiers.append(WordNetClassifier(wordnet))
#print taxonomy.parents("ponder", pos="VB")
#print taxonomy.children("computer")
#### PATTERN #######################################################################################
#--- PATTERN CONSTRAINT ----------------------------------------------------------------------------
# Allowed chunk, role and part-of-speech tags (Penn Treebank II):
CHUNKS = dict.fromkeys(["NP", "PP", "VP", "ADVP", "ADJP", "SBAR", "PRT", "INTJ"], True)
ROLES = dict.fromkeys(["SBJ", "OBJ", "PRD", "TMP", "CLR", "LOC", "DIR", "EXT", "PRP"], True)
TAGS = dict.fromkeys(["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "JJ*",
"LS", "MD", "NN", "NNS", "NNP", "NNPS", "NN*", "PDT", "PRP",
"PRP$", "PRP*", "RB", "RBR", "RBS", "RB*", "RP", "SYM", "TO",
"UH", "VB", "VBZ", "VBP", "VBD", "VBN", "VBG", "VB*",
"WDT", "WP*", "WRB", ".", ",", ":", "(", ")"], True)
ALPHA = re.compile("[a-zA-Z]")
has_alpha = lambda string: ALPHA.match(string) is not None
class Constraint:
def __init__(self, words=[], tags=[], chunks=[], roles=[], taxa=[], optional=False, multiple=False, first=False, taxonomy=TAXONOMY, exclude=None):
""" A range of words, tags and taxonomy terms that matches certain words in a sentence.
For example:
Constraint.fromstring("with|of") matches either "with" or "of".
Constraint.fromstring("(JJ)") optionally matches an adjective.
Constraint.fromstring("NP|SBJ") matches subject noun phrases.
Constraint.fromstring("QUANTITY|QUALITY") matches quantity-type and quality-type taxa.
"""
self.index = 0
self.words = list(words) # Allowed words/lemmata (of, with, ...)
self.tags = list(tags) # Allowed parts-of-speech (NN, JJ, ...)
self.chunks = list(chunks) # Allowed chunk types (NP, VP, ...)
self.roles = list(roles) # Allowed chunk roles (SBJ, OBJ, ...)
self.taxa = list(taxa) # Allowed word categories.
self.taxonomy = taxonomy
self.optional = optional
self.multiple = multiple
self.first = first
self.exclude = exclude # Constraint of words that are *not* allowed, or None.
@classmethod
def fromstring(cls, s, **kwargs):
""" Returns a new Constraint from the given string.
Uppercase words indicate either a tag ("NN", "JJ", "VP")
or a taxonomy term (e.g., "PRODUCT", "PERSON").
Syntax:
( defines an optional constraint, e.g., "(JJ)".
[ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]".
_ is converted to spaces, e.g., "Windows_Vista".
| separates different options, e.g., "ADJP|ADVP".
! can be used as a word prefix to disallow it.
* can be used as a wildcard character, e.g., "soft*|JJ*".
? as a suffix defines a constraint that is optional, e.g., "JJ?".
+ as a suffix defines a constraint that can span multiple words, e.g., "JJ+".
^ as a prefix defines a constraint that can only match the first word.
These characters need to be escaped if used as content: "\(".
"""
C = cls(**kwargs)
s = s.strip()
s = s.strip("{}")
s = s.strip()
for i in range(3):
# Wrapping order of control characters is ignored:
# (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+?
if s.startswith("^"):
s = s[1: ]; C.first = True
if s.endswith("+") and not s.endswith("\+"):
s = s[0:-1]; C.multiple = True
if s.endswith("?") and not s.endswith("\?"):
s = s[0:-1]; C.optional = True
if s.startswith("(") and s.endswith(")"):
s = s[1:-1]; C.optional = True
if s.startswith("[") and s.endswith("]"):
s = s[1:-1]
s = re.sub(r"^\\\^", "^", s)
s = re.sub(r"\\\+$", "+", s)
s = s.replace("\_", "&uscore;")
s = s.replace("_"," ")
s = s.replace("&uscore;", "_")
s = s.replace("&lparen;", "(")
s = s.replace("&rparen;", ")")
s = s.replace("[", "[")
s = s.replace("]", "]")
s = s.replace("&lcurly;", "{")
s = s.replace("&rcurly;", "}")
s = s.replace("\(", "(")
s = s.replace("\)", ")")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "{")
s = s.replace("\}", "}")
s = s.replace("\*", "*")
s = s.replace("\?", "?")
s = s.replace("\+", "+")
s = s.replace("\^", "^")
s = s.replace("\|", "⊢")
s = s.split("|")
s = [v.replace("⊢", "|").strip() for v in s]
for v in s:
C._append(v)
return C
def _append(self, v):
if v.startswith("!") and self.exclude is None:
self.exclude = Constraint()
if v.startswith("!"):
self.exclude._append(v[1:]); return
if "!" in v:
v = v.replace("\!", "!")
if v != v.upper():
self.words.append(v.lower())
elif v in TAGS:
self.tags.append(v)
elif v in CHUNKS:
self.chunks.append(v)
elif v in ROLES:
self.roles.append(v)
elif v in self.taxonomy or has_alpha(v):
self.taxa.append(v.lower())
else:
# Uppercase words indicate tags or taxonomy terms.
# However, this also matches "*" or "?" or "0.25".
            # Unless such punctuation is defined in the taxonomy, it is added to Constraint.words.
self.words.append(v.lower())
def match(self, word):
""" Return True if the given Word is part of the constraint:
- the word (or lemma) occurs in Constraint.words, OR
- the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND
- the word and/or chunk tags match those defined in the constraint.
Individual terms in Constraint.words or the taxonomy can contain wildcards (*).
Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB*
If the given word contains spaces (e.g., proper noun),
the entire chunk will also be compared.
For example: Constraint(words=["Mac OS X*"])
matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
"""
# If the constraint can only match the first word, Word.index must be 0.
if self.first and word.index > 0:
return False
# If the constraint defines excluded options, Word can not match any of these.
if self.exclude and self.exclude.match(word):
return False
# If the constraint defines allowed tags, Word.tag needs to match one of these.
if self.tags:
if find(lambda w: _match(word.tag, w), self.tags) is None:
return False
# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.
if self.chunks:
ch = word.chunk and word.chunk.tag or None
if find(lambda w: _match(ch, w), self.chunks) is None:
return False
# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.
if self.roles:
R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or []
if find(lambda w: w in R, self.roles) is None:
return False
# If the constraint defines allowed words,
# Word.string.lower() OR Word.lemma needs to match one of these.
        b = True # b==True when word in constraint (or Constraint.words=[]).
if len(self.words) + len(self.taxa) > 0:
s1 = word.string.lower()
s2 = word.lemma
b = False
for w in itertools.chain(self.words, self.taxa):
# If the constraint has a word with spaces (e.g., a proper noun),
# compare it to the entire chunk.
try:
if " " in w and (s1 in w or s2 and s2 in w or "*" in w):
s1 = word.chunk and word.chunk.string.lower() or s1
s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2
except:
s1 = s1
s2 = None
# Compare the word to the allowed words (which can contain wildcards).
if _match(s1, w):
b=True; break
# Compare the word lemma to the allowed words, e.g.,
# if "was" is not in the constraint, perhaps "be" is, which is a good match.
if s2 and _match(s2, w):
b=True; break
# If the constraint defines allowed taxonomy terms,
# and the given word did not match an allowed word, traverse the taxonomy.
# The search goes up from the given word to its parents in the taxonomy.
# This is faster than traversing all the children of terms in Constraint.taxa.
# The drawback is that:
# 1) Wildcards in the taxonomy are not detected (use classifiers instead),
        # 2) Classifier.children() has no effect, only Classifier.parents().
if self.taxa and (not self.words or (self.words and not b)):
for s in (
word.string, # "ants"
word.lemma, # "ant"
word.chunk and word.chunk.string or None, # "army ants"
word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant"
if s is not None:
if self.taxonomy.case_sensitive is False:
s = s.lower()
# Compare ancestors of the word to each term in Constraint.taxa.
for p in self.taxonomy.parents(s, recursive=True):
if find(lambda s: p==s, self.taxa): # No wildcards.
return True
return b
def __repr__(self):
s = []
for k,v in (
( "words", self.words),
( "tags", self.tags),
("chunks", self.chunks),
( "roles", self.roles),
( "taxa", self.taxa)):
if v: s.append("%s=%s" % (k, repr(v)))
return "Constraint(%s)" % ", ".join(s)
@property
def string(self):
a = self.words + self.tags + self.chunks + self.roles + [w.upper() for w in self.taxa]
a = (escape(s) for s in a)
a = (s.replace("\\*", "*") for s in a)
a = [s.replace(" ", "_") for s in a]
if self.exclude:
a.extend("!"+s for s in self.exclude.string[1:-1].split("|"))
return (self.optional and "%s(%s)%s" or "%s[%s]%s") % (
self.first and "^" or "", "|".join(a), self.multiple and "+" or "")
#--- PATTERN ---------------------------------------------------------------------------------------
STRICT = "strict"
GREEDY = "greedy"
class Pattern:
def __init__(self, sequence=[], *args, **kwargs):
""" A sequence of constraints that matches certain phrases in a sentence.
The given list of Constraint objects can contain nested lists (groups).
"""
# Parse nested lists and tuples from the sequence into groups.
# [DT [JJ NN]] => Match.group(1) will yield the JJ NN sequences.
def _ungroup(sequence, groups=None):
for v in sequence:
if isinstance(v, (list, tuple)):
if groups is not None:
groups.append(list(_ungroup(v, groups=None)))
for v in _ungroup(v, groups):
yield v
else:
yield v
self.groups = []
self.sequence = list(_ungroup(sequence, groups=self.groups))
# Assign Constraint.index:
i = 0
for constraint in self.sequence:
constraint.index = i; i+=1
# There are two search modes: STRICT and GREEDY.
# - In STRICT, "rabbit" matches only the string "rabbit".
# - In GREEDY, "rabbit|NN" matches the string "rabbit" tagged "NN".
# - In GREEDY, "rabbit" matches "the big white rabbit" (the entire chunk is a match).
# - Pattern.greedy(chunk, constraint) determines (True/False) if a chunk is a match.
self.strict = kwargs.get("strict", STRICT in args and not GREEDY in args)
self.greedy = kwargs.get("greedy", lambda chunk, constraint: True)
def __iter__(self):
return iter(self.sequence)
def __len__(self):
return len(self.sequence)
def __getitem__(self, i):
return self.sequence[i]
@classmethod
def fromstring(cls, s, *args, **kwargs):
""" Returns a new Pattern from the given string.
Constraints are separated by a space.
If a constraint contains a space, it must be wrapped in [].
"""
s = s.replace("\(", "&lparen;")
s = s.replace("\)", "&rparen;")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "&lcurly;")
s = s.replace("\}", "&rcurly;")
p = []
i = 0
for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
# Spaces in a range encapsulated in square brackets are encoded.
# "[Windows Vista]" is one range, don't split on space.
p.append(s[i:m.start()])
p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
p.append(s[i:])
s = "".join(p)
s = s.replace("][", "] [")
s = s.replace(")(", ") (")
s = s.replace("\|", "⊢")
s = re.sub(r"\s+\|\s+", "|", s)
s = re.sub(r"\s+", " ", s)
s = re.sub(r"\{\s+", "{", s)
s = re.sub(r"\s+\}", "}", s)
s = s.split(" ")
s = [v.replace("&space;"," ") for v in s]
P = cls([], *args, **kwargs)
G, O, i = [], [], 0
for s in s:
constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
constraint.index = len(P.sequence)
P.sequence.append(constraint)
# Push a new group on the stack if string starts with "{".
# Parse constraint from string, add it to all open groups.
# Pop latest group from stack if string ends with "}".
# Insert groups in opened-first order (i).
while s.startswith("{"):
s = s[1:]
G.append((i, [])); i+=1
O.append([])
for g in G:
g[1].append(constraint)
while s.endswith("}"):
s = s[:-1]
if G: O[G[-1][0]] = G[-1][1]; G.pop()
P.groups = [g for g in O if g]
return P
def search(self, sentence):
""" Returns a list of all matches found in the given sentence.
"""
if sentence.__class__.__name__ == "Text":
a=[]; [a.extend(self.search(s)) for s in sentence]; return a
a = []
v = self._variations()
u = {}
m = self.match(sentence, _v=v)
while m:
a.append(m)
m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u)
return a
def match(self, sentence, start=0, _v=None, _u=None):
""" Returns the first match found in the given sentence, or None.
"""
if sentence.__class__.__name__ == "Text":
return find(lambda (m,s): m!=None, ((self.match(s, start, _v), s) for s in sentence))[0]
if isinstance(sentence, basestring):
sentence = Sentence(sentence)
# Variations (_v) further down the list may match words more to the front.
# We need to check all of them. Unmatched variations are blacklisted (_u).
# Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster).
a = []
for sequence in (_v is not None and _v or self._variations()):
if _u is not None and id(sequence) in _u:
continue
m = self._match(sequence, sentence, start)
if m is not None:
a.append((m.words[0].index, len(m.words), m))
if m is not None and m.words[0].index == start:
return m
if m is None and _u is not None:
_u[id(sequence)] = False
# Return the leftmost-longest.
if len(a) > 0:
return sorted(a)[0][-1]
def _variations(self):
v = variations(self.sequence, optional=lambda constraint: constraint.optional)
v = sorted(v, key=len, reverse=True)
return v
def _match(self, sequence, sentence, start=0, i=0, w0=None, map=None, d=0):
# Backtracking tree search.
# Finds the first match in the sentence of the given sequence of constraints.
# start : the current word index.
# i : the current constraint index.
# w0 : the first word that matches a constraint.
# map : a dictionary of (Word index, Constraint) items.
# d : recursion depth.
# XXX - We can probably rewrite all of this using (faster) regular expressions.
if map is None:
map = {}
# MATCH
if i == len(sequence):
if w0 is not None:
w1 = sentence.words[start-1]
# Greedy algorithm:
# - "cat" matches "the big cat" if "cat" is head of the chunk.
# - "Tom" matches "Tom the cat" if "Tom" is head of the chunk.
# - This behavior is ignored with POS-tag constraints:
# "Tom|NN" can only match single words, not chunks.
# - This is also True for negated POS-tags (e.g., !NN).
w01 = [w0, w1]
for j in (0, -1):
constraint, w = sequence[j], w01[j]
if self.strict is False and w.chunk is not None:
if len(constraint.tags) == 0:
if constraint.exclude is None or len(constraint.exclude.tags) == 0:
if constraint.match(w.chunk.head):
w01[j] = w.chunk.words[j]
if constraint.exclude and constraint.exclude.match(w.chunk.head):
return None
if self.greedy(w.chunk, constraint) is False: # User-defined.
return None
w0, w1 = w01
# Update map for optional chunk words (see below).
words = sentence.words[w0.index:w1.index+1]
for w in words:
if w.index not in map and w.chunk:
wx = find(lambda w: w.index in map, reversed(w.chunk.words))
if wx:
map[w.index] = map[wx.index]
# Return matched word range, we'll need the map to build Match.constituents().
return Match(self, words, map)
return None
# RECURSION
constraint = sequence[i]
for w in sentence.words[start:]:
#print " "*d, "match?", w, sequence[i].string # DEBUG
if i < len(sequence) and constraint.match(w):
#print " "*d, "match!", w, sequence[i].string # DEBUG
map[w.index] = constraint
if constraint.multiple:
# Next word vs. same constraint if Constraint.multiple=True.
m = self._match(sequence, sentence, w.index+1, i, w0 or w, map, d+1)
if m: return m
# Next word vs. next constraint.
m = self._match(sequence, sentence, w.index+1, i+1, w0 or w, map, d+1)
if m: return m
# Chunk words other than the head are optional:
# - Pattern.fromstring("cat") matches "cat" but also "the big cat" (overspecification).
# - Pattern.fromstring("cat|NN") does not match "the big cat" (explicit POS-tag).
if w0 is not None and len(constraint.tags) == 0:
if constraint.exclude is None or len(constraint.exclude.tags) == 0:
if self.strict is False and w.chunk is not None and w.chunk.head != w:
continue
break
# Part-of-speech tags match one single word.
if w0 is not None and len(constraint.tags) > 0:
break
if w0 is not None and constraint.exclude and len(constraint.exclude.tags) > 0:
break
@property
def string(self):
return " ".join(constraint.string for constraint in self.sequence)
_cache = {}
_CACHE_SIZE = 100 # Number of dynamic Pattern objects to keep in cache.
def compile(pattern, *args, **kwargs):
""" Returns a Pattern from the given string or regular expression.
Recently compiled patterns are kept in cache.
"""
id, p = repr(pattern)+repr(args), None
if id in _cache:
return _cache[id]
if isinstance(pattern, basestring):
p = Pattern.fromstring(pattern, *args, **kwargs)
if isinstance(pattern, regexp):
p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs)
if len(_cache) > _CACHE_SIZE:
_cache.clear()
if isinstance(p, Pattern):
_cache[id] = p
return p
else:
raise TypeError, "can't compile '%s' object" % pattern.__class__.__name__
def match(pattern, sentence, *args, **kwargs):
""" Returns the first match found in the given sentence, or None.
"""
return compile(pattern, *args, **kwargs).match(sentence)
def search(pattern, sentence, *args, **kwargs):
""" Returns a list of all matches found in the given sentence.
"""
return compile(pattern, *args, **kwargs).search(sentence)
def escape(string):
""" Returns the string with control characters for Pattern syntax escaped.
For example: "hello!" => "hello\!".
"""
for ch in ("{","}","[","]","(",")","_","|","!","*","+","^"):
string = string.replace(ch, "\\"+ch)
return string
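# Illustrative sketch (assumption, not in the original source): the module-level helpers
# compile(), match() and search() keep recently compiled patterns in _cache, so repeating
# the same string pattern is cheap. Sentence and parse are assumed to come from the en module.
#from en import Sentence, parse
#s = Sentence(parse("The big black cat sat on the mat.", lemmata=True))
#for m in search("DT JJ+ NN", s):
#    print m.string # => e.g. "The big black cat", depending on the tagger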
#--- PATTERN MATCH ---------------------------------------------------------------------------------
class Match:
def __init__(self, pattern, words=[], map={}):
""" Search result returned from Pattern.match(sentence),
containing a sequence of Word objects.
"""
self.pattern = pattern
self.words = words
self._map1 = dict() # Word index to Constraint.
self._map2 = dict() # Constraint index to list of Word indices.
for w in self.words:
self._map1[w.index] = map[w.index]
for k,v in self._map1.items():
self._map2.setdefault(self.pattern.sequence.index(v),[]).append(k)
for k,v in self._map2.items():
v.sort()
def __len__(self):
return len(self.words)
def __iter__(self):
return iter(self.words)
def __getitem__(self, i):
return self.words.__getitem__(i)
@property
def start(self):
return self.words and self.words[0].index or None
@property
def stop(self):
return self.words and self.words[-1].index+1 or None
def constraint(self, word):
""" Returns the constraint that matches the given Word, or None.
"""
if word.index in self._map1:
return self._map1[word.index]
def constraints(self, chunk):
""" Returns a list of constraints that match the given Chunk.
"""
a = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
b = []; [b.append(constraint) for constraint in a if constraint not in b]
return b
def constituents(self, constraint=None):
""" Returns a list of Word and Chunk objects,
where words have been grouped into their chunks whenever possible.
Optionally, returns only chunks/words that match given constraint(s), or constraint index.
"""
# Select only words that match the given constraint.
# Note: this will only work with constraints from Match.pattern.sequence.
W = self.words
n = len(self.pattern.sequence)
if isinstance(constraint, (int, Constraint)):
if isinstance(constraint, int):
i = constraint
i = i<0 and i%n or i
else:
i = self.pattern.sequence.index(constraint)
W = self._map2.get(i,[])
W = [self.words[i-self.words[0].index] for i in W]
if isinstance(constraint, (list, tuple)):
W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint]
W = [self.words[i-self.words[0].index] for i in W]
W = unique(W)
a = []
i = 0
while i < len(W):
w = W[i]
if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words:
i += len(w.chunk) - 1
a.append(w.chunk)
else:
a.append(w)
i += 1
return a
def group(self, index, chunked=False):
""" Returns a list of Word objects that match the given group.
With chunked=True, returns a list of Word + Chunk objects - see Match.constituents().
A group consists of consecutive constraints wrapped in { }, e.g.,
search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black.
"""
if index < 0 or index > len(self.pattern.groups):
raise IndexError, "no such group"
if index > 0 and index <= len(self.pattern.groups):
g = self.pattern.groups[index-1]
if index == 0:
g = self.pattern.sequence
if chunked is True:
return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g]))
return Group(self, [w for w in self.words if self.constraint(w) in g])
@property
def string(self):
return " ".join(w.string for w in self.words)
def __repr__(self):
return "Match(words=%s)" % repr(self.words)
#--- PATTERN MATCH GROUP ---------------------------------------------------------------------------
class Group(list):
def __init__(self, match, words):
list.__init__(self, words)
self.match = match
@property
def words(self):
return list(self)
@property
def start(self):
return self and self[0].index or None
@property
def stop(self):
return self and self[-1].index+1 or None
@property
def string(self):
return " ".join(w.string for w in self)
#from en import Sentence, parse
#s = Sentence(parse("I was looking at the big cat, and the big cat was staring back", lemmata=True))
#p = Pattern.fromstring("(*_look*|)+ (at|)+ (DT|)+ (*big_cat*)+")
#
#def profile():
# import time
# t = time.time()
# for i in range(100):
# p.search(s)
# print time.time()-t
#
#import cProfile
##import pstats
#cProfile.run("profile()", "_profile")
#p = pstats.Stats("_profile")
#p.stream = open("_profile", "w")
#p.sort_stats("time").print_stats(30)
#p.stream.close()
#s = open("_profile").read()
#print s
| apache-2.0 |
hanx11/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
mrquim/repository.mrquim | script.video.F4mProxy/lib/f4mUtils/codec.py | 88 | 2613 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Classes for reading/writing binary data (such as TLS records)."""
from .compat import *
class Writer(object):
def __init__(self):
self.bytes = bytearray(0)
def add(self, x, length):
self.bytes += bytearray(length)
newIndex = len(self.bytes) - 1
for count in range(length):
self.bytes[newIndex] = x & 0xFF
x >>= 8
newIndex -= 1
def addFixSeq(self, seq, length):
for e in seq:
self.add(e, length)
def addVarSeq(self, seq, length, lengthLength):
self.add(len(seq)*length, lengthLength)
for e in seq:
self.add(e, length)
class Parser(object):
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise SyntaxError()
x = 0
for count in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
if self.index + lengthBytes > len(self.bytes):
raise SyntaxError()
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise SyntaxError()
lengthList = lengthList // length
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise SyntaxError()
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
raise SyntaxError()
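# Illustrative sketch (assumption, not part of the original module): a Writer/Parser round-trip.
# Writer.add() stores integers big-endian in a fixed number of bytes; Parser.get() reverses it.
#
# w = Writer()
# w.add(0x0301, 2)              # e.g. a TLS protocol version, written as two bytes
# w.addVarSeq([1, 2, 3], 1, 2)  # 2-byte length prefix followed by three 1-byte items
# p = Parser(w.bytes)
# assert p.get(2) == 0x0301
# assert p.getVarList(1, 2) == [1, 2, 3]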
| gpl-2.0 |
alphafoobar/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/backends.py | 230 | 4582 | from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
"""
Authenticates against django.contrib.auth.models.User.
"""
supports_object_permissions = False
supports_anonymous_user = True
supports_inactive_user = True
# TODO: Model, login attribute name and password attribute name should be
# configurable.
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def get_group_permissions(self, user_obj):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = Permission.objects.filter(group__user=user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj):
if user_obj.is_anonymous():
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = User.objects.get_or_create(username=username)
if created:
user = self.configure_user(user)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
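# Illustrative sketch (assumption, not part of Django itself): a project-specific subclass
# that strips a trusted mail domain from REMOTE_USER and flags newly created accounts as staff.
#
# class CorporateRemoteUserBackend(RemoteUserBackend):
#     def clean_username(self, username):
#         return username.rsplit('@', 1)[0]
#     def configure_user(self, user):
#         user.is_staff = True
#         user.save()
#         return user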
| apache-2.0 |
hilaskis/UAV_MissionPlanner | Lib/xml/sax/xmlreader.py | 113 | 12632 | """An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must throw a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer != "":
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
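# Illustrative sketch (assumption, not part of the original module): handing the parser an
# explicit byte stream (plus its encoding) instead of letting it open the system identifier.
#
# source = InputSource("http://example.com/document.xml")
# source.setByteStream(open("document.xml", "rb"))
# source.setEncoding("utf-8")
# parser.parse(source)   # 'parser' stands for any XMLReader implementation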
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if not name in self._attrs:
raise KeyError, name
return name
def getQNameByName(self, name):
if not name in self._attrs:
raise KeyError, name
return name
def getNames(self):
return self._attrs.keys()
def getQNames(self):
return self._attrs.keys()
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return self._attrs.keys()
def has_key(self, name):
return name in self._attrs
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return self._attrs.items()
def values(self):
return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError, name
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError, name
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return self._qnames.values()
def copy(self):
return self.__class__(self._attrs, self._qnames)
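# Illustrative sketch (assumption, not part of the original module): NS-aware attributes are
# keyed by (namespace-uri, local-name) tuples and mapped to their qualified names.
#
# attrs = AttributesNSImpl(
#     {(u"http://www.w3.org/1999/xlink", u"href"): u"doc.xml"},
#     {(u"http://www.w3.org/1999/xlink", u"href"): u"xlink:href"})
# attrs.getValue((u"http://www.w3.org/1999/xlink", u"href"))  # => u"doc.xml"
# attrs.getValueByQName(u"xlink:href")                        # => u"doc.xml"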
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
| gpl-2.0 |
codepython/CollectorCity-Market-Place | stores/apps/sell/migrations/0004_auto__add_field_sell_cancel.py | 2 | 11769 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Sell.cancel'
db.add_column('sell_sell', 'cancel', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Sell.cancel'
db.delete_column('sell_sell', 'cancel')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'sell.cart': {
'Meta': {'object_name': 'Cart'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"})
},
'sell.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Cart']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {})
},
'sell.payment': {
'Meta': {'object_name': 'Payment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'payment_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.PaymentHistory']"}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '11', 'decimal_places': '2'})
},
'sell.paymenthistory': {
'Meta': {'object_name': 'PaymentHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Payment']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'sell.sell': {
'Meta': {'object_name': 'Sell'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'cancel': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'Manual Payment'", 'max_length': '255'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']", 'null': 'True'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
'total_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
'total_without_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'sell.sellitem': {
'Meta': {'object_name': 'SellItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {}),
'sell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Sell']"})
},
'sell.shipping': {
'Meta': {'object_name': 'Shipping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shipping_service': ('django.db.models.fields.CharField', [], {'default': "'Other'", 'max_length': '255'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'shipping_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.ShippingHistory']"}),
'tracking_number': ('django.db.models.fields.CharField', [], {'default': "'--'", 'max_length': '255'})
},
'sell.shippingdata': {
'Meta': {'object_name': 'ShippingData'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'sell.shippinghistory': {
'Meta': {'object_name': 'ShippingHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shipping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Shipping']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['sell']
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/numpy/distutils/from_template.py | 51 | 7890 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
process_file(filename)
takes a templated file .xxx.src and produces a .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma-separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by the <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
__all__ = ['process_str','process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while 1:
m = routine_start_re.search(astr,ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr,start,m.end()):
while 1:
i = astr.rfind('\n',ind,start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr,m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start,end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace('\,','@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = adict.keys()
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr,names):
substr = substr.replace('\>','@rightarrow@')
substr = substr.replace('\<','@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>",substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace('\,','@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r,names.get(r,None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@',',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"\
" for <%s=%s>. Ignoring." % (base_rule,
','.join(rules[base_rule]),
r,thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name,(k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@','>')
newstr = newstr.replace('@leftarrow@','<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]],names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid.readlines():
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d,fn)
if os.path.isfile(fn):
print ('Including file',fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file,'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname,'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
| gpl-3.0 |
av8ramit/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/softplus.py | 81 | 5398 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softplus bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"Softplus",
]
class Softplus(bijector.Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
The optional nonzero `hinge_softness` parameter changes the transition at
zero. With `hinge_softness = c`, the bijector is:
```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].```
For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`,
so the behavior for large `x` is the same as the standard softplus.
As `c > 0` approaches 0 from the right, `f_c(x)` becomes less and less soft,
approaching `max(0, x)`.
* `c = 1` is the default.
* `c > 0` but small means `f(x) approx ReLu(x) = max(0, x)`.
* `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`.
* `c = 0` results in a non-bijective transformation and triggers an exception.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus(event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
@distribution_util.AppendDocstring(
kwargs_dict={
"hinge_softness": (
"Nonzero floating point `Tensor`. Controls the softness of what "
"would otherwise be a kink at the origin. Default is 1.0")})
def __init__(self,
event_ndims=0,
hinge_softness=None,
validate_args=False,
name="softplus"):
with ops.name_scope(name, values=[hinge_softness]):
if hinge_softness is not None:
self._hinge_softness = ops.convert_to_tensor(
hinge_softness, name="hinge_softness")
else:
self._hinge_softness = None
if validate_args:
nonzero_check = check_ops.assert_none_equal(
ops.convert_to_tensor(
0, dtype=self.hinge_softness.dtype),
self.hinge_softness,
message="hinge_softness must be non-zero")
self._hinge_softness = control_flow_ops.with_dependencies(
[nonzero_check], self.hinge_softness)
super(Softplus, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.hinge_softness is None:
return nn_ops.softplus(x)
hinge_softness = math_ops.cast(self.hinge_softness, x.dtype)
return hinge_softness * nn_ops.softplus(x / hinge_softness)
def _inverse(self, y):
if self.hinge_softness is None:
return distribution_util.softplus_inverse(y)
hinge_softness = math_ops.cast(self.hinge_softness, y.dtype)
return hinge_softness * distribution_util.softplus_inverse(
y / hinge_softness)
def _inverse_log_det_jacobian(self, y):
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# axis=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.hinge_softness is not None:
y /= math_ops.cast(self.hinge_softness, y.dtype)
return -math_ops.reduce_sum(math_ops.log(-math_ops.expm1(-y)),
axis=self._event_dims_tensor(y))
def _forward_log_det_jacobian(self, x):
if self.hinge_softness is not None:
x /= math_ops.cast(self.hinge_softness, x.dtype)
return -math_ops.reduce_sum(nn_ops.softplus(-x),
axis=self._event_dims_tensor(x))
@property
def hinge_softness(self):
return self._hinge_softness
| apache-2.0 |
mith1979/ansible_automation | applied_python/applied_python/lib64/python2.7/site-packages/Crypto/Hash/MD4.py | 124 | 2716 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD4 cryptographic hash algorithm.
MD4 is specified in RFC1320_ and produces the 128 bit digest of a message.
>>> from Crypto.Hash import MD4
>>>
>>> h = MD4.new()
>>> h.update(b'Hello')
>>> print(h.hexdigest())
MD4 stands for Message Digest version 4, and it was invented by Rivest in 1990.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1320: http://tools.ietf.org/html/rfc1320
"""
_revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'MD4Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._MD4 as _MD4
hashFactory = _MD4
class MD4Hash(HashAlgo):
"""Class that implements an MD4 hash
:undocumented: block_size
"""
#: ASN.1 Object identifier (OID)::
#:
    #:          id-md4 OBJECT IDENTIFIER ::= {
#: iso(1) member-body(2) us(840) rsadsi(113549)
#: digestAlgorithm(2) 4
#: }
#:
#: This value uniquely identifies the MD4 algorithm.
oid = b('\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04')
digest_size = 16
block_size = 64
def __init__(self, data=None):
HashAlgo.__init__(self, hashFactory, data)
def new(self, data=None):
return MD4Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `MD4Hash.update()`.
Optional.
:Return: A `MD4Hash` object
"""
return MD4Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = MD4Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = MD4Hash.block_size
| apache-2.0 |
minhtuancn/odoo | addons/l10n_bo/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223125/w17t2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/event.py | 603 | 19086 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all its event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects, and there are a variety of ways
to access the events it contains, from simply checking for the existence of
events to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended to make sure your event ids follow this
system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in sync with the system, you will need to call
pygame.event.pump() to keep everything current. You'll usually want to call
this function once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue; use the
pygame.event.set_allowed() and pygame.event.set_blocked() functions to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
attribute. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of their types and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
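# A minimal usage sketch for the queue described in the module docstring
# (assumes a window already exists via pygame.display.set_mode(); kept as a
# comment so importing this module stays side-effect free):
#
#   running = True
#   while running:
#       for event in pygame.event.get():
#           if event.type == pygame.locals.QUIT:
#               running = False
#           elif event.type == pygame.locals.KEYDOWN:
#               print(event.key, event.mod)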
from copy import copy
#from ctypes import * #brython
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
    or sequence of types is given, only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
    queue could eventually fill up with the events you are not interested in.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
    efficient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
    report these types to the user, they should be converted to strings. This
    will return the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
            Event type or list of event types to allow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
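# Illustrative sketch of the filtering described above, using standard pygame
# constants (kept as a comment so module import behaviour is unchanged):
#
#   pygame.event.set_blocked(pygame.locals.MOUSEMOTION)   # drop noisy motion events
#   pygame.event.set_allowed([pygame.locals.KEYDOWN, pygame.locals.QUIT])  # re-enable these types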
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
        - `typelist`: int or sequence of int
    :rtype: bool
'''
pygame.display._video_init_check()
if typelist == None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
    Although any type of event can be placed, if using the system event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
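# Illustrative sketch of posting a custom event as described above; the
# attribute name 'payload' is arbitrary and used only for demonstration:
#
#   my_event = Event(pygame.locals.USEREVENT, payload=42)
#   post(my_event)
#   # later, an event retrieved via get()/poll() will carry .payload == 42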
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
        given attributes and values. The attributes can come from a dictionary
        argument with string keys, or from additional keyword arguments.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
id = sdl_event.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
# Bizarre undocumented but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
| gpl-3.0 |
botswana-harvard/ba-namotswe | ba_namotswe/models/pregnancy.py | 1 | 1266 | from django.db import models
from simple_history.models import HistoricalRecords
from .crf_model_mixin import CrfModelMixin, CrfInlineModelMixin
class PregnancyHistory(CrfModelMixin):
history = HistoricalRecords()
class Meta(CrfModelMixin.Meta):
app_label = 'ba_namotswe'
verbose_name = 'Pregnancy History'
verbose_name_plural = 'Pregnancy History'
class Pregnancy(CrfInlineModelMixin, models.Model):
pregnancy_history = models.ForeignKey(PregnancyHistory)
pregnancy_date = models.DateField(
verbose_name='Date of first clinical documentation of pregnancy',
null=True,
blank=True)
delivery_date = models.DateField(
null=True,
blank=True)
history = HistoricalRecords()
def get_pending_fields(self):
pending_fields = super(Pregnancy, self).get_pending_fields()
if not self.pregnancy_date:
pending_fields.append('pregnancy_date')
if not self.delivery_date:
pending_fields.append('delivery_date')
pending_fields.sort()
return pending_fields
class Meta(CrfInlineModelMixin.Meta):
app_label = 'ba_namotswe'
verbose_name = 'Pregnancy'
crf_inline_parent = 'pregnancy_history'
| gpl-3.0 |
pratapvardhan/pandas | pandas/tests/test_base.py | 2 | 46174 | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz, is_datetime64_dtype,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to assert_raises_regex
# (after the Exception kind).
tm.assert_raises_regex(
TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate(object):
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ='property'
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._methods,
typ='method'
)
delegate = self.Delegate(self.Delegator())
def f():
delegate.foo
pytest.raises(TypeError, f)
def f():
delegate.foo = 5
pytest.raises(TypeError, f)
def f():
delegate.foo()
pytest.raises(TypeError, f)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops(object):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
                # these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
            # freq raises AttributeError on an Int64Index because it's not
            # defined; we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
pytest.raises(TypeError, lambda: getattr(o, op))
else:
pytest.raises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):
from pandas import DataFrame, Panel
op_map = {'add': '+',
'sub': '-',
'mul': '*',
'mod': '%',
'pow': '**',
'truediv': '/',
'floordiv': '//'}
for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
'floordiv']:
for klass in [Series, DataFrame, Panel]:
operand1 = klass.__name__.lower()
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super(TestIndexOps, self).setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
# assert result.iat[0]
# assert result.iat[1]
if (is_datetime64_dtype(o) or is_datetimetz(o)):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ['shape', 'dtype', 'T', 'nbytes']:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ['flags', 'strides', 'itemsize']:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, 'base')
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_ops(self):
for op in ['max', 'min']:
for o in self.objs:
result = getattr(o, op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(
ordinal=getattr(o._ndarray_values, op)(),
freq=o.freq)
try:
assert result == expected
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype('M8[ns]').astype('int64')
assert result.value == expected
def test_nanops(self):
# GH 7261
for op in ['max', 'min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
assert getattr(obj, op)() == 2.0
obj = klass([np.nan])
assert pd.isna(getattr(obj, op)())
obj = klass([])
assert pd.isna(getattr(obj, op)())
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
assert getattr(obj, op)() == datetime(2011, 11, 1)
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
assert getattr(obj, op)(), datetime(2011, 11, 1)
# argmin/max
obj = Index(np.arange(5, dtype='int64'))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
rep = np.repeat(values, range(1, len(o) + 1))
o = klass(rep, index=idx, name='a')
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(range(10, 0, -1), index=expected_index,
dtype='int64', name='a')
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == 'a'
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
elif is_datetimetz(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
orig._values.astype(object).values)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert o.nunique() == len(np.unique(o.values))
def test_value_counts_unique_nunique_null(self):
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetimetz(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = iNaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = 'a'
else:
if is_datetimetz(o):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name='a')
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype='int64', name='a')
expected_s = Series(list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype='int64', name='a')
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == 'a'
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == 'a'
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result,
Index(values[1:], name='a'))
elif is_datetimetz(o):
# unable to compare NaT / nan
vals = values[2:].astype(object).values
tm.assert_numpy_array_equal(result[1:], vals)
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
# bins
pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0],
index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,
'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(['a', 'b', np.nan, 'd'])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected,
check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]),
check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize('klass', [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
'xxyyzz20100101EGG', 'xxyyww20090101EGG',
'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3],
names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
s.name = None
idx = pd.to_datetime(['2010-01-01 00:00:00Z',
'2008-09-09 00:00:00Z',
'2009-01-01 00:00:00Z'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',
'2009-01-01 00:00:00Z',
'2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == 'datetime64[ns]'
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == 'datetime64[ns]'
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name='dt')
result = td.value_counts()
expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'], name='dt')
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name='dt')
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
labels, uniques = o.factorize()
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig),
check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques,
check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype=np.intp)
labels, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig).sort_values(),
check_names=False)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
np.intp)
labels, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name='a')
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True],
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with tm.assert_raises_regex(
TypeError, r"drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
else:
expected = Series([False] * len(original),
index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep='last'), expected)
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(s.drop_duplicates(keep=False),
s[~np.array(base)])
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],
'b': [2, 2, np.nan, np.nan, np.nan],
'c': [3, 3, np.nan, np.nan, 'three'],
'd': [1, 2, 3, 4, 4],
'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),
datetime(2015, 2, 1), pd.NaT, pd.NaT]
})
for column in df.columns:
for keep in ['first', 'last', False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
        # GH 11343
        # though Index.fillna and Series.fillna have separate implementations,
        # test here to confirm they work the same way
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if (is_object_dtype(o) or (isinstance(o, Series) and
is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert ((o.memory_usage(index=False) +
o.index.memory_usage()) ==
o.memory_usage(index=True))
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(obj.transpose(), obj)
else:
tm.assert_series_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, 1)
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(np.transpose(obj), obj)
else:
tm.assert_series_equal(np.transpose(obj), obj)
tm.assert_raises_regex(ValueError, self.errmsg,
np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(object):
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
def f():
t.b = "test"
pytest.raises(AttributeError, f)
assert not hasattr(t, "b")
class TestToIterable(object):
# test that we convert an iterable to python types
dtypes = [
('int8', (int, long)),
('int16', (int, long)),
('int32', (int, long)),
('int64', (int, long)),
('uint8', (int, long)),
('uint16', (int, long)),
('uint32', (int, long)),
('uint64', (int, long)),
('float16', float),
('float32', float),
('float64', float),
('datetime64[ns]', Timestamp),
('datetime64[ns, US/Eastern]', Timestamp),
('timedelta64[ns]', Timedelta)]
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype, obj',
[
('object', object, 'a'),
('object', (int, long), 1),
('category', object, 'a'),
('category', (int, long), 1)])
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_object_and_category(self, typ, method,
dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test items / iteritems yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.iteritems())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype',
dtypes + [
('object', (int, long)),
('category', (int, long))])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp('1999-12-31'),
Timestamp('2000-12-31')])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')]
s = Series(vals)
assert s.dtype == 'datetime64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [Timestamp('2011-01-01', tz='US/Eastern'),
Timestamp('2011-01-02', tz='US/Eastern')]
s = Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta('1 days'), Timedelta('2 days')]
s = Series(vals)
assert s.dtype == 'timedelta64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = Series(vals)
assert s.dtype == 'object'
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == 'M'
assert res == exp
@pytest.mark.parametrize('array, expected_type, dtype', [
(np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'),
(np.array(['a', 'b']), np.ndarray, 'object'),
(pd.Categorical(['a', 'b']), pd.Categorical, 'category'),
(pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'),
(pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex,
'datetime64[ns, US/Central]'),
(pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'),
(pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'),
(pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'),
])
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
if isinstance(l_values, np.ndarray):
tm.assert_numpy_array_equal(l_values, r_values)
elif isinstance(l_values, pd.Index):
tm.assert_index_equal(l_values, r_values)
elif pd.api.types.is_categorical(l_values):
tm.assert_categorical_equal(l_values, r_values)
else:
raise TypeError("Unexpected type {}".format(type(l_values)))
assert l_values.dtype == dtype
assert r_values.dtype == dtype
@pytest.mark.parametrize('array, expected', [
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(['0', '1']), np.array(['0', '1'], dtype=object)),
(pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')),
(pd.DatetimeIndex(['2017-01-01T00:00:00']),
np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')),
(pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"),
np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')),
(pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')),
pytest.param(
pd.PeriodIndex(['2017', '2018'], freq='D'),
np.array([17167, 17532]),
marks=pytest.mark.xfail(reason="PeriodArray Not implemented")
),
])
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
| bsd-3-clause |
invisiblek/python-for-android | python-modules/twisted/twisted/names/secondary.py | 57 | 3201 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import task, defer
from twisted.names import dns
from twisted.names import common
from twisted.names import client
from twisted.names import resolve
from twisted.python import log, failure
from twisted.application import service
class SecondaryAuthorityService(service.Service):
calls = None
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@param domains: A sequence of domain names for which to perform
zone transfers.
"""
self.primary = primary
self.domains = [SecondaryAuthority(primary, d) for d in domains]
def getAuthority(self):
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
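# Illustrative usage sketch (not part of Twisted itself): wiring the service
# into a twistd application. The primary IP and domain list below are
# placeholders, not values taken from this module.
#
#   from twisted.application import internet, service
#   from twisted.names import dns, secondary, server
#
#   application = service.Application('secondary-dns')
#   authority = secondary.SecondaryAuthorityService('10.0.0.1', ['example.com'])
#   authority.setServiceParent(application)
#   factory = server.DNSServerFactory(authorities=authority.domains)
#   internet.TCPServer(dns.PORT, factory).setServiceParent(application)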
from twisted.names.authority import FileAuthority
class SecondaryAuthority(common.ResolverBase):
"""An Authority that keeps itself updated by performing zone transfers"""
transferring = False
soa = records = None
def __init__(self, primaryIP, domain):
common.ResolverBase.__init__(self)
self.primary = primaryIP
self.domain = domain
def transfer(self):
if self.transferring:
return
        self.transferring = True
return client.Resolver(servers=[(self.primary, dns.PORT)]
).lookupZone(self.domain
).addCallback(self._cbZone
).addErrback(self._ebZone
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority.__dict__['_lookup'](self, name, cls, type, timeout)
#shouldn't we just subclass? :P
lookupZone = FileAuthority.__dict__['lookupZone']
    def _cbZone(self, zone):
        # the zone transfer has completed; allow the next one to start
        self.transferring = False
        ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (str(rec.name).lower(), rec.payload)
else:
r.setdefault(str(rec.name).lower(), []).append(rec.payload)
    def _ebZone(self, failure):
        self.transferring = False
        log.msg("Updating %s from %s failed during zone transfer" % (self.domain, self.primary))
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
        self.transferring = False
log.msg("Transferring %s from %s failed after zone transfer" % (self.domain, self.primary))
log.err(failure)
| apache-2.0 |
dkluffy/dkluff-code | code/work/testpassport.py | 1 | 2023 | #!/usr/bin/env python
# coding=utf-8
import re
import json
import sys
fmt = '\033[0;3{}m{}\033[0m'.format
def cls():
print chr(27) + "[2J"
def fmtAnswer(s,sp=None):
r=re.sub("Answer:| |\r|\n","",s)
if sp is None:
a=[]
for i in r.upper():
a+=[i]
return a
return r.upper().split(sp)
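# Example of what fmtAnswer produces (a sketch based on the substitution above):
#   fmtAnswer("Answer: A,B\n", sp=",")  ->  ['A', 'B']
#   fmtAnswer("Answer: ab\n")           ->  ['A', 'B']   (split character by character)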
def fmtItem(item):
q=""
l=len(item)
i=0
while i < l:
q+=item[i]
i+=1
        if i < l and re.search(r"^[A-Z]\.", item[i]):
break
return [q,item[i:]]
def readf(f):
exam=[]
for l in f:
r=re.search("^[0-9]*\.",l)
item=[]
if r:
while not l.startswith("Answer:"):
item+=[l]
l=f.next()
exam+=[fmtItem(item)+[fmtAnswer(l,sp=",")]]
#exam[i] = [ q,[opts],[ans] ]
return exam
def examTodict(exam):
j={}
iid=1
for i in exam:
j[str(iid)]={"question":i[0],"opts":i[1],"ans":i[2]}
iid+=1
return j
def prtq(item):
print "----->"
print fmt(4,item[0])
for i in item[1]:
print i
def prtRes(answer,myanswer,item):
print "Answer is :",answer
c=1
if answer == myanswer:
c=2
for i in item:
if i[0] in answer:
print fmt(c,i)
else:
print i
return answer == myanswer
def recordmis(fdir,iid):
with open(fdir,'a') as r:
print >> r,iid
def play(p):
iid=0
lenth=len(p)
while iid <lenth:
cls()
i=p[iid]
prtq(i)
with open(bookmark,'a') as bk:
print >>bk,iid+1
myanswer=fmtAnswer(raw_input("Enter Answers: "))
cls()
print fmt(4,i[0])
tf=prtRes(i[2],myanswer,i[1])
if not tf:
recordmis(logfile,iid+1)
iid+=1
c=raw_input("<----Continue...?")
try:
iid=int(c)-1
except Exception:
pass
if __name__ == "__main__":
f=open(sys.argv[1])
logfile="wrlog.log"
bookmark="bookmark.txt"
p=readf(f)
wrgs=[]
if len(sys.argv) == 3:
wrgfile=sys.argv[2]
print "Review...."
with open(wrgfile) as r:
for l in r:
wrgs+=[p[int(l[:-1])-1]]
if len(wrgs) > 0:
play(wrgs)
else:
play(p)
| apache-2.0 |
kmacinnis/sympy | examples/intermediate/infinite_1d_box.py | 116 | 2911 | #!/usr/bin/env python
"""
Applying perturbation theory to calculate the ground state energy
of the infinite 1D box of width ``a`` with a perturbation
which is linear in ``x``, up to second order in perturbation
"""
from sympy.core import pi
from sympy import Integral, var, S
from sympy.functions import sin, sqrt
def X_n(n, a, x):
"""
Returns the wavefunction X_{n} for an infinite 1D box
``n``
the "principal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``a``
width of the well. a > 0
``x``
x coordinate.
"""
n, a, x = map(S, [n, a, x])
C = sqrt(2 / a)
return C * sin(pi * n * x / a)
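# Quick sanity check (illustrative, not part of the original example): the
# wavefunction returned by X_n is normalized on [0, a].
#
#   from sympy import integrate, symbols
#   x = symbols('x')
#   assert integrate(X_n(1, 10, x)**2, (x, 0, 10)) == 1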
def E_n(n, a, mass):
"""
    Returns the energy E_{n} for an infinite 1D box (an infinitely deep potential well)
``n``
the "principal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``a``
width of the well. a > 0
``mass``
mass.
"""
return ((n * pi / a)**2) / mass
def energy_corrections(perturbation, n, a=10, mass=0.5):
"""
    Calculates the first- and second-order corrections due to perturbation
    theory and returns a tuple whose zeroth element is the unperturbed energy
    and whose remaining two elements are the corrections
``n``
the "nodal" quantum number. Corresponds to the number of nodes in the
wavefunction. n >= 0
``a``
width of the well. a > 0
``mass``
mass.
"""
x, _a = var("x _a")
Vnm = lambda n, m, a: Integral(X_n(n, a, x) * X_n(m, a, x)
* perturbation.subs({_a: a}), (x, 0, a)).n()
    # As we know from theory, for the perturbation V0*x/a only V(n, n-1) and
    # V(n, n+1) will be non-zero
return (E_n(n, a, mass).evalf(),
Vnm(n, n, a).evalf(),
(Vnm(n, n - 1, a)**2/(E_n(n, a, mass) - E_n(n - 1, a, mass))
+ Vnm(n, n + 1, a)**2/(E_n(n, a, mass) - E_n(n + 1, a, mass))).evalf())
def main():
print()
print("Applying perturbation theory to calculate the ground state energy")
print("of the infinite 1D box of width ``a`` with a perturbation")
print("which is linear in ``x``, up to second order in perturbation.")
print()
x, _a = var("x _a")
perturbation = .1 * x / _a
E1 = energy_corrections(perturbation, 1)
print("Energy for first term (n=1):")
print("E_1^{(0)} = ", E1[0])
print("E_1^{(1)} = ", E1[1])
print("E_1^{(2)} = ", E1[2])
print()
E2 = energy_corrections(perturbation, 2)
print("Energy for second term (n=2):")
print("E_2^{(0)} = ", E2[0])
print("E_2^{(1)} = ", E2[1])
print("E_2^{(2)} = ", E2[2])
print()
E3 = energy_corrections(perturbation, 3)
print("Energy for third term (n=3):")
print("E_3^{(0)} = ", E3[0])
print("E_3^{(1)} = ", E3[1])
print("E_3^{(2)} = ", E3[2])
print()
if __name__ == "__main__":
main()
| bsd-3-clause |
atheed/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/genscript.py | 191 | 4129 | """ (deprecated) generate a single-file self-contained version of pytest """
import os
import sys
import pkgutil
import py
import _pytest
def find_toplevel(name):
for syspath in sys.path:
base = py.path.local(syspath)
lib = base/name
if lib.check(dir=1):
return lib
mod = base.join("%s.py" % name)
if mod.check(file=1):
return mod
raise LookupError(name)
def pkgname(toplevel, rootpath, path):
parts = path.parts()[len(rootpath.parts()):]
return '.'.join([toplevel] + [x.purebasename for x in parts])
def pkg_to_mapping(name):
toplevel = find_toplevel(name)
name2src = {}
if toplevel.check(file=1): # module
name2src[toplevel.purebasename] = toplevel.read()
else: # package
for pyfile in toplevel.visit('*.py'):
pkg = pkgname(name, toplevel, pyfile)
name2src[pkg] = pyfile.read()
    # with wheels, py source code might not be installed
# and the resulting genscript is useless, just bail out.
assert name2src, "no source code found for %r at %r" %(name, toplevel)
return name2src
def compress_mapping(mapping):
import base64, pickle, zlib
data = pickle.dumps(mapping, 2)
data = zlib.compress(data, 9)
data = base64.encodestring(data)
data = data.decode('ascii')
return data
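# For reference, the standalone template is expected to reverse this roughly as
# follows (a sketch, not the actual template code):
#
#   import base64, pickle, zlib
#
#   def decompress_mapping(data):
#       data = base64.decodestring(data.encode('ascii'))
#       return pickle.loads(zlib.decompress(data))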
def compress_packages(names):
mapping = {}
for name in names:
mapping.update(pkg_to_mapping(name))
return compress_mapping(mapping)
def generate_script(entry, packages):
data = compress_packages(packages)
tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
exe = tmpl.read()
exe = exe.replace('@SOURCES@', data)
exe = exe.replace('@ENTRY@', entry)
return exe
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption("--genscript", action="store", default=None,
dest="genscript", metavar="path",
help="create standalone pytest script at given target path.")
def pytest_cmdline_main(config):
import _pytest.config
genscript = config.getvalue("genscript")
if genscript:
tw = _pytest.config.create_terminal_writer(config)
tw.line("WARNING: usage of genscript is deprecated.",
red=True)
deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
if sys.version_info < (2,7):
deps.append("argparse")
tw.line("generated script will run on python2.6-python3.3++")
else:
tw.line("WARNING: generated script will not run on python2.6 "
"due to 'argparse' dependency. Use python2.6 "
"to generate a python2.6 compatible script", red=True)
script = generate_script(
'import pytest; raise SystemExit(pytest.cmdline.main())',
deps,
)
genscript = py.path.local(genscript)
genscript.write(script)
tw.line("generated pytest standalone script: %s" % genscript,
bold=True)
return 0
def pytest_namespace():
return {'freeze_includes': freeze_includes}
def freeze_includes():
"""
Returns a list of module names used by py.test that should be
included by cx_freeze.
"""
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=''):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
yield prefix + m
else:
yield prefix + name
| mpl-2.0 |
gitaarik/django | tests/m2m_through/models.py | 58 | 4699 | from datetime import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# M2M described on one of the models
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=128)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(Person, through='Membership')
custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
nodefaultsnonulls = models.ManyToManyField(
Person,
through='TestNoDefaultsOrNulls',
related_name="testnodefaultsnonulls",
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
date_joined = models.DateTimeField(default=datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
class Meta:
ordering = ('date_joined', 'invite_reason', 'group')
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
@python_2_unicode_compatible
class CustomMembership(models.Model):
person = models.ForeignKey(
Person,
models.CASCADE,
db_column="custom_person_column",
related_name="custom_person_related_name",
)
group = models.ForeignKey(Group, models.CASCADE)
weird_fk = models.ForeignKey(Membership, models.SET_NULL, null=True)
date_joined = models.DateTimeField(default=datetime.now)
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class Meta:
db_table = "test_table"
ordering = ["date_joined"]
class TestNoDefaultsOrNulls(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
nodefaultnonull = models.CharField(max_length=5)
@python_2_unicode_compatible
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)
def __str__(self):
return self.name
class Friendship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_to_set")
date_friended = models.DateTimeField()
# Custom through link fields
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(max_length=50)
invitees = models.ManyToManyField(
Person, through='Invitation',
through_fields=('event', 'invitee'),
related_name='events_invited',
)
def __str__(self):
return self.title
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE, related_name='invitations')
# field order is deliberately inverted. the target field is "invitee".
inviter = models.ForeignKey(Person, models.CASCADE, related_name='invitations_sent')
invitee = models.ForeignKey(Person, models.CASCADE, related_name='invitations')
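# Illustrative sketch (not part of these test models): with through_fields the
# m2m traversal uses the ``event`` and ``invitee`` columns, while ``inviter`` is
# just extra data carried on the through row. The object names are hypothetical.
#
#   event = Event.objects.create(title='Party')
#   alice = Person.objects.create(name='Alice')
#   bob = Person.objects.create(name='Bob')
#   Invitation.objects.create(event=event, inviter=alice, invitee=bob)
#   assert list(event.invitees.all()) == [bob]
#   assert list(bob.events_invited.all()) == [event]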
@python_2_unicode_compatible
class Employee(models.Model):
name = models.CharField(max_length=5)
subordinates = models.ManyToManyField(
'self',
through="Relationship",
through_fields=('source', 'target'),
symmetrical=False,
)
class Meta:
ordering = ('pk',)
def __str__(self):
return self.name
class Relationship(models.Model):
# field order is deliberately inverted.
another = models.ForeignKey(Employee, models.SET_NULL, related_name="rel_another_set", null=True)
target = models.ForeignKey(Employee, models.CASCADE, related_name="rel_target_set")
source = models.ForeignKey(Employee, models.CASCADE, related_name="rel_source_set")
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
class Meta:
ordering = ('iname',)
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
ingredients = models.ManyToManyField(
Ingredient, through='RecipeIngredient', related_name='recipes',
)
class Meta:
ordering = ('rname',)
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
| bsd-3-clause |
NicolasHerin/game_book_share | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
    2. pick the provisioning profile that best matches the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
    Checks all the installed provisioning profiles (or, if the user specified
    the PROVISIONING_PROFILE variable, only consults it) and selects the most
    specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
    Expands the "$(variable)" patterns in the source entitlements file,
    adds the extra entitlements defined in the .mobileprovision file and copies
    the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
jelugbo/ddi | lms/djangoapps/instructor/tests/test_ecommerce.py | 13 | 13342 | """
Unit tests for Ecommerce feature flag in new instructor dashboard.
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.models import CourseMode
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegistrationCode
from mock import patch
from student.roles import CourseFinanceAdminRole
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestECommerceDashboardViews(ModuleStoreTestCase):
"""
Check for E-commerce view on the new instructor dashboard
"""
def setUp(self):
self.course = CourseFactory.create()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
mode = CourseMode(
course_id=self.course.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=10, currency='usd'
)
mode.save()
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.e_commerce_link = '<a href="" data-section="e-commerce">E-Commerce</a>'
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def test_pass_e_commerce_tab_in_instructor_dashboard(self):
"""
Test Pass E-commerce Tab is in the Instructor Dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
def test_user_has_finance_admin_rights_in_e_commerce_tab(self):
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertTrue('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
self.assertTrue('Download All e-Commerce Purchase' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertFalse('Download All e-Commerce Purchase' in response.content)
self.assertFalse('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
def test_user_view_course_price(self):
"""
test to check if the user views the set price button and price in
the instructor dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
course_honor_mode = CourseMode.mode_for_course(self.course.id, 'honor')
price = course_honor_mode.min_price
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
self.assertFalse('+ Set Price</a></span>' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertFalse('+ Set Price</a></span>' in response.content)
def test_update_course_price_check(self):
price = 200
# course B
course2 = CourseFactory.create(org='EDX', display_name='test_course', number='100')
mode = CourseMode(
course_id=course2.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=30, currency='usd'
)
mode.save()
# course A update
CourseMode.objects.filter(course_id=self.course.id).update(min_price=price)
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': price, 'currency': 'usd'}
response = self.client.post(set_course_price_url, data)
self.assertTrue('CourseMode price updated successfully' in response.content)
# Course A updated total amount should be visible in e-commerce page if the user is finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
def test_user_admin_set_course_price(self):
"""
        test the course-price-setting related functionality.
        test all the scenarios for setting a new course price
"""
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': '12%', 'currency': 'usd'}
# Value Error course price should be a numeric value
response = self.client.post(set_course_price_url, data)
self.assertTrue("Please Enter the numeric value for the course price" in response.content)
# validation check passes and course price is successfully added
data['course_price'] = 100
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode price updated successfully" in response.content)
course_honor_mode = CourseMode.objects.get(mode_slug='honor')
course_honor_mode.delete()
# Course Mode not exist with mode slug honor
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode with the mode slug({mode_slug}) DoesNotExist".format(mode_slug='honor') in response.content)
def test_add_coupon(self):
"""
        Test Add Coupon Scenarios. Handle all the HttpResponses returned by the add_coupon view
"""
# URL for add_coupon
add_coupon_url = reverse('add_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'ADSADASDSAD', 'created_by': self.instructor, 'discount': 5
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) added successfully".format(code=data['code']) in response.content)
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 99
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) already exist".format(code='A2314') in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>ADSADASDSAD</td>' in response.content)
self.assertTrue('<td>A2314</td>' in response.content)
self.assertFalse('<td>111</td>' in response.content)
data = {
'code': 'A2345314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 199
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("Please Enter the Coupon Discount Value Less than or Equal to 100" in response.content)
data['discount'] = '25%'
response = self.client.post(add_coupon_url, data=data)
self.assertTrue('Please Enter the Integer Value for Coupon Discount' in response.content)
course_registration = CourseRegistrationCode(
code='Vs23Ws4j', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor
)
course_registration.save()
data['code'] = 'Vs23Ws4j'
response = self.client.post(add_coupon_url, data)
self.assertTrue("The code ({code}) that you have tried to define is already in use as a registration code"
.format(code=data['code']) in response.content)
def test_delete_coupon(self):
"""
        Test Delete Coupon Scenarios. Handle all the HttpResponses returned by the remove_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
# URL for remove_coupon
delete_coupon_url = reverse('remove_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) is already inactive'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(delete_coupon_url, {'id': 24454})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=24454) in response.content)
response = self.client.post(delete_coupon_url, {'id': ''})
self.assertTrue('coupon id is None' in response.content)
def test_get_coupon_info(self):
"""
        Test Edit Coupon Info Scenarios. Handle all the HttpResponses returned by the edit_coupon_info view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
# URL for edit_coupon_info
edit_url = reverse('get_coupon_info', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(edit_url, {'id': 444444})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=444444) in response.content)
response = self.client.post(edit_url, {'id': ''})
self.assertTrue('coupon id not found"' in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue("coupon with the coupon id ({coupon_id}) is already inactive".format(coupon_id=coupon.id) in response.content)
def test_update_coupon(self):
"""
        Test Update Coupon Info Scenarios. Handle all the HttpResponses returned by the update_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
data = {
'coupon_id': coupon.id, 'code': 'AS452', 'discount': '10', 'description': 'updated_description', # pylint: disable=E1101
'course_id': coupon.course_id.to_deprecated_string()
}
# URL for update_coupon
update_coupon_url = reverse('update_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) updated Successfully'.format(coupon_id=coupon.id)in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>updated_description</td>' in response.content)
data['coupon_id'] = 1000 # Coupon Not Exist with this ID
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=1000) in response.content)
data['coupon_id'] = '' # Coupon id is not provided
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon id not found' in response.content)
| agpl-3.0 |
vipulroxx/sympy | sympy/core/core.py | 30 | 3814 | """ The core's core. """
from __future__ import print_function, division
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
# singleton numbers
'Zero', 'One', 'Half', 'Infinity', 'NaN', 'NegativeOne', 'NegativeInfinity',
# numbers
'Integer', 'Rational', 'Float',
# singleton symbols
'Exp1', 'Pi', 'ImaginaryUnit',
# symbols
'Symbol', 'Wild', 'Temporary',
# arithmetic operations
'Pow', 'Mul', 'Add',
# function values
'Derivative', 'Integral',
# defined singleton functions
'Abs', 'Sign', 'Sqrt',
'Floor', 'Ceiling',
'Re', 'Im', 'Arg',
'Conjugate',
'Exp', 'Log',
'Sin', 'Cos', 'Tan', 'Cot', 'ASin', 'ACos', 'ATan', 'ACot',
'Sinh', 'Cosh', 'Tanh', 'Coth', 'ASinh', 'ACosh', 'ATanh', 'ACoth',
'RisingFactorial', 'FallingFactorial',
'factorial', 'binomial',
'Gamma', 'LowerGamma', 'UpperGamma', 'PolyGamma',
'Erf',
# special polynomials
'Chebyshev', 'Chebyshev2',
# undefined functions
'Function', 'WildFunction',
# anonymous functions
'Lambda',
# Landau O symbol
'Order',
# relational operations
'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
'GreaterThan', 'LessThan',
]
class BasicType(type):
pass
class Registry(object):
"""
Base class for registry objects.
Registries map a name to an object using attribute notation. Registry
classes behave singletonically: all their instances share the same state,
which is stored in the class object.
All subclasses should set `__slots__ = []`.
"""
__slots__ = []
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
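# Illustrative sketch (not part of the original file): because attributes are
# stored on the class object, every instance of a Registry subclass shares the
# same state, e.g.
#     class MyRegistry(Registry):
#         __slots__ = []
#     MyRegistry().foo = 42
#     assert MyRegistry().foo == 42   # a *different* instance sees the value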
# A set containing all SymPy class objects, kept in sync with C
all_classes = set()
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = []
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
        if cls not in self.__class__.__dict__.values():
all_classes.remove(cls)
C = ClassRegistry()
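# Illustrative note (not part of the original file): any class created with
# BasicMeta (below) as its metaclass registers itself on C via
# BasicMeta.__init__, and ClassRegistry.__setattr__ also adds it to
# all_classes, e.g. a class named 'Rational' becomes reachable as C.Rational.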
class BasicMeta(BasicType):
def __init__(cls, *args, **kws):
setattr(C, cls.__name__, cls)
def __cmp__(cls, other):
# If the other object is not a Basic subclass, then we are not equal to
# it.
if not isinstance(other, BasicType):
return -1
n1 = cls.__name__
n2 = other.__name__
if n1 == n2:
return 0
UNKNOWN = len(ordering_of_classes) + 1
try:
i1 = ordering_of_classes.index(n1)
except ValueError:
i1 = UNKNOWN
try:
i2 = ordering_of_classes.index(n2)
except ValueError:
i2 = UNKNOWN
if i1 == UNKNOWN and i2 == UNKNOWN:
return (n1 > n2) - (n1 < n2)
return (i1 > i2) - (i1 < i2)
def __lt__(cls, other):
if cls.__cmp__(other) == -1:
return True
return False
def __gt__(cls, other):
if cls.__cmp__(other) == 1:
return True
return False
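# Illustrative example (not part of the original file): classes earlier in
# ordering_of_classes compare as "smaller", e.g. 'Zero' (index 0) precedes
# 'Integer' (index 7), so once both are registered C.Zero < C.Integer is True;
# names missing from the list fall back to plain name comparison.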
C.BasicMeta = BasicMeta
| bsd-3-clause |
archilogic-com/three.js | utils/exporters/blender/addons/io_three/exporter/utilities.py | 1 | 2246 | import uuid
import hashlib
from .. import constants
ROUND = constants.DEFAULT_PRECISION
def bit_mask(flags):
"""Generate a bit mask.
:type flags: dict
:return: int
"""
bit = 0
true = lambda x, y: (x | (1 << y))
false = lambda x, y: (x & (~(1 << y)))
for mask, position in constants.MASK.items():
func = true if flags.get(mask) else false
bit = func(bit, position)
return bit
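# Illustrative example (assumption: constants.MASK maps flag names to bit
# positions, e.g. {'vertices': 0, 'normals': 1}); then
#     bit_mask({'vertices': True, 'normals': False})  ->  0b01 == 1
#     bit_mask({'vertices': True, 'normals': True})   ->  0b11 == 3
# flags absent from the dict are treated as False by flags.get(mask).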
def hash(value):
"""Generate a hash from a given value
:param value:
:rtype: str
"""
hash_ = hashlib.md5()
hash_.update(repr(value).encode('utf8'))
return hash_.hexdigest()
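# Illustrative example (not part of the original file): the digest is taken
# over repr(value), so values with equal reprs hash identically, e.g.
#     hash((1, 2, 3)) == hashlib.md5(repr((1, 2, 3)).encode('utf8')).hexdigest()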
def id():
"""Generate a random UUID
:rtype: str
"""
return str(uuid.uuid4()).upper()
def id_from_name(name):
"""Generate a UUID using a name as the namespace
:type name: str
:rtype: str
"""
return str(uuid.uuid3(uuid.NAMESPACE_DNS, name)).upper()
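# Illustrative example (not part of the original file): uuid3 is deterministic,
# so the same node name always maps to the same identifier across exports,
# whereas id() above returns a fresh random UUID on every call:
#     id_from_name('Cube') == id_from_name('Cube')   # always True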
def rgb2int(rgb):
"""Convert a given rgb value to an integer
:type rgb: list|tuple
:rtype: int
"""
is_tuple = isinstance(rgb, tuple)
rgb = list(rgb) if is_tuple else rgb
colour = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255)
return colour
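# Illustrative example (not part of the original file): channels are scaled
# from [0, 1] floats to 0-255 and packed as 0xRRGGBB, e.g.
#     rgb2int((1.0, 0.5, 0.0))  ->  (255 << 16) + (127 << 8) + 0 == 16744192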
def round_off(value, ndigits=ROUND):
"""Round off values to specified limit
:param value: value(s) to round off
:param ndigits: limit (Default value = ROUND)
:type value: float|list|tuple
:return: the same data type that was passed
:rtype: float|list|tuple
"""
is_tuple = isinstance(value, tuple)
is_list = isinstance(value, list)
value = list(value) if is_tuple else value
value = [value] if not is_list and not is_tuple else value
value = [round(val, ndigits) for val in value]
if is_tuple:
value = tuple(value)
elif not is_list:
value = value[0]
return value
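# Illustrative example (assumption: constants.DEFAULT_PRECISION is 6):
#     round_off(1.23456789)              ->  1.234568
#     round_off((1.23456789, 2.0))       ->  (1.234568, 2.0)   # tuple in, tuple out
#     round_off([1.23456789], ndigits=2) ->  [1.23]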
def rounding(options):
"""By evaluation the options determine if precision was
enabled and what the value is
:type options: dict
:rtype: bool, int
"""
round_off_ = options.get(constants.ENABLE_PRECISION)
if round_off_:
round_val = options[constants.PRECISION]
else:
round_val = None
return (round_off_, round_val)
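# Illustrative example (the option values shown are assumptions, not taken from
# the original file):
#     rounding({constants.ENABLE_PRECISION: True, constants.PRECISION: 4})  ->  (True, 4)
#     rounding({constants.ENABLE_PRECISION: False})                         ->  (False, None)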
| mit |