repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
---|---|---|---|---|
kennedyshead/home-assistant | refs/heads/dev | homeassistant/components/remote/reproduce_state.py | 2 | """Reproduce a Remote state."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
import logging
from typing import Any
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, HomeAssistant, State
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
async def _async_reproduce_state(
hass: HomeAssistant,
state: State,
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistant,
states: Iterable[State],
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce Remote states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
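A minimal sketch of how the reproduce-state helper above is typically invoked, for example when restoring a scene; the entity id is an illustrative assumption and `hass` stands for an already-initialized HomeAssistant instance.
from homeassistant.core import State
from homeassistant.components.remote.reproduce_state import async_reproduce_states

async def restore_remote(hass):
    # Ask the remote platform to bring the (hypothetical) entity back to "on";
    # unknown entities and invalid states are logged and skipped by the helper.
    await async_reproduce_states(hass, [State("remote.living_room", "on")])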
Jonekee/chromium.src | refs/heads/nw12 | third_party/tlslite/tlslite/basedb.py | 114 | # Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
"""Base class for SharedKeyDB and VerifierDB."""
try:
import anydbm
except ImportError:
# Python 3
import dbm as anydbm
import threading
class BaseDB(object):
def __init__(self, filename, type):
self.type = type
self.filename = filename
if self.filename:
self.db = None
else:
self.db = {}
self.lock = threading.Lock()
def create(self):
"""Create a new on-disk database.
@raise anydbm.error: If there's a problem creating the database.
"""
if self.filename:
self.db = anydbm.open(self.filename, "n") #raises anydbm.error
self.db["--Reserved--type"] = self.type
self.db.sync()
else:
self.db = {}
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = anydbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except KeyError:
raise ValueError("Not a recognized database")
def __getitem__(self, username):
if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
valueStr = self.db[username]
finally:
self.lock.release()
return self._getItem(username, valueStr)
def __setitem__(self, username, value):
if self.db is None:
raise AssertionError("DB not open")
valueStr = self._setItem(username, value)
self.lock.acquire()
try:
self.db[username] = valueStr
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __delitem__(self, username):
if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
del(self.db[username])
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __contains__(self, username):
"""Check if the database contains the specified username.
@type username: str
@param username: The username to check for.
@rtype: bool
@return: True if the database contains the username, False
otherwise.
"""
if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
return username in self.db  # has_key() is Python 2 only; "in" works for both anydbm and dbm
finally:
self.lock.release()
def check(self, username, param):
value = self.__getitem__(username)
return self._checkItem(value, username, param)
def keys(self):
"""Return a list of usernames in the database.
@rtype: list
@return: The usernames in the database.
"""
if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
usernames = self.db.keys()
finally:
self.lock.release()
usernames = [u for u in usernames if not u.startswith("--Reserved--")]
return usernames
|
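A minimal sketch of how BaseDB is meant to be subclassed, based on the `_getItem`/`_setItem`/`_checkItem` hooks the methods above call; the subclass and its storage format are illustrative assumptions, not part of tlslite.
class PlainTextDB(BaseDB):
    """Toy subclass that stores values verbatim (illustrative only)."""
    def __init__(self, filename=None):
        BaseDB.__init__(self, filename, "plaintext")
    def _setItem(self, username, value):
        return value            # serialize the value for storage
    def _getItem(self, username, valueStr):
        return valueStr         # deserialize the stored value
    def _checkItem(self, value, username, param):
        return value == param   # compare stored value against the supplied one

db = PlainTextDB()              # no filename -> in-memory dict backend
db.create()
db["alice"] = "correct horse"
assert "alice" in db and db.check("alice", "correct horse")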
agoragames/leaderboard-python | refs/heads/main | test/leaderboard/reverse_tie_ranking_leaderboard_test.py | 1 | from leaderboard.leaderboard import Leaderboard
from leaderboard.tie_ranking_leaderboard import TieRankingLeaderboard
import unittest
import time
import sure
class ReverseTieRankingLeaderboardTest(unittest.TestCase):
def setUp(self):
self.leaderboard = TieRankingLeaderboard('ties', order=Leaderboard.ASC, decode_responses=True)
def tearDown(self):
self.leaderboard.redis_connection.flushdb()
def test_delete_the_ties_ranking_internal_leaderboard_when_you_delete_a_leaderboard_configured_for_ties(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.redis_connection.exists('ties:ties').should.be.true
self.leaderboard.delete_leaderboard()
self.leaderboard.redis_connection.exists('ties:ties').should.be.false
def test_leaders(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
leaders = self.leaderboard.leaders(1)
leaders[0]['rank'].should.equal(1)
leaders[1]['rank'].should.equal(2)
leaders[2]['rank'].should.equal(2)
leaders[3]['rank'].should.equal(3)
leaders[4]['rank'].should.equal(3)
def test_correct_rankings_for_leaders_with_different_page_sizes(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.rank_member('member_6', 50)
self.leaderboard.rank_member('member_7', 50)
self.leaderboard.rank_member('member_8', 30)
self.leaderboard.rank_member('member_9', 30)
self.leaderboard.rank_member('member_10', 10)
leaders = self.leaderboard.leaders(1, page_size=3)
leaders[0]['rank'].should.equal(1)
leaders[1]['rank'].should.equal(1)
leaders[2]['rank'].should.equal(2)
leaders = self.leaderboard.leaders(2, page_size=3)
leaders[0]['rank'].should.equal(2)
leaders[1]['rank'].should.equal(2)
leaders[2]['rank'].should.equal(2)
def test_correct_rankings_for_around_me(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.rank_member('member_6', 50)
self.leaderboard.rank_member('member_7', 50)
self.leaderboard.rank_member('member_8', 30)
self.leaderboard.rank_member('member_9', 30)
self.leaderboard.rank_member('member_10', 10)
leaders = self.leaderboard.around_me('member_3', page_size=3)
leaders[0]['rank'].should.equal(1)
leaders[1]['rank'].should.equal(2)
leaders[2]['rank'].should.equal(2)
def test_removing_a_single_member_will_also_remove_their_score_from_the_tie_scores_leaderboard_when_appropriate(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.remove_member('member_1')
self.leaderboard.total_members_in('ties:ties').should.equal(2)
self.leaderboard.remove_member('member_2')
self.leaderboard.total_members_in('ties:ties').should.equal(1)
self.leaderboard.remove_member('member_3')
self.leaderboard.total_members_in('ties:ties').should.equal(0)
def test_retrieve_the_rank_of_a_single_member_using_rank_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_for('member_1').should.equal(2)
self.leaderboard.rank_for('member_2').should.equal(2)
self.leaderboard.rank_for('member_3').should.equal(1)
def test_retrieve_the_score_and_rank_of_a_single_member_using_score_and_rank_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.score_and_rank_for('member_1')['rank'].should.equal(2)
self.leaderboard.score_and_rank_for('member_2')['rank'].should.equal(2)
self.leaderboard.score_and_rank_for('member_3')['rank'].should.equal(1)
def test_remove_members_in_a_given_score_range_using_remove_members_in_score_range(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
self.leaderboard.rank_member('cheater_1', 100)
self.leaderboard.rank_member('cheater_2', 101)
self.leaderboard.rank_member('cheater_3', 102)
self.leaderboard.total_members().should.equal(8)
self.leaderboard.total_members_in('ties:ties').should.equal(8)
self.leaderboard.remove_members_in_score_range(100, 102)
self.leaderboard.total_members().should.equal(5)
self.leaderboard.total_members_in('ties:ties').should.equal(5)
def test_expire_the_ties_leaderboard_in_a_given_number_of_seconds(self):
self.__rank_members_in_leaderboard()
self.leaderboard.expire_leaderboard(3)
ttl = self.leaderboard.redis_connection.ttl('ties')
ttl.should.be.greater_than(1)
ttl = self.leaderboard.redis_connection.ttl('ties:ties')
ttl.should.be.greater_than(1)
ttl = self.leaderboard.redis_connection.ttl('ties:member_data')
ttl.should.be.greater_than(1)
def test_expire_the_ties_leaderboard_at_a_specific_timestamp(self):
self.__rank_members_in_leaderboard()
self.leaderboard.expire_leaderboard_at(int(time.time() + 10))
ttl = self.leaderboard.redis_connection.ttl(
self.leaderboard.leaderboard_name)
ttl.should.be.lower_than(11)
ttl = self.leaderboard.redis_connection.ttl(
'%s:ties' %
self.leaderboard.leaderboard_name)
ttl.should.be.lower_than(11)
ttl = self.leaderboard.redis_connection.ttl(
'%s:member_data' %
self.leaderboard.leaderboard_name)
ttl.should.be.lower_than(11)
def test_correct_rankings_and_scores_when_using_change_score_for(self):
self.leaderboard.rank_member('member_1', 50)
self.leaderboard.rank_member('member_2', 50)
self.leaderboard.rank_member('member_3', 30)
self.leaderboard.rank_member('member_4', 30)
self.leaderboard.rank_member('member_5', 10)
self.leaderboard.change_score_for('member_3', 10)
self.leaderboard.rank_for('member_3').should.equal(3)
self.leaderboard.rank_for('member_4').should.equal(2)
self.leaderboard.score_for('member_3').should.equal(40.0)
def test_correct_rankings_and_scores_when_using_change_score_for_with_varying_scores(self):
self.leaderboard.rank_member('member_1', 5)
self.leaderboard.rank_member('member_2', 4)
self.leaderboard.rank_member('member_3', 3)
self.leaderboard.rank_member('member_4', 2)
self.leaderboard.rank_member('member_5', 1)
self.leaderboard.change_score_for('member_3', 0.5)
self.leaderboard.rank_for('member_3').should.equal(3)
self.leaderboard.rank_for('member_4').should.equal(2)
self.leaderboard.score_for('member_3').should.equal(3.5)
def test_it_should_output_the_correct_rank_when_initial_score_is_0_and_then_later_scores_are_ties(self):
self.leaderboard.rank_members(['member_1', 0, 'member_2', 0])
self.leaderboard.rank_for('member_1').should.equal(1)
self.leaderboard.rank_for('member_2').should.equal(1)
self.leaderboard.rank_members(['member_1', 0, 'member_2', 0])
self.leaderboard.rank_for('member_1').should.equal(1)
self.leaderboard.rank_for('member_2').should.equal(1)
self.leaderboard.rank_members(['member_1', 1, 'member_2', 1])
self.leaderboard.rank_for('member_1').should.equal(1)
self.leaderboard.rank_for('member_2').should.equal(1)
self.leaderboard.rank_members(['member_1', 1, 'member_2', 1, 'member_3', 4])
self.leaderboard.rank_for('member_3').should.equal(2)
self.leaderboard.rank_for('member_1').should.equal(1)
self.leaderboard.rank_for('member_2').should.equal(1)
def __rank_members_in_leaderboard(self, members_to_add=6):
for index in range(1, members_to_add):
self.leaderboard.rank_member(
'member_%s' %
index, index, {
'member_name': 'Leaderboard member %s' %
index})
|
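The behaviour those tests exercise, as a plain script; it assumes a Redis server on the default localhost port, which is what the leaderboard library connects to unless configured otherwise.
from leaderboard.leaderboard import Leaderboard
from leaderboard.tie_ranking_leaderboard import TieRankingLeaderboard

lb = TieRankingLeaderboard('ties', order=Leaderboard.ASC, decode_responses=True)
lb.rank_member('member_1', 50)
lb.rank_member('member_2', 50)   # tied score -> shared rank
lb.rank_member('member_3', 30)
print(lb.rank_for('member_3'))   # 1 (ASC order: lowest score ranks first)
print(lb.rank_for('member_1'))   # 2, same rank as member_2
lb.delete_leaderboard()          # also removes the internal 'ties:ties' set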
ByteInternet/python-redis-lock | refs/heads/master | src/redis_lock/__init__.py | 2 | from logging import getLogger
from os import urandom
from hashlib import sha1
from redis import StrictRedis
from redis.exceptions import NoScriptError
__version__ = "2.1.0"
logger = getLogger(__name__)
UNLOCK_SCRIPT = b"""
if redis.call("get", KEYS[1]) == ARGV[1] then
redis.call("del", KEYS[2])
redis.call("lpush", KEYS[2], 1)
return redis.call("del", KEYS[1])
else
return 0
end
"""
UNLOCK_SCRIPT_HASH = sha1(UNLOCK_SCRIPT).hexdigest()
class AlreadyAcquired(RuntimeError):
pass
class NotAcquired(RuntimeError):
pass
class Lock(object):
def __init__(self, redis_client, name, expire=None, id=None):
assert isinstance(redis_client, StrictRedis)
self._client = redis_client
self._expire = expire if expire is None else int(expire)
self._id = urandom(16) if id is None else id
self._held = False
self._name = 'lock:'+name
self._signal = 'lock-signal:'+name
def reset(self):
"""
Forcibly deletes the lock. Use this with care.
"""
self._client.delete(self._name)
self._client.delete(self._signal)
@property
def id(self):
return self._id
def get_owner_id(self):
return self._client.get(self._name)
def acquire(self, blocking=True):
logger.debug("Getting %r ...", self._name)
if self._held:
raise AlreadyAcquired("Already aquired from this Lock instance.")
busy = True
while busy:
busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
if busy:
if blocking:
self._client.blpop(self._signal, self._expire or 0)
else:
logger.debug("Failed to get %r.", self._name)
return False
logger.debug("Got lock for %r.", self._name)
self._held = True
return True
def __enter__(self):
assert self.acquire(blocking=True)
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None, force=False):
if not (self._held or force):
raise NotAcquired("This Lock instance didn't acquire the lock.")
logger.debug("Releasing %r.", self._name)
try:
self._client.evalsha(UNLOCK_SCRIPT_HASH, 2, self._name, self._signal, self._id)
except NoScriptError:
logger.warn("UNLOCK_SCRIPT not cached.")
self._client.eval(UNLOCK_SCRIPT, 2, self._name, self._signal, self._id)
self._held = False
release = __exit__
def reset_all(redis_client):
"""
Forcibly deletes all locks if any remain (e.g. after a crash). Use this with care.
"""
for lock_key in redis_client.keys('lock:*'):
redis_client.delete(lock_key)
for lock_key in redis_client.keys('lock-signal:*'):
redis_client.delete(lock_key)
|
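A usage sketch for the Lock class above; the Redis connection details and the guarded function are illustrative assumptions.
from redis import StrictRedis
from redis_lock import Lock

conn = StrictRedis()                     # assumes Redis on localhost:6379
with Lock(conn, "reports", expire=60):   # stored under the key "lock:reports"
    build_report()                       # hypothetical critical section

# Non-blocking variant:
lock = Lock(conn, "reports", expire=60)
if lock.acquire(blocking=False):
    try:
        build_report()
    finally:
        lock.release()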
j-carpentier/nova | refs/heads/master | nova/tests/unit/cmd/test_baseproxy.py | 30 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.cmd import baseproxy
from nova import config
from nova.console import websocketproxy
from nova import test
from nova import version
@mock.patch.object(config, 'parse_args', new=lambda *args, **kwargs: None)
class BaseProxyTestCase(test.NoDBTestCase):
@mock.patch('os.path.exists', return_value=False)
# NOTE(mriedem): sys.exit raises TestingException so we can actually exit
# the test normally.
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_ssl_without_cert(self, mock_exit, mock_exists):
self.flags(ssl_only=True)
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_web_dir_does_not_exist(self, mock_exit, mock_exists):
self.flags(web='/my/fake/webserver/')
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
@mock.patch('os.path.exists', return_value=True)
@mock.patch.object(logging, 'setup')
@mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun')
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__',
return_value=None)
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server')
def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log,
mock_exists):
# Force verbose=False so something else testing nova.cmd.baseproxy
# doesn't impact the call to mocked NovaWebSocketProxy.__init__.
self.flags(verbose=False)
baseproxy.proxy('0.0.0.0', '6080')
mock_log.assert_called_once_with(baseproxy.CONF, 'nova')
mock_gmr.assert_called_once_with(version)
mock_init.assert_called_once_with(
listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False,
verbose=False, cert='self.pem', key=None, ssl_only=False,
daemon=False, record=False, traffic=False,
web='/usr/share/spice-html5', file_only=True,
RequestHandlerClass=websocketproxy.NovaProxyRequestHandler)
mock_start.assert_called_once_with()
|
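For context, the function these tests target is normally driven by a small console-script entry point along the lines of the sketch below; the argument handling is simplified and illustrative.
import sys

from nova.cmd import baseproxy
from nova import config


def main():
    # Parse nova configuration (simplified; real proxy scripts register more options).
    config.parse_args(sys.argv)
    # Listen address and port, matching the values used in the tests above.
    baseproxy.proxy('0.0.0.0', '6080')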
cosmoharrigan/pylearn2 | refs/heads/master | pylearn2/utils/image.py | 39 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
`figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape; preserves the proportions of the image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) // 2
cstart = (shape[1] - image.shape[1]) // 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
X : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
Whether or not the values need to be scaled to [0, 1] before being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
|
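A short sketch tying the helpers above together; the file names are illustrative and it assumes PIL and the pylearn2/theano stack are installed.
import numpy as np
from pylearn2.utils import image

img = image.load('photo.png')                       # float64 array scaled to [0, 1]
thumb = image.make_letterboxed_thumbnail(img, (64, 64))
image.save('thumb.png', thumb)

flat = np.random.rand(16, 64)                       # 16 flattened 8x8 "images"
tiles = image.tile_raster_images(flat, img_shape=(8, 8), tile_shape=(4, 4))
image.save('tiles.png', tiles)                      # uint8 mosaic viewable with PIL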
catapult-project/catapult | refs/heads/master | third_party/google-endpoints/packaging/specifiers.py | 1107 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases then we can short circuit
# the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
|
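A usage sketch for the specifier classes above; it imports from the standalone `packaging` distribution rather than this vendored copy, and the version numbers are arbitrary examples.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=1.4.2,<2.0")
print(Version("1.9.1") in spec)            # True
print(spec.contains("2.0.dev1"))           # False: pre-releases are excluded by default
print(list(spec.filter(["1.3", "1.5", "2.1", "1.6b1"])))   # ['1.5']
print(spec & SpecifierSet("!=1.5.0"))      # intersection of both sets of specifiers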
Geoion/MITMf | refs/heads/master | core/servers/LDAP.py | 24 | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import struct
import core.responder.settings as settings
import threading
from traceback import print_exc
from SocketServer import BaseRequestHandler, ThreadingMixIn, TCPServer
from core.responder.packets import LDAPSearchDefaultPacket, LDAPSearchSupportedCapabilitiesPacket, LDAPSearchSupportedMechanismsPacket, LDAPNTLMChallenge
from core.responder.utils import *
class LDAP:
def start(self):
try:
if OsInterfaceIsSupported():
server = ThreadingTCPServer((settings.Config.Bind_To, 389), LDAPServer)
else:
server = ThreadingTCPServer(('', 389), LDAPServer)
t = threading.Thread(name='LDAP', target=server.serve_forever)
t.setDaemon(True)
t.start()
except Exception as e:
print "Error starting LDAP server: {}".format(e)
print_exc()
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
allow_reuse_address = 1
def server_bind(self):
if OsInterfaceIsSupported():
try:
self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
except:
pass
TCPServer.server_bind(self)
def ParseSearch(data):
Search1 = re.search('(objectClass)', data)
Search2 = re.search('(?i)(objectClass0*.*supportedCapabilities)', data)
Search3 = re.search('(?i)(objectClass0*.*supportedSASLMechanisms)', data)
if Search1:
return str(LDAPSearchDefaultPacket(MessageIDASNStr=data[8:9]))
if Search2:
return str(LDAPSearchSupportedCapabilitiesPacket(MessageIDASNStr=data[8:9],MessageIDASN2Str=data[8:9]))
if Search3:
return str(LDAPSearchSupportedMechanismsPacket(MessageIDASNStr=data[8:9],MessageIDASN2Str=data[8:9]))
def ParseLDAPHash(data, client):
SSPIStart = data[42:]
LMhashLen = struct.unpack('<H',data[54:56])[0]
if LMhashLen > 10:
LMhashOffset = struct.unpack('<H',data[58:60])[0]
LMHash = SSPIStart[LMhashOffset:LMhashOffset+LMhashLen].encode("hex").upper()
NthashLen = struct.unpack('<H',data[64:66])[0]
NthashOffset = struct.unpack('<H',data[66:68])[0]
NtHash = SSPIStart[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',data[72:74])[0]
DomainOffset = struct.unpack('<H',data[74:76])[0]
Domain = SSPIStart[DomainOffset:DomainOffset+DomainLen].replace('\x00','')
UserLen = struct.unpack('<H',data[80:82])[0]
UserOffset = struct.unpack('<H',data[82:84])[0]
User = SSPIStart[UserOffset:UserOffset+UserLen].replace('\x00','')
WriteHash = User+"::"+Domain+":"+LMHash+":"+NtHash+":"+settings.Config.NumChal
SaveToDb({
'module': 'LDAP',
'type': 'NTLMv1',
'client': client,
'user': Domain+'\\'+User,
'hash': NtHash,
'fullhash': WriteHash,
})
if LMhashLen < 2 and settings.Config.Verbose:
settings.Config.ResponderLogger.info("[LDAP] Ignoring anonymous NTLM authentication")
def ParseNTLM(data,client):
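    # NTLMSSP message type 1 (\x01...) is the client NEGOTIATE, which is answered
    # with our CHALLENGE; type 3 (\x03...) is the AUTHENTICATE message that
    # carries the hashes parsed above.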
Search1 = re.search('(NTLMSSP\x00\x01\x00\x00\x00)', data)
Search2 = re.search('(NTLMSSP\x00\x03\x00\x00\x00)', data)
if Search1:
NTLMChall = LDAPNTLMChallenge(MessageIDASNStr=data[8:9],NTLMSSPNtServerChallenge=settings.Config.Challenge)
NTLMChall.calculate()
return str(NTLMChall)
if Search2:
ParseLDAPHash(data,client)
def ParseLDAPPacket(data, client):
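    # Packets framed with a BER long-form length (0x84 means four length bytes
    # follow) are expected here; the hard-coded offsets used below rely on that
    # framing.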
if data[1:2] == '\x84':
PacketLen = struct.unpack('>i',data[2:6])[0]
MessageSequence = struct.unpack('<b',data[8:9])[0]
Operation = data[9:10]
sasl = data[20:21]
OperationHeadLen = struct.unpack('>i',data[11:15])[0]
LDAPVersion = struct.unpack('<b',data[17:18])[0]
if Operation == "\x60":
UserDomainLen = struct.unpack('<b',data[19:20])[0]
UserDomain = data[20:20+UserDomainLen]
AuthHeaderType = data[20+UserDomainLen:20+UserDomainLen+1]
if AuthHeaderType == "\x80":
PassLen = struct.unpack('<b',data[20+UserDomainLen+1:20+UserDomainLen+2])[0]
Password = data[20+UserDomainLen+2:20+UserDomainLen+2+PassLen]
SaveToDb({
'module': 'LDAP',
'type': 'Cleartext',
'client': client,
'user': UserDomain,
'cleartext': Password,
'fullhash': UserDomain+':'+Password,
})
if sasl == "\xA3":
Buffer = ParseNTLM(data,client)
return Buffer
elif Operation == "\x63":
Buffer = ParseSearch(data)
return Buffer
else:
if settings.Config.Verbose:
settings.Config.ResponderLogger.info('[LDAP] Operation not supported')
# LDAP Server class
class LDAPServer(BaseRequestHandler):
def handle(self):
try:
while True:
self.request.settimeout(0.5)
data = self.request.recv(8092)
Buffer = ParseLDAPPacket(data,self.client_address[0])
if Buffer:
self.request.send(Buffer)
except socket.timeout:
pass
|
gems-uff/noworkflow | refs/heads/master | capture/noworkflow/now/persistence/models/variable_dependency.py | 1 | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Slicing Dependency Model"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from sqlalchemy import Column, Integer, Text, select
from sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint
from ...utils.prolog import PrologDescription, PrologTrial, PrologAttribute
from .. import relational
from .base import AlchemyProxy, proxy_class
@proxy_class
class VariableDependency(AlchemyProxy):
"""Represent a variable dependency captured during program slicing"""
__tablename__ = "variable_dependency"
__table_args__ = (
PrimaryKeyConstraint("trial_id", "id"),
ForeignKeyConstraint(["trial_id"],
["trial.id"], ondelete="CASCADE"),
ForeignKeyConstraint(["trial_id", "source_activation_id"],
["function_activation.trial_id",
"function_activation.id"], ondelete="CASCADE"),
ForeignKeyConstraint(["trial_id", "target_activation_id"],
["function_activation.trial_id",
"function_activation.id"], ondelete="CASCADE"),
ForeignKeyConstraint(["trial_id",
"source_activation_id",
"source_id"],
["variable.trial_id",
"variable.activation_id",
"variable.id"], ondelete="CASCADE"),
ForeignKeyConstraint(["trial_id",
"target_activation_id",
"target_id"],
["variable.trial_id",
"variable.activation_id",
"variable.id"], ondelete="CASCADE"),
)
trial_id = Column(Integer, index=True)
id = Column(Integer, index=True) # pylint: disable=invalid-name
source_activation_id = Column(Integer, index=True)
source_id = Column(Integer, index=True)
target_activation_id = Column(Integer, index=True)
target_id = Column(Integer, index=True)
type = Column(Text) # pylint: disable=invalid-name
# Relationship attributes (see relationships.py):
# trial: 1 Trial
# source_activation: 1 Activation
# source: 1 Variable
# target_activation: 1 Activation
# target: 1 Variable
prolog_description = PrologDescription("dependency", (
PrologTrial("trial_id", link="variable.trial_id"),
PrologAttribute("id"),
PrologAttribute("source_activation_id", link="variable.activation_id"),
PrologAttribute("source_id", link="variable.id"),
PrologAttribute("target_activation_id", link="variable.activation_id"),
PrologAttribute("target_id", link="variable.id"),
), description=(
"informs that in a given trial (*trial_id*),\n"
"the value of a variable (*target_id*)\n"
"in a specific function activation (*target_activation_id*),\n"
"influenced somehow the value of another variable (*source_id*)\n"
"in another function activation (*source_activation_id*).\n"
"This influence can occur due to direct assignment,\n"
"matching of arguments in function activations,\n"
"changes in mutable arguments of function activations,\n"
"assignment within control flow structure, and function return."
))
@classmethod # query
def fast_load_by_trial(cls, trial_id, session=None):
"""Return tuples with variable ids"""
session = session or relational.session
model = cls.m
return session.execute(select([model.source_id, model.target_id])
.where(model.trial_id == trial_id)
)
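    # Usage sketch (hypothetical trial id): iterating the result of
    # VariableDependency.fast_load_by_trial(1) yields plain
    # (source_id, target_id) tuples without building full proxy objects.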
def __repr__(self):
return (
"VariableDependency({0.trial_id}, {0.id}, "
"{0.source}, {0.target})"
).format(self)
def __str__(self):
return "{0.source} <- {0.target}".format(self)
|
koniiiik/django | refs/heads/master | django/conf/locale/de/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
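# Sketch (standard Django behaviour, not part of this file's data): with this
# locale active, form date parsing tries DATE_INPUT_FORMATS in order, so the
# input "25.10.2006" is read as 25 October 2006.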
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
girving/tensorflow | refs/heads/master | tensorflow/compiler/tests/adagrad_da_test.py | 9 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdagradDA optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad_da
class AdagradDAOptimizerTest(xla_test.XLATestCase):
def testAdagradDAWithoutRegularizationBasic1(self):
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run a step of AdagradDA
update.run()
# Let g to be gradient accumulator, gg to be gradient squared
# accumulator, T be the global step, lr is the learning rate, and k the
# initial gradient squared accumulator value.
# w = \dfrac{sign(-g)*lr*|g - l1*T|_{+}}{l2*T*lr + \sqrt{k+gg})}
# For -0.1*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1)) = -0.904534
# similarly for others.
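      # Hand check of the same update (hypothetical standalone snippet):
      #   import numpy as np
      #   g, lr, k = np.array([0.1, 0.2]), 3.0, 0.1
      #   w = -np.sign(g) * lr * np.abs(g) / np.sqrt(k + g * g)
      # gives approximately [-0.904534, -1.603567], matching var0 below.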
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), var1.eval())
def testAdagradDAwithoutRegularizationBasic2(self):
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), var1.eval())
def testAdagradDAWithL1(self):
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.895489, -1.59555]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([-0.085339, -0.17989]), var1.eval())
def testAdagradDAWithL1_L2(self):
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.046907, -0.093659]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([-0.004275, -0.009023]), var1.eval())
if __name__ == "__main__":
test.main()
|
G1DR4/buendia | refs/heads/dev | tools/convert_to_xml.py | 13 | #!/usr/bin/python
# Copyright 2015 The Project Buendia Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
import json
import sys
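# Usage sketch (assumed file names): `python convert_to_xml.py profile.json profile.xml`
# reads the JSON document given as the first argument and writes the value of its
# top-level "xml" key to the second.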
input = open(sys.argv[1])
output = open(sys.argv[2], 'w')
output.write(json.load(input)['xml'])
|
asya-bergal/led-matrix-server | refs/heads/master | server.py | 1 | import random
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
import re
import glob
import time
import leds
__version__ = "0.1"
__all__ = ["SimpleHTTPRequestHandler"]
#__author__ = "bones7456"
#__home_page__ = "http://li2z.cn/"
#!/usr/bin/env python
"""Simple HTTP Server With Upload.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET/HEAD/POST commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
    calling the .guess_type() method. It can also receive files uploaded
    by the client.
The GET/HEAD/POST requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTPWithUpload/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def do_POST(self):
"""Serve a POST request."""
r, info = self.deal_post_data()
print r, info, "by: ", self.client_address
f = StringIO()
image_types = ['.png', '.jpg', '.gif', '.jpeg']
#f.write('File successfully pushed.')
#f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
#f.write("<html>\n<title>Upload Result Page</title>\n")
#f.write("<body>\n<h2>Upload Result Page</h2>\n")
#f.write("<hr>\n")
if r:
if glob.fn.endswith('.ppm'):
leds.uploadPPM()
f.write("Success: PPM file successfully pushed.\n")
elif glob.fn.endswith('.txt'):
leds.uploadTXT()
f.write("Success: TXT file successfully pushed.\n")
elif any(glob.fn.endswith(image_type) for image_type in image_types):
leds.uploadImage()
f.write("Success: Image file successfuly pushed.\n")
else:
f.write("Failure: Invalid file type.")
#_, ext = os.path.splitext(glob.fn)
#print "File type: " + ext
#f.write("<strong>Success:</strong>")
else:
f.write("Failure: Failed to push file.\n")
#f.write("<strong>Failed:</strong>")
f.write(info)
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_data(self):
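        # Minimal multipart/form-data parser: the boundary is taken from the
        # Content-Type header, the Content-Disposition line supplies the file
        # name, and body lines are copied out until the boundary reappears
        # (the CRLF that precedes it is stripped from the written data).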
boundary = self.headers.plisttext.split("=")[1]
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
        if not fn:
            return (False, "Can't find out file name...")
        # Remove the previously uploaded file (if any) before accepting a new one.
        if glob.fn != None:
            os.remove(glob.fn)
        glob.fn = fn[0]
path = self.translate_path(self.path)
fn = os.path.join(path, fn[0])
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith('\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "File '%s' upload success!" % fn)
else:
out.write(preline)
preline = line
        return (False, "Unexpected end of data.")
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n")
f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
f.write("<input name=\"file\" type=\"file\"/>")
f.write("<input type=\"submit\" value=\"upload\"/></form>\n")
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
clouddocx/boto | refs/heads/master | boto/pyami/installers/ubuntu/installer.py | 153 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.pyami.installers
import os
import os.path
import stat
import boto
import random
from pwd import getpwnam
class Installer(boto.pyami.installers.Installer):
"""
Base Installer class for Ubuntu-based AMI's
"""
def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
"""
Write a file to /etc/cron.d to schedule a command
env is a dict containing environment variables you want to set in the file
name will be used as the name of the file
"""
if minute == 'random':
minute = str(random.randrange(60))
if hour == 'random':
hour = str(random.randrange(24))
fp = open('/etc/cron.d/%s' % name, "w")
if env:
for key, value in env.items():
fp.write('%s=%s\n' % (key, value))
fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
fp.close()
def add_init_script(self, file, name):
"""
Add this file to the init.d directory
"""
f_path = os.path.join("/etc/init.d", name)
f = open(f_path, "w")
f.write(file)
f.close()
os.chmod(f_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
self.run("/usr/sbin/update-rc.d %s defaults" % name)
def add_env(self, key, value):
"""
        Add an environment variable
For Ubuntu, the best place is /etc/environment. Values placed here do
not need to be exported.
"""
boto.log.info('Adding env variable: %s=%s' % (key, value))
if not os.path.exists("/etc/environment.orig"):
self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False)
fp = open('/etc/environment', 'a')
fp.write('\n%s="%s"' % (key, value))
fp.close()
os.environ[key] = value
def stop(self, service_name):
self.run('/etc/init.d/%s stop' % service_name)
def start(self, service_name):
self.run('/etc/init.d/%s start' % service_name)
def create_user(self, user):
"""
Create a user on the local system
"""
self.run("useradd -m %s" % user)
usr = getpwnam(user)
return usr
def install(self):
"""
This is the only method you need to override
"""
raise NotImplementedError
|
fedora-infra/datanommer | refs/heads/develop | datanommer.models/alembic/versions/143ec484f5ba_add_uuid_column.py | 2 | # This file is a part of datanommer, a message sink for fedmsg.
# Copyright (C) 2014, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Add msg_id column
Revision ID: 143ec484f5ba
Revises: 2affa1daa804
Create Date: 2013-09-05 21:34:12.915709
"""
# revision identifiers, used by Alembic.
revision = '143ec484f5ba'
down_revision = '2affa1daa804'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('messages', sa.Column('msg_id', sa.UnicodeText, nullable=True,
unique=True, default=None))
pass
def downgrade():
op.drop_column('messages', 'msg_id')
pass
|
se4u/pylearn2 | refs/heads/master | pylearn2/sandbox/nlp/datasets/text.py | 44 | """Datasets for working with text"""
from theano.compat import six
class TextDatasetMixin(object):
"""
Use multiple inheritance with this class and any other dataset
class in order to provide useful functionality for natural
language processing purposes.
The derived class is expected to have a `_vocabulary`, which
is a dictionary from words (strings) to indices (integers). If
    needed, one can also set the `_unknown_index` and `_unknown_word`
attributes, which define the index and string that will be used
when a word or word index is not in the (inverse) dictionary
respectively.
"""
@property
def is_case_sensitive(self):
return getattr(self, '_is_case_sensitive', True)
@property
def vocabulary(self):
"""
Returns the vocabulary (a dictionary from
word to word indices)
"""
if hasattr(self, '_vocabulary'):
if not getattr(self, '_vocabulary_case_checked', False):
for word in self._vocabulary:
if word != word.lower():
raise ValueError('The vocabulary contains cased words '
'(%s) but the dataset is supposed to '
'be case-insensitive' % (word))
self._vocabulary_case_checked = True
return self._vocabulary
else:
raise NotImplementedError('No vocabulary given')
@property
def unknown_index(self):
"""
The index referring to the unknown word.
"""
if not hasattr(self, '_unknown_index') and \
0 in self.inverse_vocabulary:
raise NotImplementedError('This dataset does not define an index '
'for unknown words, but the default `0` '
'is already taken')
return getattr(self, '_unknown_index', 0)
@property
def unknown_word(self):
"""
The string to use for the unknown words. If
not defined, return `UNK`.
"""
if not hasattr(self, '_unknown_word') and 'UNK' in self.vocabulary:
raise NotImplementedError('This dataset does not define a string '
'for unknown words, but the default '
'`UNK` is already taken')
return getattr(self, '_unknown_word', 'UNK')
@property
def inverse_vocabulary(self):
"""
The inverse vocabulary, a dictionary from
integers to strings. If it does not exist,
it is created from the vocabulary if possible.
"""
if hasattr(self, '_inverse_vocabulary'):
return self._inverse_vocabulary
elif hasattr(self, '_vocabulary'):
self._inverse_vocabulary = dict((index, word) for word, index
in six.iteritems(self._vocabulary))
return self._inverse_vocabulary
else:
raise NotImplementedError
def words_to_indices(self, words):
"""
Converts the elements of a (nested) list of strings
to word indices
Parameters
----------
words : (nested) list of strings
Assumes each element is a word
"""
assert isinstance(words, list)
if all(isinstance(word, list) for word in words):
return [self.words_to_indices(word) for word in words]
assert all(isinstance(word, six.string_types) for word in words)
if self.is_case_sensitive:
return [self.vocabulary.get(word, self.unknown_index)
for word in words]
else:
return [self.vocabulary.get(word.lower(), self.unknown_index)
for word in words]
def indices_to_words(self, indices):
"""
Converts word indices back to words and returns
a list of strings
Parameters
----------
indices : list of ints
A list of word indices
"""
return [self.inverse_vocabulary.get(index, self.unknown_word)
for index in indices]
|
emedinaa/contentbox | refs/heads/master | third_party/social/apps/django_app/default/__init__.py | 12 | """
Django default ORM backend support.
To enable this app:
* Add 'social.apps.django_app.default' to INSTALLED_APPS
* In urls.py include url('', include('social.apps.django_app.urls'))
"""
|
joxeankoret/diaphora | refs/heads/master | pygments/styles/xcode.py | 50 | # -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
"""
Style similar to the Xcode default colouring theme.
"""
default_style = ''
styles = {
Comment: '#177500',
Comment.Preproc: '#633820',
String: '#C41A16',
String.Char: '#2300CE',
Operator: '#000000',
Keyword: '#A90D91',
Name: '#000000',
Name.Attribute: '#836C28',
Name.Class: '#3F6E75',
Name.Function: '#000000',
Name.Builtin: '#A90D91',
# In Obj-C code this token is used to colour Cocoa types
Name.Builtin.Pseudo: '#5B269A',
Name.Variable: '#000000',
Name.Tag: '#000000',
Name.Decorator: '#000000',
        # Workaround for a BUG here: the lexer treats multiline method signatures as labels
Name.Label: '#000000',
Literal: '#1C01CE',
Number: '#1C01CE',
Error: '#000000',
}
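    # Usage sketch (standard Pygments API): passing style=XcodeStyle (or the
    # registered name 'xcode') to a formatter such as HtmlFormatter applies
    # this colour scheme when highlighting code.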
|
xuru/pyvisdk | refs/heads/master | pyvisdk/do/net_ip_stack_info.py | 1 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def NetIpStackInfo(vim, *args, **kwargs):
'''Protocol version independent reporting data object for IP stack.'''
obj = vim.client.factory.create('ns0:NetIpStackInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'defaultRouter', 'neighbor', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
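    # Usage sketch (hypothetical arguments): NetIpStackInfo(vim, defaultRouter=[...],
    # neighbor=[...]) builds the SOAP object via the client factory and only
    # accepts the optional attributes listed above.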
|
cldershem/osf.io | refs/heads/develop | tests/webtest_tests.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import httplib as http
import logging
import mock
import re
import unittest
import markupsafe
from nose.tools import * # flake8: noqa (PEP8 asserts)
from framework.mongo.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase, fake
from tests.factories import (UserFactory, AuthUserFactory, ProjectFactory,
WatchConfigFactory,
NodeFactory, NodeWikiFactory, RegistrationFactory,
UnregUserFactory, UnconfirmedUserFactory,
PrivateLinkFactory)
from tests.test_features import requires_piwik
from website import settings, language
from website.security import random_string
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project.model import ensure_schemas
from website.util import web_url_for, api_url_for
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_401(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/').maybe_follow() # Redirects
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
# Goes to homepage, already logged in
res = self.app.get('/', auth=self.user.auth).follow(auth=self.user.auth)
# Clicks Dashboard link in navbar
res = res.click('My Dashboard', index=0, auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
res = self.app.get('/{0}/settings/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
@unittest.skip("Can't test this, since logs are dynamically loaded")
def test_sees_log_events_on_watched_projects(self):
# Another user has a public project
u2 = UserFactory(username='[email protected]', fullname='Bono')
project = ProjectFactory(creator=u2, is_public=True)
project.add_contributor(u2)
auth = Auth(user=u2)
project.save()
# User watches the project
watch_config = WatchConfigFactory(node=project)
self.user.watch(watch_config)
self.user.save()
# Goes to her dashboard, already logged in
res = self.app.get('/dashboard/', auth=self.auth, auto_follow=True)
# Sees logs for the watched project
assert_in('Watched Projects', res) # Watched Projects header
# The log action is in the feed
assert_in(project.title, res)
def test_sees_correct_title_home_page(self):
# User goes to homepage
res = self.app.get('/', auto_follow=True)
title = res.html.title.string
# page title is correct
assert_equal('OSF | Home', title)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/dashboard/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | Dashboard', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('No wiki content', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page = 'home'
wiki_content = 'Kittens'
NodeWikiFactory(user=self.user, node=project, content=wiki_content, page_name=wiki_page)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page,
), auth=self.auth)
assert_not_in('No wiki content', res)
assert_in(wiki_content, res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
non_ascii
), auth=self.auth, expect_errors=True)
project.update_node_wiki(non_ascii, 'new content', Auth(self.user))
assert_in(non_ascii, project.wiki_pages_current)
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('No wiki content', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('No wiki content', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1320
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_reset_password(self, mock_send_mail):
# A registered user
user = UserFactory()
# goes to the login page
url = web_url_for('forgot_password_get')
res = self.app.get(url)
# and fills out forgot password form
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = user.username
# submits
res = form.submit()
# mail was sent
mock_send_mail.assert_called
# gets 200 response
assert_equal(res.status_code, 200)
# URL is /forgotpassword
assert_equal(res.request.path, web_url_for('forgot_password_post'))
class TestRegistrations(OsfTestCase):
def setUp(self):
super(TestRegistrations, self).setUp()
ensure_schemas()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.original = ProjectFactory(creator=self.user, is_public=True)
# A registration
self.project = RegistrationFactory(
creator=self.user,
project=self.original,
user=self.user,
)
def test_can_see_contributor(self):
# Goes to project's page
res = self.app.get(self.project.url, auth=self.auth).maybe_follow()
        # The Sharing tab appears in the project navigation bar
subnav = res.html.select('#projectSubnav')[0]
assert_in('Sharing', subnav.text)
def test_sees_registration_templates(self):
# Browse to original project
res = self.app.get(
'{}register/'.format(self.original.url),
auth=self.auth
).maybe_follow()
# Find registration options
options = res.html.find(
'select', id='select-registration-template'
).find_all('option')
# Should see number of options equal to number of registration
# templates, plus one for 'Select...'
assert_equal(
len(options),
len(OSF_META_SCHEMAS) + 1
)
# First option should have empty value
assert_equal(options[0].get('value'), '')
# All registration templates should be listed in <option>
option_values = [
option.get('value')
for option in options[1:]
]
for schema in OSF_META_SCHEMAS:
assert_in(
schema['name'],
option_values
)
def test_registration_nav_not_seen(self):
# Goes to project's page
res = self.app.get(self.project.url, auth=self.auth).maybe_follow()
        # Registrations is not in the project navigation bar
subnav = res.html.select('#projectSubnav')[0]
assert_not_in('Registrations', subnav.text)
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_can_create_component_from_a_project(self):
res = self.app.get(self.project.url, auth=self.user.auth).maybe_follow()
assert_in('Add Component', res)
def test_can_create_component_from_a_component(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
assert_in('Add Component', res)
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text)
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Configure Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Configure commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
def test_does_show_registration_button(self):
# No registrations on the component
url = self.component.web_url_for('node_registrations')
res = self.app.get(url, auth=self.user.auth)
        # New Registration button is shown
assert_in('New Registration', res)
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.append(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in("Anonymous Contributors", res.body)
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.append(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': "not_valid"},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in(self.user.fullname, res)
assert_in(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
# FIXME: These tests affect search in the development environment, so Solr may need to be migrated after running them.
# Remove this side effect.
@unittest.skipIf(not settings.SEARCH_ENGINE, 'Skipping because search is disabled')
class TestSearching(OsfTestCase):
'''Test searching using the search bar. NOTE: These may affect the
Solr database. May need to migrate after running these.
'''
def setUp(self):
super(TestSearching, self).setUp()
import website.search.search as search
search.delete_all()
self.user = AuthUserFactory()
self.auth = self.user.auth
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_user_from_home_page(self):
user = UserFactory()
# Goes to home page
res = self.app.get('/').maybe_follow()
# Fills search form
form = res.forms['searchBar']
form['q'] = user.fullname
res = form.submit().maybe_follow()
# The username shows as a search result
assert_in(user.fullname, res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_project_from_home_page(self):
project = ProjectFactory(title='Foobar Project', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
project.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the project is shown as a result
assert_in('Foobar Project', res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_component_from_home_page(self):
component = NodeFactory(title='Foobar Component', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
component.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the component is shown as a result
assert_in('Foobar Component', res)
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
        # A non-project component
self.component = NodeFactory(category='hypothesis', creator=self.user)
self.project.nodes.append(self.component)
self.component.save()
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = NodeWikiFactory(user=self.user, node=self.component)
def _url_to_body(self, url):
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
@requires_piwik
class TestPiwik(OsfTestCase):
def setUp(self):
super(TestPiwik, self).setUp()
self.users = [
AuthUserFactory()
for _ in range(3)
]
self.consolidate_auth = Auth(user=self.users[0])
self.project = ProjectFactory(creator=self.users[0], is_public=True)
self.project.add_contributor(contributor=self.users[1])
self.project.save()
def test_contains_iframe_and_src(self):
res = self.app.get(
'/{0}/statistics/'.format(self.project._primary_key),
auth=self.users[0].auth
).maybe_follow()
assert_in('iframe', res)
assert_in('src', res)
assert_in(settings.PIWIK_HOST, res)
def test_anonymous_no_token(self):
res = self.app.get(
'/{0}/statistics/'.format(self.project._primary_key),
auth=self.users[2].auth
).maybe_follow()
assert_in('token_auth=anonymous', res)
def test_contributor_token(self):
res = self.app.get(
'/{0}/statistics/'.format(self.project._primary_key),
auth=self.users[1].auth
).maybe_follow()
assert_in(self.users[1].piwik_token, res)
def test_no_user_token(self):
res = self.app.get(
'/{0}/statistics/'.format(self.project._primary_key)
).maybe_follow()
assert_in('token_auth=anonymous', res)
def test_private_alert(self):
self.project.set_privacy('private', auth=self.consolidate_auth)
self.project.save()
res = self.app.get(
'/{0}/statistics/'.format(self.project._primary_key),
auth=self.users[0].auth
).maybe_follow().normal_body
assert_in(
'Usage statistics are collected only for public resources.',
res
)
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake.email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake.email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_sees_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake.email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in(name1, res)
res2 = self.app.get(project2.url)
assert_in(name2, res2)
@unittest.skip("as long as E-mails cannot be changed")
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
user1.emails.append(email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake.email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'password'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Contributor" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'password'
res = form.submit(auth=reg_user.auth).follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user._primary_key, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor
        assert_not_in(self.user._primary_key, self.project.contributors)
        # and their unclaimed record for the project has been deleted
        assert_not_in(self.project._primary_key, self.user.unclaimed_records)
class TestExplorePublicActivity(OsfTestCase):
def setUp(self):
super(TestExplorePublicActivity, self).setUp()
self.project = ProjectFactory(is_public=True)
self.registration = RegistrationFactory(project=self.project)
self.private_project = ProjectFactory(title="Test private project")
def test_newest_public_project_and_registrations_show_in_explore_activity(self):
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_in(str(self.project.title), res)
assert_in(str(self.project.date_created.date()), res)
assert_in(str(self.registration.title), res)
assert_in(str(self.registration.registered_date.date()), res)
assert_not_in(str(self.private_project.title), res)
class TestForgotAndResetPasswordViews(OsfTestCase):
def setUp(self):
super(TestForgotAndResetPasswordViews, self).setUp()
self.user = AuthUserFactory()
self.key = random_string(20)
        # manually set verification key
self.user.verification_key = self.key
self.user.save()
self.url = web_url_for('reset_password', verification_key=self.key)
def test_reset_password_view_returns_200(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
def test_can_reset_password_if_form_success(self):
res = self.app.get(self.url)
form = res.forms['resetPasswordForm']
form['password'] = 'newpassword'
form['password2'] = 'newpassword'
res = form.submit()
# password was updated
self.user.reload()
assert_true(self.user.check_password('newpassword'))
@unittest.skip('TODO: Get this working with CAS setup')
def test_reset_password_logs_out_user(self):
another_user = AuthUserFactory()
# visits reset password link while another user is logged in
res = self.app.get(self.url, auth=another_user.auth)
assert_equal(res.status_code, 200)
# We check if another_user is logged in by checking if
# their full name appears on the page (it should be in the navbar).
# Yes, this is brittle.
assert_not_in(another_user.fullname, res)
# make sure the form is on the page
assert_true(res.forms['resetPasswordForm'])
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, project=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._primary_key)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
if __name__ == '__main__':
unittest.main()
|
lshain-android-source/external-skia | refs/heads/master | bench/bench_compare.py | 209 | #!/usr/bin/env python
'''
Created on May 16, 2011
@author: bungeman
'''
import sys
import getopt
import bench_util
def usage():
"""Prints simple usage information."""
print '-o <file> the old bench output file.'
print '-n <file> the new bench output file.'
print '-h causes headers to be output.'
print '-s <stat> the type of statistical analysis used'
print ' Not specifying is the same as -s "avg".'
print ' avg: average of all data points'
print ' min: minimum of all data points'
print ' med: median of all data points'
print ' 25th: twenty-fifth percentile for all data points'
print '-f <fieldSpec> which fields to output and in what order.'
print ' Not specifying is the same as -f "bctondp".'
print ' b: bench'
print ' c: config'
print ' t: time type'
print ' o: old time'
print ' n: new time'
print ' d: diff'
print ' p: percent diff'
print '-t use tab delimited format for output.'
print '--match <bench> only matches benches which begin with <bench>.'
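# Illustrative invocation (the file names below are placeholders, not part of this script):
#   python bench_compare.py -o old_bench.txt -n new_bench.txt -h -s min -f bcondp
# This prints a header row plus, per bench/config, the old and new minimum times,
# their absolute difference, and the percent difference.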
class BenchDiff:
"""A compare between data points produced by bench.
(BenchDataPoint, BenchDataPoint)"""
def __init__(self, old, new):
self.old = old
self.new = new
self.diff = old.time - new.time
diffp = 0
if old.time != 0:
diffp = self.diff / old.time
self.diffp = diffp
    def __repr__(self):
        return "BenchDiff(%s, %s)" % (
            str(self.old),
            str(self.new),
        )
def main():
"""Parses command line and writes output."""
try:
opts, _ = getopt.getopt(sys.argv[1:], "f:o:n:s:ht", ['match='])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
old = None
new = None
column_format = ""
header_format = ""
columns = 'bctondp'
header = False
stat_type = "avg"
use_tabs = False
    match_bench = None
for option, value in opts:
if option == "-o":
old = value
elif option == "-n":
new = value
elif option == "-h":
header = True
elif option == "-f":
columns = value
elif option == "-s":
stat_type = value
elif option == "-t":
use_tabs = True
elif option == "--match":
match_bench = value
else:
usage()
assert False, "unhandled option"
if old is None or new is None:
usage()
sys.exit(2)
old_benches = bench_util.parse({}, open(old, 'r'), stat_type)
new_benches = bench_util.parse({}, open(new, 'r'), stat_type)
bench_diffs = []
for old_bench in old_benches:
#filter benches by the match criteria
if match_bench and not old_bench.bench.startswith(match_bench):
continue
#filter new_benches for benches that match old_bench
new_bench_match = [bench for bench in new_benches
if old_bench.bench == bench.bench and
old_bench.config == bench.config and
old_bench.time_type == bench.time_type
]
if (len(new_bench_match) < 1):
continue
bench_diffs.append(BenchDiff(old_bench, new_bench_match[0]))
if use_tabs:
column_formats = {
'b' : '{bench}\t',
'c' : '{config}\t',
't' : '{time_type}\t',
'o' : '{old_time: 0.2f}\t',
'n' : '{new_time: 0.2f}\t',
'd' : '{diff: 0.2f}\t',
'p' : '{diffp: 0.1%}\t',
}
header_formats = {
'b' : '{bench}\t',
'c' : '{config}\t',
't' : '{time_type}\t',
'o' : '{old_time}\t',
'n' : '{new_time}\t',
'd' : '{diff}\t',
'p' : '{diffp}\t',
}
else:
bench_max_len = max(map(lambda b: len(b.old.bench), bench_diffs))
config_max_len = max(map(lambda b: len(b.old.config), bench_diffs))
column_formats = {
'b' : '{bench: >%d} ' % (bench_max_len),
'c' : '{config: <%d} ' % (config_max_len),
't' : '{time_type: <4} ',
'o' : '{old_time: >10.2f} ',
'n' : '{new_time: >10.2f} ',
'd' : '{diff: >+10.2f} ',
'p' : '{diffp: >+8.1%} ',
}
header_formats = {
'b' : '{bench: >%d} ' % (bench_max_len),
'c' : '{config: <%d} ' % (config_max_len),
't' : '{time_type: <4} ',
'o' : '{old_time: >10} ',
'n' : '{new_time: >10} ',
'd' : '{diff: >10} ',
'p' : '{diffp: >8} ',
}
for column_char in columns:
if column_formats[column_char]:
column_format += column_formats[column_char]
header_format += header_formats[column_char]
else:
usage()
sys.exit(2)
if header:
print header_format.format(
bench='bench'
, config='conf'
, time_type='time'
, old_time='old'
, new_time='new'
, diff='diff'
, diffp='diffP'
)
bench_diffs.sort(key=lambda d : [d.diffp,
d.old.bench,
d.old.config,
d.old.time_type,
])
for bench_diff in bench_diffs:
print column_format.format(
bench=bench_diff.old.bench.strip()
, config=bench_diff.old.config.strip()
, time_type=bench_diff.old.time_type
, old_time=bench_diff.old.time
, new_time=bench_diff.new.time
, diff=bench_diff.diff
, diffp=bench_diff.diffp
)
if __name__ == "__main__":
main()
|
GlobalFishingWatch/vessel-classification | refs/heads/master | classification/run_training.py | 1 | # Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import logging
import os
import sys
import importlib
import numpy as np
import tensorflow as tf
from pkg_resources import resource_filename
from . import metadata
def compute_approx_norms(model_fn, count=100):
    """Estimate per-feature mean and standard deviation from `count` sampled batches."""
    dataset = model_fn()
    iterator = dataset.make_initializable_iterator()
    el = iterator.get_next()
    means = []
    vars = []
    with tf.Session() as sess:
        sess.run(iterator.initializer)
        for _ in range(count):
            x = sess.run(el)[0]['features']
            means.append(x.mean(axis=(0, 1)))
            vars.append(x.var(axis=(0, 1)))
    return np.mean(means, axis=0), np.sqrt(np.mean(vars, axis=0))
def main(args):
logging.getLogger().setLevel(logging.DEBUG)
tf.logging.set_verbosity(tf.logging.DEBUG)
logging.info("Running with Tensorflow version: %s", tf.__version__)
logging.info("Loading model: %s", args.model_name)
module = "classification.models.{}".format(args.model_name)
try:
Model = importlib.import_module(module).Model
except:
logging.fatal("Could not load model: {}".format(module))
raise
metadata_file = os.path.abspath(
resource_filename('classification.data', args.metadata_file))
if not os.path.exists(metadata_file):
logging.fatal("Could not find metadata file: %s.", metadata_file)
sys.exit(-1)
if args.fishing_ranges_file:
fishing_ranges_file = os.path.abspath(
resource_filename('classification.data', args.fishing_ranges_file))
if not os.path.exists(fishing_ranges_file):
logging.fatal("Could not find fishing range file: %s.",
fishing_ranges_file)
sys.exit(-1)
fishing_ranges = metadata.read_fishing_ranges(fishing_ranges_file)
else:
fishing_ranges = {}
all_available_ids = metadata.find_available_ids(args.root_feature_path)
split = None if (args.split == -1) else args.split
logging.info("Using split: %s", split)
vessel_metadata = Model.read_metadata(
all_available_ids, metadata_file,
fishing_ranges, split=split)
feature_dimensions = int(args.feature_dimensions)
chosen_model = Model(feature_dimensions, vessel_metadata, args.metrics)
train_input_fn = chosen_model.make_training_input_fn(args.root_feature_path,
args.num_parallel_readers)
test_input_fn = chosen_model.make_test_input_fn(args.root_feature_path,
args.num_parallel_readers)
estimator = chosen_model.make_estimator(args.training_output_path)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
max_steps=chosen_model.number_of_steps
)
eval_spec = tf.estimator.EvalSpec(
steps=10,
input_fn=test_input_fn,
start_delay_secs=120,
throttle_secs=600
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def parse_args():
""" Parses command-line arguments for training."""
argparser = argparse.ArgumentParser('Train fishing classification model.')
argparser.add_argument('model_name')
argparser.add_argument(
'--root_feature_path',
required=True,
help='The root path to the vessel movement feature directories.')
argparser.add_argument(
'--training_output_path',
required=True,
help='The working path for model statistics and checkpoints.')
argparser.add_argument(
'--feature_dimensions',
required=True,
help='The number of dimensions of a classification feature.')
argparser.add_argument('--metadata_file', help='Path to metadata.')
argparser.add_argument(
'--fishing_ranges_file', help='Path to fishing range file.')
argparser.add_argument(
'--metrics',
default='all',
help='How many metrics to dump ["all" | "minimal"]')
argparser.add_argument(
'--num_parallel_readers',
default=1, type=int,
help='How many parallel readers to employ reading data')
argparser.add_argument(
'--split',
default=0, type=int,
help='Which split to train/test on')
return argparser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
|
namunu/MBS_Patent | refs/heads/master | parser_test/xml_driver.py | 2 | #!/usr/bin/env python
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro [email protected] github.com/gtfierro
"""
"""
General purpose XML parsing driver for use as a content handler through
Python's xml.sax module. Works in conjunction with lib/xml_util.py, which
provides useful helper methods to handle the parsed data.
"""
import functools
from collections import deque
from xml.sax import make_parser, handler
import xml_util
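# Illustrative wiring (a sketch only; the file name and element names below are made up):
#
#   parser = make_parser()
#   xml_handler = XMLHandler()
#   parser.setContentHandler(xml_handler)
#   parser.parse('patent_grant.xml')
#   root = xml_handler.root
#   titles = root.contents_of('invention_title')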
class ChainList(list):
"""
This is the base structure that handles the tree created by XMLElement
and XMLHandler. Overriding __getattr__ allows us to chain queries on
a list in order to traverse the tree.
"""
def contents_of(self, tag, default=[''], as_string=False, upper=True):
res = []
for item in self:
res.extend(item.contents_of(tag, upper=upper))
if as_string:
res = [r for r in res if type(r).__name__ not in ('tuple', 'list')]
return ' '.join(res) if res else ''
return ChainList(res) if res else default
def __getattr__(self, key):
res = []
scope = deque(self)
while scope:
current = scope.popleft()
if current._name == key: res.append(current)
else: scope.extend(current.children)
return ChainList(res)
def __reduce__(self): return (ChainList, (), None, iter(self), None)
def __getstate__(self): return None
class XMLElement(object):
"""
Represents XML elements from a document. These will assist
us in representing an XML document as a Python object.
Heavily inspired from: https://github.com/stchris/untangle/blob/master/untangle.py
"""
def __init__(self, name, attributes):
self._name = name
self._attributes = attributes
self.content = []
self.children = ChainList()
self.is_root = False
def __getstate__(self):
return self.__dict__
def __iter__(self):
yield self
def __nonzero__(self):
return self.is_root or self._name is not None
def __getitem__(self, key):
return self.get_attribute(key)
def __getattr__(self, key):
res = []
scope = deque(self.children)
while scope:
current = scope.popleft()
if current._name == key: res.append(current)
else: scope.extend(current.children)
if res:
self.__dict__[key] = ChainList(res)
return ChainList(res)
else:
return ChainList('')
def contents_of(self, key, default=ChainList(''), as_string=False, upper=True):
candidates = self.__getattr__(key)
if candidates:
res = [x.get_content(upper=upper) for x in candidates]
else:
res = default
if as_string:
if not res:
return ''
# handle corner case of [['content', 'here']]
elif isinstance(res, list)\
and len(res) == 1\
and isinstance(res[0], list):
res = res[0]
return ' '.join(filter(lambda x: x, filter(lambda x: not isinstance(x, list), res)))
return res
def get_content(self, upper=True, as_string=True):
if as_string:
if len(self.content) == 1:
return xml_util.clean(self.content[0], upper=upper)
else:
return ""
else:
return map(functools.partial(xml_util.clean, upper=upper), self.content)
def put_content(self, content, lastlinenumber, linenumber):
if not self.content or lastlinenumber != linenumber:
self.content.append(content)
else:
self.content[-1] += content
def add_child(self, child):
self.children.append(child)
def get_attribute(self, key, upper=True):
return xml_util.clean(self._attributes.get(key, None), upper=upper)
def get_xmlelements(self, name):
return filter(lambda x: x._name == name, self.children) \
if name else \
self.children
def get_attribute_string(self):
attrib_list = [k + '="' + self._attributes.get(k, None) + '"' for k in self._attributes if k != "textdata"]
return " ".join(attrib_list)
class XMLHandler(handler.ContentHandler):
"""
SAX Handler to create the Python object while parsing
"""
def __init__(self):
self.root = XMLElement(None, None)
self.root.is_root = True
self.elements = ChainList()
handler.ContentHandler.__init__(self)
self.lastline = -1
def startElement(self, name, attributes):
name = name.replace('-','_').replace('.','_').replace(':','_')
xmlelem = XMLElement(name, dict(attributes.items()))
if self.elements:
self.elements[-1].add_child(xmlelem)
else:
self.root.add_child(xmlelem)
self.elements.append(xmlelem)
def endElement(self, name):
if self.elements:
elem = self.elements.pop()
if self.elements:
text_data = False
with_tags = False
for i in reversed(self.elements):
if i._attributes.get("textdata", None):
text_data = True
if i._attributes.get("withtags", None):
with_tags = True
if text_data:
cont_text = elem.get_content(upper=False, as_string=True)
attrib_str = elem.get_attribute_string()
if attrib_str != "":
attrib_str = " " + attrib_str
if with_tags:
content = u"<" + elem._name + attrib_str + u">" + cont_text + u"</" + elem._name + u">"
else:
content = cont_text
self.elements[-1].put_content(content, 0, 0)
def characters(self, content):
currentlinenumber = self._locator.getLineNumber()
if content.strip():
#if self.elements[-1]._name in ('b','i'):
# self.elements[-2].put_content(content, self.lastline, currentlinenumber)
#elif self.elements[-1]._name == 'sub':
# newtxt = u"<sub>"+content+u"</sub>"
# self.elements[-2].put_content(newtxt, self.lastline, currentlinenumber)
#else:
# self.elements[-1].put_content(content, self.lastline, currentlinenumber)
self.elements[-1].put_content(content, self.lastline, currentlinenumber)
self.lastline = self._locator.getLineNumber()
|
rolandovillca/python_basic_concepts | refs/heads/master | context_manager/a.py | 4 | class A:
def __enter__(self):
print '11111'
return '22222'
def __exit__(self, type, value, traceback):
print '33333'
return False
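# Note (added for clarity): __enter__ runs as the with-block is entered and its return
# value ('22222') is what gets bound to `s`; __exit__ runs when the block is left.
# Returning False from __exit__ means any exception raised inside the block would propagate.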
with A() as s:
a = '44444'
print '55555:', s
print '66666:', a
print '77777:', s
# 11111
# 55555: 22222
# 33333
# 66666: 44444
# 77777: 22222
|
wcmitchell/insights-core | refs/heads/master | insights/contrib/pyparsing.py | 5 | # module pyparsing.py
#
# Copyright (c) 2003-2015 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString( hello ))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.4"
__versionTime__ = "13 May 2016 18:25 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import functools
import itertools
import traceback
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
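# Illustrative handling of the attributes listed above (expr and text are placeholders):
#
#   try:
#       expr.parseString(text)
#   except ParseException as pe:
#       print(pe.line)                      # text of the line where parsing failed
#       print(" " * (pe.col - 1) + "^")     # caret under the failing column
#       print(pe)                           # message including line/column info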
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{L{ParseFatalException}}, but thrown internally when an
C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by C{validate()} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
#~ for name in self.__tokdict:
#~ occurrences = self.__tokdict[name]
#~ for j in removed:
#~ for k, (value, position) in enumerate(occurrences):
#~ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def iterkeys( self ):
"""Returns all named result keys."""
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def itervalues( self ):
"""Returns all named result values."""
return (self[k] for k in self.iterkeys())
def iteritems( self ):
return ((k, self[k]) for k in self.iterkeys())
if PY_3:
keys = iterkeys
values = itervalues
items = iteritems
else:
def keys( self ):
"""Returns all named result keys."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values."""
return list(self.itervalues())
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""Removes and returns item at specified index (default=last).
Supports both list and dict semantics for pop(). If passed no
argument or an integer argument, it will use list semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use dict
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in dict.pop()."""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""Inserts new element at location index in the list of parsed tokens."""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
#~ for name in self.__tokdict:
#~ occurrences = self.__tokdict[name]
#~ for k, (value, position) in enumerate(occurrences):
#~ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""Add single element to end of ParseResults list of elements."""
self.__toklist.append(item)
def extend( self, itemseq ):
"""Add sequence of elements to end of ParseResults list of elements."""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""Clear all elements and results names."""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""Returns the named parse results as a nested dictionary."""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""Returns a new copy of a C{ParseResults} object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data."""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if self.haskeys():
items = sorted(self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if loc<len(s) and s[loc] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack():
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack()[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb):
frames = traceback.extract_tb(tb)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack()[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
"""
ParserElement.literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments::
- callDuringTry = (default=False) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments::
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value, Exception):
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException as pe:
pe.__traceback__ = None
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
@staticmethod
def resetCache():
ParserElement._exprArgCache.clear()
_packratEnabled = False
@staticmethod
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
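# Editorial usage sketch (not part of the original source): packrat parsing is
# enabled once, immediately after import and before building or running grammars.
#
#     import pyparsing
#     pyparsing.ParserElement.enablePackrat()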
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
C{parseString}
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
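# Editorial usage sketch (not part of the original source); assumes Word and
# alphas from this module.
#
#     greeting = Word(alphas) + ',' + Word(alphas) + '!'
#     print(greeting.parseString("Hello, World!"))
#     # -> ['Hello', ',', 'World', '!']
#     # with parseAll=True, any trailing unparsed text raises ParseException:
#     # greeting.parseString("Hello, World! extra", parseAll=True)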
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
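# Editorial usage sketch (not part of the original source): scanString yields
# (tokens, start, end) for each match found in the input.
#
#     for tokens, start, end in Word(nums).scanString("a1 b22 c333"):
#         print(tokens[0], start, end)
#     # -> 1 1 2
#     #    22 4 6
#     #    333 8 11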
def transformString( self, instring ):
"""Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
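# Editorial usage sketch (not part of the original source): the parse action's
# return value replaces the matched text in the output string.
#
#     upcaser = Word(alphas).setParseAction(lambda toks: toks[0].upper())
#     print(upcaser.transformString("abc def ghi"))   # -> 'ABC DEF GHI'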
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
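# Editorial usage sketch (not part of the original source):
#
#     print(Word(alphas).searchString("Sample text with words"))
#     # -> [['Sample'], ['text'], ['with'], ['words']]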
def __add__(self, other ):
"""Implementation of + operator - returns C{L{And}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{L{And}} with error stop"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
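# Editorial usage sketch (not part of the original source): the multiplier
# forms described above, using Word and nums from this module.
#
#     Word(nums) * 3          # exactly three integers, e.g. "1 2 3"
#     Word(nums) * (2, 4)     # two to four integers
#     Word(nums) * (2, None)  # two or more integers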
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{L{MatchFirst}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{L{Or}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{L{Each}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{L{NotAny}}"""
return NotAny( self )
def __call__(self, name=None):
"""Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
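# Editorial usage sketch (not part of the original source): calling an
# expression with a name is shorthand for setResultsName.
#
#     date = Word(nums)("year") + '/' + Word(nums)("month") + '/' + Word(nums)("day")
#     result = date.parseString("1999/12/31")
#     print(result.year, result.month, result.day)   # -> 1999 12 31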
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "r")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, s, parseAll=True):
"""Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub-expressions while building up a larger parser, as in:
expr = Word(nums)
assert expr.matches("100")
Parameters:
- s - sample string to test against this expression
- parseAll - (default=True) - flag to require that the entire string be matched
"""
try:
self.parseString(_ustr(s), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=False, comment='#', printResults=True):
"""Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=False) - flag to pass to C{L{parseString}} when running tests
- comment - (default='#') - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- printResults - (default=True) prints test output to stdout; if False, returns a
(success, results) tuple, where success indicates that all tests succeeded, and the
results contain a list of lines of each test's output as it would have been
printed to stdout
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
out.append(self.parseString(t, parseAll=parseAll).dump())
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = False
if printResults:
out.append('')
print('\n'.join(out))
else:
allResults.append(out)
if not printResults:
return success, allResults
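# Editorial usage sketch (not part of the original source): runTests parses
# each non-comment line of the test string and reports success or failure.
#
#     Word(nums).runTests('''
#         # a valid integer
#         1234
#         # not an integer at all
#         abcd
#         ''')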
class Token(ParserElement):
"""Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}::
Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
matching, default is C{False}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
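# Editorial usage sketch (not part of the original source), restating the
# Keyword/Literal contrast from the docstring above.
#
#     Literal("if").parseString("ifAndOnlyIf")   # -> ['if']
#     Keyword("if").parseString("ifAndOnlyIf")   # raises ParseException
#     Keyword("if").parseString("if x == 1")     # -> ['if']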
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
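# Editorial usage sketch (not part of the original source); assumes alphas,
# alphanums, and nums from this module.
#
#     identifier = Word(alphas + "_", alphanums + "_")
#     integer    = Word(nums)
#     identifier.parseString("working_dir2")   # -> ['working_dir2']
#     integer.parseString("42")                # -> ['42']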
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
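# Editorial usage sketch (not part of the original source): named groups in the
# pattern become results names on the returned tokens.
#
#     date = Regex(r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)")
#     date.parseString("2015-10-21")["year"]   # -> '2015'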
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
r"""Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
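# Editorial usage sketch (not part of the original source):
#
#     QuotedString('"').parseString('"hello world"')                # -> ['hello world']
#     QuotedString('"', escChar='\\').parseString(r'"a \"b\""')     # -> ['a "b"']
#     QuotedString('{', endQuoteChar='}').parseString('{braced}')   # -> ['braced']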
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
elif isinstance( exprs, collections.Sequence ):
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(Literal, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException(pe)
except IndexError:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
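# Editorial usage sketch (not part of the original source), contrasting
# MatchFirst ('|') with Or ('^'); Combine is defined elsewhere in this module.
#
#     number = Word(nums)
#     real   = Combine(Word(nums) + '.' + Word(nums))
#     (number | real).parseString("3.1416")   # first alternative wins   -> ['3']
#     (number ^ real).parseString("3.1416")   # longest alternative wins -> ['3.1416']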
class Each(ParseExpression):
"""Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults()
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults:
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=None) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
"""
def __init__( self, expr, stopOn=None):
super(OneOrMore, self).__init__(expr)
ender = stopOn
if isinstance(ender, basestring):
ender = Literal(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class ZeroOrMore(OneOrMore):
"""Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=None) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression
is not found.
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=False) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=None) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=None) define expressions that are not allowed to be
included in the skipped test; if found before the target expression is found,
the SkipTo is not a match
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of C{ParseExpression}, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as a item key.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
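# Illustrative sketch (not part of the original module): basic delimitedList usage;
# the _demo_* helper name below is made up purely for illustration.
def _demo_delimitedList():
    fieldList = delimitedList(Word(alphas))
    # "red, green, blue" -> ['red', 'green', 'blue'] (the delimiters are suppressed)
    return fieldList.parseString("red, green, blue").asList()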
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
       The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
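# Illustrative sketch (not part of the original module): countedArray reads the leading
# count, suppresses it, and groups that many expr matches; _demo_* is a hypothetical name.
def _demo_countedArray():
    countedWords = countedArray(Word(alphas))
    # "3 ab cd ef" -> [['ab', 'cd', 'ef']] (the count token '3' is suppressed)
    return countedWords.parseString("3 ab cd ef").asList()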
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
       previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
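# Illustrative sketch (not part of the original module): matchPreviousLiteral re-matches
# whatever literal text the earlier expression produced; _demo_* is a hypothetical name.
def _demo_matchPreviousLiteral():
    first = Word(nums)
    second = matchPreviousLiteral(first)
    matchExpr = first + ":" + second
    # "1:1" parses successfully; "1:2" raises ParseException
    return matchExpr.parseString("1:1").asList()   # -> ['1', ':', '1']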
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
       expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Sequence):
symbols = list(strs[:])
elif isinstance(strs, _generatorType):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
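# Illustrative sketch (not part of the original module): oneOf reorders the alternatives
# so that longer literals are tried first; _demo_* is a hypothetical name.
def _demo_oneOf():
    comparisonOp = oneOf("< = > <= >= !=")
    # '<=' matches as a single token rather than '<' followed by '='
    return [comparisonOp.parseString(s)[0] for s in ("<=", ">", "!=")]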
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
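# Illustrative sketch (not part of the original module): dictOf builds keyed results from
# repeated key/value pairs; the local names below are made up for illustration only.
def _demo_dictOf():
    attrName = Word(alphas)
    attrValue = Word(nums)
    attrDict = dictOf(attrName, Suppress("=") + attrValue)
    # "width = 100 height = 50" -> {'width': '100', 'height': '50'}
    return attrDict.parseString("width = 100 height = 50").asDict()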
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
       input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
return matchExpr
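# Illustrative sketch (not part of the original module): originalTextFor recovers the exact
# slice of input text spanned by a match, whitespace included; _demo_* is a hypothetical name.
def _demo_originalTextFor():
    parenGroup = originalTextFor("(" + delimitedList(Word(nums)) + ")")
    # -> '( 1, 2,3 )' rather than the individual tokens
    return parenGroup.searchString("pick ( 1, 2,3 ) of these")[0][0]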
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty."""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
       Be careful if the input text contains C{<TAB>} characters; you may want to call
C{L{ParserElement.parseWithTabs}}
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
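# Illustrative sketch (not part of the original module): locatedExpr wraps each match with
# its start and end offsets; _demo_* is a hypothetical name.
def _demo_locatedExpr():
    wd = locatedExpr(Word(alphas))
    # "ab cd" -> [[0, 'ab', 2], [3, 'cd', 5]]
    return [match[0].asList() for match in wd.searchString("ab cd")]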
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\x' (\x21, which is a '!' character)
(\0x## is also supported for backwards compatibility)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
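# Illustrative sketch (not part of the original module): withAttribute filters start tags by
# attribute value, so searchString only keeps qualifying tags; the names below are made up.
def _demo_withAttribute():
    divStart, divEnd = makeHTMLTags("div")
    gridDiv = divStart.setParseAction(withAttribute(type="grid"))
    html = '<div type="grid">1 2 3</div> <div type="text">not this one</div>'
    # only the type="grid" start tag survives the validating parse action
    return len(gridDiv.searchString(html))   # -> 1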
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=Suppress('('))
- rpar - expression for matching right-parentheses (default=Suppress(')'))
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
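# Illustrative sketch (not part of the original module): a two-level arithmetic grammar
# built with infixNotation; the local names are made up for illustration only.
def _demo_infixNotation():
    operand = Word(nums).setParseAction(lambda t: int(t[0]))
    arithExpr = infixNotation(operand, [
        (oneOf('* /'), 2, opAssoc.LEFT),
        (oneOf('+ -'), 2, opAssoc.LEFT),
    ])
    # "9 + 2 * 3" -> [[9, '+', [2, '*', 3]]], i.e. '*' binds tighter than '+'
    return arithExpr.parseString("9 + 2 * 3").asList()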
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
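# Illustrative sketch (not part of the original module): the default nestedExpr splits
# parenthesized text into nested lists of whitespace-delimited items; _demo_* is hypothetical.
def _demo_nestedExpr():
    # "(a (b c) d)" -> [['a', ['b', 'c'], 'd']]
    return nestedExpr().parseString("(a (b c) d)").asList()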
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
        - indent - boolean indicating whether block must be indented beyond
             the current level; set to False for block of left-most statements
(default=True)
A valid block must contain at least one C{blockStatement}.
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
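# Illustrative sketch (not part of the original module): commaSeparatedList keeps quoted
# fields intact and yields empty strings for missing fields; _demo_* is a hypothetical name.
def _demo_commaSeparatedList():
    # -> ['red', '"green, dark"', '', 'blue']
    return commaSeparatedList.parseString('red, "green, dark", , blue').asList()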
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (integers, reals, scientific notation)
- parse actions for converting numeric strings to Python int and/or float types
- common programming identifiers
"""
def convertToInteger(t):
"""
Parse action for converting parsed integers to Python int
"""
return int(t[0])
def convertToFloat(t):
"""
Parse action for converting parsed numbers to Python float
"""
return float(t[0])
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer and returns an int"""
signedInteger = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign and returns an int"""
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
    sciReal = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
numeric = (sciReal | real | signedInteger).streamline()
"""any numeric expression, returns the corresponding Python type"""
number = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("number").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier"""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.numeric.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
|
Luise-li/haroopad | refs/heads/develop | node_modules/stringex/node_modules/js-yaml/support/pyyaml-src/scanner.py | 235 |
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
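# For example (an illustrative note, not part of the original file), scanning
#
#   - foo
#   - bar: baz
#
# produces roughly this token stream:
#
#   STREAM-START, BLOCK-SEQUENCE-START,
#   BLOCK-ENTRY, SCALAR(foo),
#   BLOCK-ENTRY, BLOCK-MAPPING-START, KEY, SCALAR(bar), VALUE, SCALAR(baz), BLOCK-END,
#   BLOCK-END, STREAM-END
#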
__all__ = ['Scanner', 'ScannerError']
from .error import MarkedYAMLError
from .tokens import *
class ScannerError(MarkedYAMLError):
pass
class SimpleKey:
# See below simple keys treatment.
def __init__(self, token_number, required, index, line, column, mark):
self.token_number = token_number
self.required = required
self.index = index
self.line = line
self.column = column
self.mark = mark
class Scanner:
def __init__(self):
"""Initialize the scanner."""
# It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
# input data to Unicode. It also adds NUL to the end.
#
# Reader supports the following methods
# self.peek(i=0) # peek the next i-th character
# self.prefix(l=1) # peek the next l characters
# self.forward(l=1) # read the next l characters and move the pointer.
        # Have we reached the end of the stream?
self.done = False
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
self.flow_level = 0
# List of processed tokens that are not yet emitted.
self.tokens = []
# Add the STREAM-START token.
self.fetch_stream_start()
# Number of tokens that were emitted through the `get_token` method.
self.tokens_taken = 0
# The current indentation level.
self.indent = -1
# Past indentation levels.
self.indents = []
# Variables related to simple keys treatment.
# A simple key is a key that is not denoted by the '?' indicator.
# Example of simple keys:
# ---
# block simple key: value
# ? not a simple key:
# : { flow simple key: value }
# We emit the KEY token before all keys, so when we find a potential
# simple key, we try to locate the corresponding ':' indicator.
# Simple keys should be limited to a single line and 1024 characters.
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
self.allow_simple_key = True
# Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
        # Return the next token, but do not remove it from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
if self.done:
return False
if not self.tokens:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
def fetch_more_tokens(self):
# Eat whitespaces and comments until we reach the next token.
self.scan_to_next_token()
# Remove obsolete possible simple keys.
self.stale_possible_simple_keys()
# Compare the current indentation and column. It may add some tokens
# and decrease the current indentation level.
self.unwind_indent(self.column)
# Peek the next character.
ch = self.peek()
# Is it the end of stream?
if ch == '\0':
return self.fetch_stream_end()
# Is it a directive?
if ch == '%' and self.check_directive():
return self.fetch_directive()
# Is it the document start?
if ch == '-' and self.check_document_start():
return self.fetch_document_start()
# Is it the document end?
if ch == '.' and self.check_document_end():
return self.fetch_document_end()
# TODO: support for BOM within a stream.
#if ch == '\uFEFF':
# return self.fetch_bom() <-- issue BOMToken
# Note: the order of the following checks is NOT significant.
# Is it the flow sequence start indicator?
if ch == '[':
return self.fetch_flow_sequence_start()
# Is it the flow mapping start indicator?
if ch == '{':
return self.fetch_flow_mapping_start()
# Is it the flow sequence end indicator?
if ch == ']':
return self.fetch_flow_sequence_end()
# Is it the flow mapping end indicator?
if ch == '}':
return self.fetch_flow_mapping_end()
# Is it the flow entry indicator?
if ch == ',':
return self.fetch_flow_entry()
# Is it the block entry indicator?
if ch == '-' and self.check_block_entry():
return self.fetch_block_entry()
# Is it the key indicator?
if ch == '?' and self.check_key():
return self.fetch_key()
# Is it the value indicator?
if ch == ':' and self.check_value():
return self.fetch_value()
# Is it an alias?
if ch == '*':
return self.fetch_alias()
# Is it an anchor?
if ch == '&':
return self.fetch_anchor()
# Is it a tag?
if ch == '!':
return self.fetch_tag()
# Is it a literal scalar?
if ch == '|' and not self.flow_level:
return self.fetch_literal()
# Is it a folded scalar?
if ch == '>' and not self.flow_level:
return self.fetch_folded()
# Is it a single quoted scalar?
if ch == '\'':
return self.fetch_single()
# Is it a double quoted scalar?
if ch == '\"':
return self.fetch_double()
# It must be a plain scalar then.
if self.check_plain():
return self.fetch_plain()
# No? It's an error. Let's produce a nice error message.
raise ScannerError("while scanning for the next token", None,
"found character %r that cannot start any token" % ch,
self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
for level in list(self.possible_simple_keys):
key = self.possible_simple_keys[level]
if key.line != self.line \
or self.index-key.index > 1024:
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
# The next token may start a simple key. We check if it's possible
# and save its position. This function is called for
# ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
# Check if a simple key is required at the current position.
required = not self.flow_level and self.indent == self.column
        # A simple key is required only if it is the first token in the current
        # line; in that position a simple key is always allowed, so the
        # assertion below must hold.
assert self.allow_simple_key or not required
        # The next token might be a simple key. Let's save its number and
# position.
if self.allow_simple_key:
self.remove_possible_simple_key()
token_number = self.tokens_taken+len(self.tokens)
key = SimpleKey(token_number, required,
self.index, self.line, self.column, self.get_mark())
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
def unwind_indent(self, column):
## In flow context, tokens should respect indentation.
## Actually the condition should be `self.indent >= column` according to
## the spec. But this condition will prohibit intuitively correct
## constructions such as
## key : {
## }
#if self.flow_level and self.indent > column:
# raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
# self.get_mark())
# In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
if self.flow_level:
return
# In block context, we may need to issue the BLOCK-END tokens.
while self.indent > column:
mark = self.get_mark()
self.indent = self.indents.pop()
self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
def fetch_stream_start(self):
# We always add STREAM-START as the first token and STREAM-END as the
# last token.
# Read the token.
mark = self.get_mark()
# Add STREAM-START.
self.tokens.append(StreamStartToken(mark, mark,
encoding=self.encoding))
def fetch_stream_end(self):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
# Read the token.
mark = self.get_mark()
# Add STREAM-END.
self.tokens.append(StreamEndToken(mark, mark))
        # The stream is finished.
self.done = True
def fetch_directive(self):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys. Note that there could not be a block collection
# after '---'.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
# '[' and '{' may start a simple key.
self.save_possible_simple_key()
# Increase the flow level.
self.flow_level += 1
# Simple keys are allowed after '[' and '{'.
self.allow_simple_key = True
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Decrease the flow level.
self.flow_level -= 1
# No simple keys after ']' or '}'.
self.allow_simple_key = False
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
def fetch_block_entry(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a new entry?
if not self.allow_simple_key:
raise ScannerError(None, None,
"sequence entries are not allowed here",
self.get_mark())
# We may need to add BLOCK-SEQUENCE-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockSequenceStartToken(mark, mark))
# It's an error for the block entry to occur in the flow context,
# but we let the parser detect this.
else:
pass
# Simple keys are allowed after '-'.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add BLOCK-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
# Block context needs additional checks.
if not self.flow_level:
            # Are we allowed to start a key (not necessarily a simple one)?
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping keys are not allowed here",
self.get_mark())
# We may need to add BLOCK-MAPPING-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after '?' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add KEY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(KeyToken(start_mark, end_mark))
def fetch_value(self):
# Do we determine a simple key?
if self.flow_level in self.possible_simple_keys:
# Add KEY.
key = self.possible_simple_keys[self.flow_level]
del self.possible_simple_keys[self.flow_level]
self.tokens.insert(key.token_number-self.tokens_taken,
KeyToken(key.mark, key.mark))
# If this key starts a new block mapping, we need to add
# BLOCK-MAPPING-START.
if not self.flow_level:
if self.add_indent(key.column):
self.tokens.insert(key.token_number-self.tokens_taken,
BlockMappingStartToken(key.mark, key.mark))
# There cannot be two simple keys one after another.
self.allow_simple_key = False
# It must be a part of a complex key.
else:
# Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
# anyway.)
if not self.flow_level:
# We are allowed to start a complex value if and only if
# we can start a simple key.
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping values are not allowed here",
self.get_mark())
# If this value starts a new block mapping, we need to add
# BLOCK-MAPPING-START. It will be detected as an error later by
# the parser.
if not self.flow_level:
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after ':' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add VALUE.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '---' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '...' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
# A plain scalar may start with any non-space character except:
# '-', '?', ':', ',', '[', ']', '{', '}',
# '#', '&', '*', '!', '|', '>', '\'', '\"',
# '%', '@', '`'.
#
# It may also start with
# '-', '?', ':'
# if it is followed by a non-space character.
#
# Note that we limit the last rule to the block context (except the
# '-' character) because we want the flow context to be space
# independent.
ch = self.peek()
return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
and (ch == '-' or (not self.flow_level and ch in '?:')))
# Scanners.
def scan_to_next_token(self):
# We ignore spaces, line breaks and comments.
# If we find a line break in the block context, we set the flag
# `allow_simple_key` on.
# The byte order mark is stripped if it's the first character in the
# stream. We do not yet support BOM inside the stream as the
# specification requires. Any such mark will be considered as a part
# of the document.
#
# TODO: We need to make tab handling rules more sane. A good rule is
# Tabs cannot precede tokens
# BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
# KEY(block), VALUE(block), BLOCK-ENTRY
# So the checking code is
# if <TAB>:
# self.allow_simple_keys = False
# We also need to add the check for `allow_simple_keys == True` to
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
if self.index == 0 and self.peek() == '\uFEFF':
self.forward()
found = False
while not found:
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
if self.scan_line_break():
if not self.flow_level:
self.allow_simple_key = True
else:
found = True
def scan_directive(self):
# See the specification for details.
start_mark = self.get_mark()
self.forward()
name = self.scan_directive_name(start_mark)
value = None
if name == 'YAML':
value = self.scan_yaml_directive_value(start_mark)
end_mark = self.get_mark()
elif name == 'TAG':
value = self.scan_tag_directive_value(start_mark)
end_mark = self.get_mark()
else:
end_mark = self.get_mark()
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
self.scan_directive_ignored_line(start_mark)
return DirectiveToken(name, value, start_mark, end_mark)
def scan_directive_name(self, start_mark):
# See the specification for details.
length = 0
ch = self.peek(length)
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch, self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch, self.get_mark())
return value
def scan_yaml_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
major = self.scan_yaml_directive_number(start_mark)
if self.peek() != '.':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or '.', but found %r" % self.peek(),
self.get_mark())
self.forward()
minor = self.scan_yaml_directive_number(start_mark)
if self.peek() not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or ' ', but found %r" % self.peek(),
self.get_mark())
return (major, minor)
def scan_yaml_directive_number(self, start_mark):
# See the specification for details.
ch = self.peek()
if not ('0' <= ch <= '9'):
raise ScannerError("while scanning a directive", start_mark,
"expected a digit, but found %r" % ch, self.get_mark())
length = 0
while '0' <= self.peek(length) <= '9':
length += 1
value = int(self.prefix(length))
self.forward(length)
return value
def scan_tag_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while self.peek() == ' ':
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if ch != ' ':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
return value
def scan_directive_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in '\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a comment or a line break, but found %r"
% ch, self.get_mark())
self.scan_line_break()
def scan_anchor(self, TokenClass):
# The specification does not restrict characters for anchors and
# aliases. This may lead to problems, for instance, the document:
# [ *alias, value ]
        # can be interpreted in two ways, as
# [ "value" ]
# and
# [ *alias , "value" ]
# Therefore we restrict aliases to numbers and ASCII letters.
start_mark = self.get_mark()
indicator = self.peek()
if indicator == '*':
name = 'alias'
else:
name = 'anchor'
self.forward()
length = 0
ch = self.peek(length)
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch, self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch, self.get_mark())
end_mark = self.get_mark()
return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
# See the specification for details.
start_mark = self.get_mark()
ch = self.peek(1)
if ch == '<':
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if self.peek() != '>':
raise ScannerError("while parsing a tag", start_mark,
"expected '>', but found %r" % self.peek(),
self.get_mark())
self.forward()
elif ch in '\0 \t\r\n\x85\u2028\u2029':
handle = None
suffix = '!'
self.forward()
else:
length = 1
use_handle = False
while ch not in '\0 \r\n\x85\u2028\u2029':
if ch == '!':
use_handle = True
break
length += 1
ch = self.peek(length)
handle = '!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = '!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a tag", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
def scan_block_scalar(self, style):
# See the specification for details.
if style == '>':
folded = True
else:
folded = False
chunks = []
start_mark = self.get_mark()
# Scan the header.
self.forward()
chomping, increment = self.scan_block_scalar_indicators(start_mark)
self.scan_block_scalar_ignored_line(start_mark)
# Determine the indentation level and go to the first non-empty line.
min_indent = self.indent+1
if min_indent < 1:
min_indent = 1
if increment is None:
breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
indent = max(min_indent, max_indent)
else:
indent = min_indent+increment-1
breaks, end_mark = self.scan_block_scalar_breaks(indent)
line_break = ''
# Scan the inner part of the block scalar.
while self.column == indent and self.peek() != '\0':
chunks.extend(breaks)
leading_non_space = self.peek() not in ' \t'
length = 0
while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
length += 1
chunks.append(self.prefix(length))
self.forward(length)
line_break = self.scan_line_break()
breaks, end_mark = self.scan_block_scalar_breaks(indent)
if self.column == indent and self.peek() != '\0':
# Unfortunately, folding rules are ambiguous.
#
# This is the folding according to the specification:
if folded and line_break == '\n' \
and leading_non_space and self.peek() not in ' \t':
if not breaks:
chunks.append(' ')
else:
chunks.append(line_break)
# This is Clark Evans's interpretation (also in the spec
# examples):
#
#if folded and line_break == '\n':
# if not breaks:
# if self.peek() not in ' \t':
# chunks.append(' ')
# else:
# chunks.append(line_break)
#else:
# chunks.append(line_break)
else:
break
# Chomp the tail.
if chomping is not False:
chunks.append(line_break)
if chomping is True:
chunks.extend(breaks)
# We are done.
return ScalarToken(''.join(chunks), False, start_mark, end_mark,
style)
def scan_block_scalar_indicators(self, start_mark):
# See the specification for details.
chomping = None
increment = None
ch = self.peek()
if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
elif ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
ch = self.peek()
if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected chomping or indentation indicators, but found %r"
% ch, self.get_mark())
return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in '\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r" % ch,
self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in ' \r\n\x85\u2028\u2029':
if self.peek() != ' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
while self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
return chunks, end_mark
def scan_flow_scalar(self, style):
# See the specification for details.
        # Note that we lose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere to indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
# that document separators are not included in scalars.
if style == '"':
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while self.peek() != quote:
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(''.join(chunks), False, start_mark, end_mark,
style)
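    # Single-character escape sequences recognized in double-quoted scalars:
    # the character following the backslash maps to its replacement text.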
ESCAPE_REPLACEMENTS = {
'0': '\0',
'a': '\x07',
'b': '\x08',
't': '\x09',
'\t': '\x09',
'n': '\x0A',
'v': '\x0B',
'f': '\x0C',
'r': '\x0D',
'e': '\x1B',
' ': '\x20',
'\"': '\"',
'\\': '\\',
'N': '\x85',
'_': '\xA0',
'L': '\u2028',
'P': '\u2029',
}
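    # Escape codes that introduce a fixed-length hexadecimal sequence:
    # \x takes 2 digits, \u takes 4 and \U takes 8.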
ESCAPE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
def scan_flow_scalar_non_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
length = 0
while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
length += 1
if length:
chunks.append(self.prefix(length))
self.forward(length)
ch = self.peek()
if not double and ch == '\'' and self.peek(1) == '\'':
chunks.append('\'')
self.forward(2)
elif (double and ch == '\'') or (not double and ch in '\"\\'):
chunks.append(ch)
self.forward()
elif double and ch == '\\':
self.forward()
ch = self.peek()
if ch in self.ESCAPE_REPLACEMENTS:
chunks.append(self.ESCAPE_REPLACEMENTS[ch])
self.forward()
elif ch in self.ESCAPE_CODES:
length = self.ESCAPE_CODES[ch]
self.forward()
for k in range(length):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"expected escape sequence of %d hexdecimal numbers, but found %r" %
(length, self.peek(k)), self.get_mark())
code = int(self.prefix(length), 16)
chunks.append(chr(code))
self.forward(length)
elif ch in '\r\n\x85\u2028\u2029':
self.scan_line_break()
chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
else:
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"found unknown escape character %r" % ch, self.get_mark())
else:
return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
length = 0
while self.peek(length) in ' \t':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch == '\0':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected end of stream", self.get_mark())
elif ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
breaks = self.scan_flow_scalar_breaks(double, start_mark)
if line_break != '\n':
chunks.append(line_break)
elif not breaks:
chunks.append(' ')
chunks.extend(breaks)
else:
chunks.append(whitespaces)
return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
# Instead of checking indentation, we check for document
# separators.
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document separator", self.get_mark())
while self.peek() in ' \t':
self.forward()
if self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
def scan_plain(self):
# See the specification for details.
# We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',', ':' and '?'.
# We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are relaxed for the flow context.
chunks = []
start_mark = self.get_mark()
end_mark = start_mark
indent = self.indent+1
# We allow zero indentation for scalars, but then we need to check for
# document separators at the beginning of the line.
#if indent == 0:
# indent = 1
spaces = []
while True:
length = 0
if self.peek() == '#':
break
while True:
ch = self.peek(length)
if ch in '\0 \t\r\n\x85\u2028\u2029' \
or (not self.flow_level and ch == ':' and
self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
or (self.flow_level and ch in ',:?[]{}'):
break
length += 1
# It's not clear what we should do with ':' in the flow context.
if (self.flow_level and ch == ':'
and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
self.forward(length)
raise ScannerError("while scanning a plain scalar", start_mark,
"found unexpected ':'", self.get_mark(),
"Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
if length == 0:
break
self.allow_simple_key = False
chunks.extend(spaces)
chunks.append(self.prefix(length))
self.forward(length)
end_mark = self.get_mark()
spaces = self.scan_plain_spaces(indent, start_mark)
if not spaces or self.peek() == '#' \
or (not self.flow_level and self.column < indent):
break
return ScalarToken(''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
# See the specification for details.
# The specification is really confusing about tabs in plain scalars.
# We just forbid them completely. Do not use tabs in YAML!
chunks = []
length = 0
while self.peek(length) in ' ':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
self.allow_simple_key = True
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return
breaks = []
while self.peek() in ' \r\n\x85\u2028\u2029':
if self.peek() == ' ':
self.forward()
else:
breaks.append(self.scan_line_break())
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return
if line_break != '\n':
chunks.append(line_break)
elif not breaks:
chunks.append(' ')
chunks.extend(breaks)
elif whitespaces:
chunks.append(whitespaces)
return chunks
def scan_tag_handle(self, name, start_mark):
# See the specification for details.
        # For some strange reason, the specification does not allow '_' in
# tag handles. I have allowed it anyway.
ch = self.peek()
if ch != '!':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch, self.get_mark())
length = 1
ch = self.peek(length)
if ch != ' ':
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_':
length += 1
ch = self.peek(length)
if ch != '!':
self.forward(length)
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch, self.get_mark())
length += 1
value = self.prefix(length)
self.forward(length)
return value
def scan_tag_uri(self, name, start_mark):
# See the specification for details.
# Note: we do not check if URI is well-formed.
chunks = []
length = 0
ch = self.peek(length)
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.!~*\'()[]%':
if ch == '%':
chunks.append(self.prefix(length))
self.forward(length)
length = 0
chunks.append(self.scan_uri_escapes(name, start_mark))
else:
length += 1
ch = self.peek(length)
if length:
chunks.append(self.prefix(length))
self.forward(length)
length = 0
if not chunks:
raise ScannerError("while parsing a %s" % name, start_mark,
"expected URI, but found %r" % ch, self.get_mark())
return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
codes = []
mark = self.get_mark()
while self.peek() == '%':
self.forward()
for k in range(2):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected URI escape sequence of 2 hexdecimal numbers, but found %r"
% self.peek(k), self.get_mark())
codes.append(int(self.prefix(2), 16))
self.forward(2)
try:
value = bytes(codes).decode('utf-8')
except UnicodeDecodeError as exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in '\r\n\x85':
if self.prefix(2) == '\r\n':
self.forward(2)
else:
self.forward()
return '\n'
elif ch in '\u2028\u2029':
self.forward()
return ch
return ''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass
|
MattsFleaMarket/python-for-android | refs/heads/master | python-build/python-libs/gdata/build/lib/gdata/sample_util.py | 133 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = '[email protected] (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
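# get_param() looks for a --name=value (or "--name value") command line
# argument first; if it is absent and ask=True the user is prompted, with the
# input hidden when secret=True.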
def get_param(name, prompt='', secret=False, ask=True):
# First, check for a command line parameter.
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
return sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
return sys.argv[i + 1]
if ask:
# If it was not on the command line, ask the user to input the value.
prompt = '%s: ' % prompt
if secret:
return getpass.getpass(prompt)
else:
return raw_input(prompt)
else:
return None
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if auth_type is None:
auth_type = int(get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
        '3. if you have registered to use OAuth\n'))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n').split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = get_param('email', 'Please enter your username')
password = get_param('password', 'Password', True)
if service is None:
service = get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)')
if source is None:
source = get_param('source', ask=False)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = get_param('auth_sub_token', ask=False)
session_token = get_param('session_token', ask=False)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter')
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from the'
print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n'))
consumer_key = get_param(
        'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app')
if oauth_type == HMAC:
consumer_secret = get_param(
          'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True)
# Swap out this code once the client supports requesting an oauth token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your domain.')
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
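# Typical use from a sample script (names below are illustrative only):
#   client = gdata.contacts.client.ContactsClient()
#   authorize_client(client, service='cp', source='example-sample')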
|
spadae22/odoo | refs/heads/chris_master_8 | addons/l10n_es/migrations/8.0.4.1/pre-rename.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2014 Domatix (http://www.domatix.com)
# Angel Moya <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__name__ = ("Cambia columnas name y description")
def migrate_tax_template(cr, version):
cr.execute("""ALTER TABLE account_tax
RENAME COLUMN name to name_to_description_temp""")
cr.execute("""ALTER TABLE account_tax
RENAME COLUMN description to name""")
cr.execute("""ALTER TABLE account_tax
RENAME COLUMN name_to_description_temp to description""")
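# Note: migrate_tax_template swaps the two columns through a temporary name
# because renaming "name" directly onto "description" would collide with the
# existing "description" column.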
def migrate(cr, version):
if not version:
return
migrate_tax_template(cr, version)
|
xuzhao1211/OnlineExam | refs/heads/master | misago/core/migrations/0002_basic_settings.py | 8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.translation import ugettext as _
from misago.conf.migrationutils import migrate_settings_group
def create_basic_settings_group(apps, schema_editor):
migrate_settings_group(
apps,
{
'key': 'basic',
'name': _("Basic forum settings"),
'description': _("Those settings control most basic properties "
"of your forum like its name or description."),
'settings': (
{
'setting': 'forum_name',
'name': _("Forum name"),
'legend': _("General"),
'value': "Misago",
'field_extra': {
'min_length': 2,
'max_length': 255
},
'is_public': True,
},
{
'setting': 'forum_index_title',
'name': _("Index title"),
'description': _("You may set custon title on "
"forum index by typing it here."),
'legend': _("Forum index"),
'field_extra': {
'max_length': 255
},
'is_public': True,
},
{
'setting': 'forum_index_meta_description',
'name': _("Meta Description"),
'description': _("Short description of your forum "
"for internet crawlers."),
'field_extra': {
'max_length': 255
},
},
{
'setting': 'forum_branding_display',
'name': _("Display branding"),
'description': _("Switch branding in forum's navbar."),
'legend': _("Branding"),
'value': True,
'python_type': 'bool',
'form_field': 'yesno',
'is_public': True,
},
{
'setting': 'forum_branding_text',
'name': _("Branding text"),
'description': _("Optional text displayed besides "
"brand image in navbar."),
'value': "isago",
'field_extra': {
'max_length': 255
},
'is_public': True,
},
{
'setting': 'email_footer',
'name': _("E-mails footer"),
'description': _("Optional short message included "
"at the end of e-mails sent by "
"forum"),
'legend': _("Forum e-mails"),
'field_extra': {
'max_length': 255
},
},
)
})
class Migration(migrations.Migration):
dependencies = [
('misago_core', '0001_initial'),
('misago_conf', '0001_initial'),
]
operations = [
migrations.RunPython(create_basic_settings_group),
]
|
erinspace/osf.io | refs/heads/develop | website/project/signals.py | 39 | import blinker
signals = blinker.Namespace()
comment_added = signals.signal('comment-added')
mention_added = signals.signal('mention-added')
contributor_added = signals.signal('contributor-added')
project_created = signals.signal('project-created')
contributor_removed = signals.signal('contributor-removed')
unreg_contributor_added = signals.signal('unreg-contributor-added')
write_permissions_revoked = signals.signal('write-permissions-revoked')
node_deleted = signals.signal('node-deleted')
after_create_registration = signals.signal('post-create-registration')
archive_callback = signals.signal('archive-callback')
privacy_set_public = signals.signal('privacy_set_public')
|
gquirozbogner/contentbox-master | refs/heads/master | third_party/django/core/context_processors.py | 232 | """
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
_get_val = lazy(_get_val, six.text_type)
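    # lazy() defers the get_token() call until the csrf_token context variable
    # is actually rendered in a template.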
return {'csrf_token': _get_val() }
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
|
PyCQA/pylint | refs/heads/main | tests/functional/n/nonlocal_without_binding.py | 3 | """ Checks that reversed() receive proper argument """
# pylint: disable=missing-docstring,invalid-name,unused-variable, useless-object-inheritance
# pylint: disable=too-few-public-methods,no-self-use,no-absolute-import
def test():
def parent():
a = 42
def stuff():
nonlocal a
c = 24
def parent2():
a = 42
def stuff():
def other_stuff():
nonlocal a
nonlocal c
b = 42
def func():
def other_func():
nonlocal b # [nonlocal-without-binding]
class SomeClass(object):
nonlocal x # [nonlocal-without-binding]
def func(self):
nonlocal some_attr # [nonlocal-without-binding]
def func2():
nonlocal_ = None
local = None
class Class:
nonlocal nonlocal_
nonlocal_ = 1
local = 1
return local + nonlocal_
|
City-of-Bloomington/green-rental | refs/heads/master | allauth/account/adapter.py | 5 | import warnings
import json
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib import messages
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from ..utils import (import_attribute, get_user_model,
generate_unique_username,
resolve_url)
from . import app_settings
class DefaultAccountAdapter(object):
def stash_verified_email(self, request, email):
request.session['account_verified_email'] = email
def unstash_verified_email(self, request):
ret = request.session.get('account_verified_email')
request.session['account_verified_email'] = None
return ret
def is_email_verified(self, request, email):
"""
Checks whether or not the email address is already verified
beyond allauth scope, for example, by having accepted an
invitation before signing up.
"""
ret = False
verified_email = request.session.get('account_verified_email')
if verified_email:
ret = verified_email.lower() == email.lower()
return ret
def format_email_subject(self, subject):
prefix = app_settings.EMAIL_SUBJECT_PREFIX
if prefix is None:
site = Site.objects.get_current()
prefix = u"[{name}] ".format(name=site.name)
return prefix + force_text(subject)
def render_mail(self, template_prefix, email, context):
"""
Renders an e-mail to `email`. `template_prefix` identifies the
e-mail that is to be sent, e.g. "account/email/email_confirmation"
"""
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
# We need at least one body
raise
if 'txt' in bodies:
msg = EmailMultiAlternatives(subject,
bodies['txt'],
settings.DEFAULT_FROM_EMAIL,
[email])
if 'html' in bodies:
msg.attach_alternative(bodies['html'], 'text/html')
else:
msg = EmailMessage(subject,
bodies['html'],
settings.DEFAULT_FROM_EMAIL,
[email])
msg.content_subtype = 'html' # Main content is now text/html
return msg
def send_mail(self, template_prefix, email, context):
msg = self.render_mail(template_prefix, email, context)
msg.send()
def get_login_redirect_url(self, request):
"""
Returns the default URL to redirect to after logging in. Note
that URLs passed explicitly (e.g. by passing along a `next`
GET parameter) take precedence over the value returned here.
"""
assert request.user.is_authenticated()
url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
if url:
warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply"
" use LOGIN_REDIRECT_URL with a URL name",
DeprecationWarning)
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
def get_logout_redirect_url(self, request):
"""
        Returns the URL to redirect to after the user logs out. Note that
        this method is also invoked if you attempt to log out while no user
is logged in. Therefore, request.user is not guaranteed to be an
authenticated user.
"""
return resolve_url(app_settings.LOGOUT_REDIRECT_URL)
def get_email_confirmation_redirect_url(self, request):
"""
The URL to return to after successful e-mail confirmation.
"""
if request.user.is_authenticated():
if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return \
app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return self.get_login_redirect_url(request)
else:
return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
        Next to simply returning True/False you can also intervene in the
        regular flow by raising an ImmediateHttpResponse
"""
return True
def new_user(self, request):
"""
Instantiates a new User instance.
"""
user = get_user_model()()
return user
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_username, user_email, user_field
first_name = user_field(user, 'first_name')
last_name = user_field(user, 'last_name')
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(user,
username
or generate_unique_username([first_name,
last_name,
email,
'user']))
def save_user(self, request, user, form, commit=True):
"""
Saves a new `User` instance using information provided in the
signup form.
"""
from .utils import user_username, user_email, user_field
data = form.cleaned_data
first_name = data.get('first_name')
last_name = data.get('last_name')
email = data.get('email')
username = data.get('username')
user_email(user, email)
user_username(user, username)
user_field(user, 'first_name', first_name or '')
user_field(user, 'last_name', last_name or '')
if 'password1' in data:
user.set_password(data["password1"])
else:
user.set_unusable_password()
self.populate_username(request, user)
if commit:
# Ability not to commit makes it easier to derive from
# this adapter by adding
user.save()
return user
def clean_username(self, username):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
from django.contrib.auth.forms import UserCreationForm
USERNAME_REGEX = UserCreationForm().fields['username'].regex
if not USERNAME_REGEX.match(username):
raise forms.ValidationError(_("Usernames can only contain "
"letters, digits and @/./+/-/_."))
# TODO: Add regexp support to USERNAME_BLACKLIST
if username in app_settings.USERNAME_BLACKLIST:
raise forms.ValidationError(_("Username can not be used. "
"Please use other username."))
username_field = app_settings.USER_MODEL_USERNAME_FIELD
assert username_field
user_model = get_user_model()
try:
query = {username_field + '__iexact': username}
user_model.objects.get(**query)
except user_model.DoesNotExist:
return username
raise forms.ValidationError(_("This username is already taken. Please "
"choose another."))
def clean_email(self, email):
"""
Validates an email value. You can hook into this if you want to
(dynamically) restrict what email addresses can be chosen.
"""
return email
def add_message(self, request, level, message_template,
message_context={}, extra_tags=''):
"""
Wrapper of `django.contrib.messages.add_message`, that reads
the message text from a template.
"""
if 'django.contrib.messages' in settings.INSTALLED_APPS:
try:
message = render_to_string(message_template,
message_context).strip()
if message:
messages.add_message(request, level, message,
extra_tags=extra_tags)
except TemplateDoesNotExist:
pass
def ajax_response(self, request, response, redirect_to=None, form=None):
data = {}
if redirect_to:
status = 200
data['location'] = redirect_to
if form:
if form.is_valid():
status = 200
else:
status = 400
data['form_errors'] = form._errors
if hasattr(response, 'render'):
response.render()
data['html'] = response.content.decode('utf8')
return HttpResponse(json.dumps(data),
status=status,
content_type='application/json')
def get_adapter():
return import_attribute(app_settings.ADAPTER)()
|
ryangallen/django | refs/heads/master | tests/foreign_object/models/empty_join.py | 106 | from django.db import models
from django.db.models.fields.related import (
ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.lookups import StartsWith
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible
class CustomForeignObjectRel(ForeignObjectRel):
"""
Define some extra Field methods so this Rel acts more like a Field, which
lets us use ForeignRelatedObjectsDescriptor in both directions.
"""
@property
def foreign_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.field.related_fields)
def get_attname(self):
return self.name
class StartsWithRelation(models.ForeignObject):
"""
A ForeignObject that uses StartsWith operator in its joins instead of
the default equality operator. This is logically a many-to-many relation
and creates a ForeignRelatedObjectsDescriptor in both directions.
"""
auto_created = False
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = CustomForeignObjectRel
def __init__(self, *args, **kwargs):
kwargs['on_delete'] = models.DO_NOTHING
super(StartsWithRelation, self).__init__(*args, **kwargs)
@property
def field(self):
"""
Makes ForeignRelatedObjectsDescriptor work in both directions.
"""
return self.remote_field
def get_extra_restriction(self, where_class, alias, related_alias):
to_field = self.remote_field.model._meta.get_field(self.to_fields[0])
from_field = self.model._meta.get_field(self.from_fields[0])
return StartsWith(to_field.get_col(alias), from_field.get_col(related_alias))
def get_joining_columns(self, reverse_join=False):
return tuple()
def get_path_info(self):
to_opts = self.remote_field.model._meta
from_opts = self.model._meta
return [PathInfo(from_opts, to_opts, (to_opts.pk,), self, False, False)]
def get_reverse_path_info(self):
to_opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, to_opts, (to_opts.pk,), self.remote_field, False, False)]
def contribute_to_class(self, cls, name, virtual_only=False):
super(StartsWithRelation, self).contribute_to_class(cls, name, virtual_only)
setattr(cls, self.name, ForeignRelatedObjectsDescriptor(self))
class BrokenContainsRelation(StartsWithRelation):
"""
This model is designed to yield no join conditions and
raise an exception in ``Join.as_sql()``.
"""
def get_extra_restriction(self, where_class, alias, related_alias):
return None
@python_2_unicode_compatible
class SlugPage(models.Model):
slug = models.CharField(max_length=20)
descendants = StartsWithRelation(
'self',
from_fields=['slug'],
to_fields=['slug'],
related_name='ascendants',
)
containers = BrokenContainsRelation(
'self',
from_fields=['slug'],
to_fields=['slug'],
)
class Meta:
ordering = ['slug']
def __str__(self):
return 'SlugPage %s' % self.slug
|
kingvuplus/gui_test4 | refs/heads/master | tests/events.py | 80 | import time
import tests
recorded_events = [ ]
def event(self, name, args, kwargs):
global recorded_events
print "*EVENT*", time.time(), self, name, args, kwargs
recorded_events.append((time.time(), self, name, args, kwargs))
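# Decorator: records every call to the wrapped method in recorded_events
# before delegating to the original implementation.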
def eventfnc(f):
name = f.__name__
def wrapper(self, *args, **kwargs):
event(self, name, args, kwargs)
return f(self, *args, **kwargs)
return wrapper
def get_events():
global recorded_events
r = recorded_events
recorded_events = [ ]
return r
def start_log():
global base_time
base_time = time.time()
def end_log(test_name):
global base_time
results = ""
for (t, self, method, args, kwargs) in get_events():
results += "%s T+%f: %s::%s(%s, *%s, *%s)\n" % (time.ctime(t), t - base_time, str(self.__class__), method, self, args, kwargs)
expected = None
try:
f = open(test_name + ".results", "rb")
expected = f.read()
f.close()
except:
print "NO TEST RESULT FOUND, creating new"
f = open(test_name + ".new_results", "wb")
f.write(results)
f.close()
print results
if expected is not None:
print "expected:"
if expected != results:
f = open(test_name + ".bogus_results", "wb")
f.write(results)
f.close()
raise tests.TestError("test data does not match")
else:
print "test compared ok"
else:
print "no test data to compare with."
def log(fnc, base_time = 0, test_name = "test", *args, **kwargs):
import fake_time
fake_time.setTime(base_time)
start_log()
try:
fnc(*args, **kwargs)
event(None, "test_completed", [], {"test_name": test_name})
except tests.TestError,c:
event(None, "test_failed", [], {"test_name": test_name, "reason": str(c)})
end_log(test_name)
|
dylanaraps/pywal | refs/heads/master | pywal/backends/colorz.py | 1 | """
Generate a colorscheme using Colorz.
"""
import logging
import sys
try:
import colorz
except ImportError:
logging.error("colorz wasn't found on your system.")
logging.error("Try another backend. (wal --backend)")
sys.exit(1)
from .. import colors
from .. import util
def gen_colors(img):
"""Generate a colorscheme using Colorz."""
# pylint: disable=not-callable
raw_colors = colorz.colorz(img, n=6, bold_add=0)
return [util.rgb_to_hex([*color[0]]) for color in raw_colors]
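# adjust() expands the six generated colors into a full 16-color terminal
# palette: color0 repeats the first color, 1-6 are the generated colors,
# 7 and 15 are white, 8 is black, and 9-14 repeat the generated colors.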
def adjust(cols, light):
"""Create palette."""
raw_colors = [cols[0], *cols, "#FFFFFF",
"#000000", *cols, "#FFFFFF"]
return colors.generic_adjust(raw_colors, light)
def get(img, light=False):
"""Get colorscheme."""
cols = gen_colors(img)
if len(cols) < 6:
logging.error("colorz failed to generate enough colors.")
logging.error("Try another backend or another image. (wal --backend)")
sys.exit(1)
return adjust(cols, light)
|
caveman-dick/ansible | refs/heads/devel | lib/ansible/modules/windows/win_iis_webapplication.py | 47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_webapplication
version_added: "2.0"
short_description: Configures IIS web applications
description:
- Creates, removes, and configures IIS web applications.
options:
name:
description:
- Name of the web application.
required: true
site:
description:
- Name of the site on which the application is created.
required: true
state:
description:
- State of the web application.
choices: [ absent, present ]
default: present
physical_path:
description:
- The physical path on the remote host to use for the new application.
- The specified folder must already exist.
application_pool:
description:
- The application pool in which the new site executes.
author:
- Henrik Wallström
'''
EXAMPLES = r'''
- name: Add ACME webapplication on IIS.
win_iis_webapplication:
name: api
site: acme
state: present
physical_path: C:\apps\acme\api
'''
RETURN = r'''
application_pool:
description: The used/implemented application_pool value
returned: success
type: string
sample: DefaultAppPool
physical_path:
description: The used/implemented physical_path value
returned: success
type: string
sample: C:\apps\acme\api
'''
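# Note: the actual implementation ships as the matching PowerShell module
# (win_iis_webapplication.ps1); this file only carries the documentation,
# examples and return-value schema used by Ansible's doc tooling.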
|
jawad6233/Lenovo_A820_kernel_kk | refs/heads/master | bionic/libc/kernel/tools/defaults.py | 5 | # this module contains all the defaults used by the generation of cleaned-up headers
# for the Bionic C library
#
import time, os, sys
from utils import *
# the list of supported architectures
#
kernel_archs = [ 'arm', 'x86', 'mips' ]
# the list of include directories that belong to the kernel
# tree. used when looking for sources...
#
kernel_dirs = [ "linux", "asm", "asm-generic", "mtd" ]
# path to the directory containing the original kernel headers
#
kernel_original_path = os.path.normpath( find_program_dir() + '/../../../../external/kernel-headers/original' )
# path to the default location of the cleaned-up headers
#
kernel_cleaned_path = os.path.normpath( find_program_dir() + '/..' )
# a special value that is used to indicate that a given macro is known to be
# undefined during optimization
kCppUndefinedMacro = "<<<undefined>>>"
# this is the set of known macros we want to totally optimize out from the
# final headers
kernel_known_macros = {
"__KERNEL__": kCppUndefinedMacro,
"__KERNEL_STRICT_NAMES":"1",
"__CHECKER__": kCppUndefinedMacro,
"__CHECK_ENDIAN__": kCppUndefinedMacro,
}
# define to true if you want to remove all defined(CONFIG_FOO) tests
# from the clean headers. testing shows that this is not strictly necessary
# but just generates cleaner results
kernel_remove_config_macros = True
# maps an architecture to a set of default macros that would be provided by
# toolchain preprocessor
kernel_default_arch_macros = {
"arm": {},
"x86": {"__i386__": "1", "CONFIG_X86_32": "1"},
"mips": {"CONFIG_32BIT":"1"},
}
kernel_arch_token_replacements = {
"arm": {},
"x86": {},
"mips": {"off_t":"__kernel_off_t"},
}
# Replace tokens in the output according to this mapping
kernel_token_replacements = {
"asm": "__asm__",
"__unused": "__linux_unused", # The kernel usage of __unused conflicts with the macro defined in sys/cdefs.h
}
# this is the set of known static inline functions that we want to keep
# in the final ARM headers. this is only used to keep optimized byteswapping
# static functions and stuff like that.
kernel_known_arm_statics = set(
[ "___arch__swab32", # asm-arm/byteorder.h
]
)
kernel_known_x86_statics = set(
[ "___arch__swab32", # asm-x86/byteorder.h
"___arch__swab64", # asm-x86/byteorder.h
]
)
kernel_known_mips_statics = set(
[
]
)
kernel_known_generic_statics = set(
[ "__invalid_size_argument_for_IOC", # asm-generic/ioctl.h
"__cmsg_nxthdr", # linux/socket.h
"cmsg_nxthdr", # linux/socket.h
"ipt_get_target",
"ip6t_get_target",
]
)
# this maps an architecture to the set of static inline functions that
# we want to keep in the final headers
#
kernel_known_statics = {
"arm" : kernel_known_arm_statics,
"x86" : kernel_known_x86_statics,
"mips" : kernel_known_mips_statics
}
# this is a list of macros which we want to specifically exclude from
# the generated files.
#
kernel_ignored_macros = set(
[ "MAXHOSTNAMELEN", # for some reason, Linux defines it to 64
# while most of the BSD code expects this to be 256
# so ignore the kernel-provided definition and
# define it in the Bionic headers instead
]
)
# this is the standard disclaimer
#
kernel_disclaimer = """\
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
"""
# This is the warning line that will be inserted every N-th line in the output
kernel_warning = """\
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
"""
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | refs/heads/master | Splunk_TA_paloalto/bin/lib/pandevice/tests/live/test_network.py | 2 | import random
from tests.live import testlib
from pandevice import network
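# Each test class below follows the testlib flow: create_dependencies() builds
# any interfaces/VLANs the object needs on the live firewall,
# setup_state_obj() creates the object under test, update_state_obj() mutates
# it, and cleanup_dependencies() removes the supporting configuration.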
class TestZoneBasic(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.Zone(
testlib.random_name(),
mode='layer3',
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.mode = 'layer2'
class TestZone(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_objs = []
state.eths = testlib.get_available_interfaces(fw, 2)
state.eth_objs.append(network.EthernetInterface(state.eths[0], 'layer2'))
state.eth_objs.append(network.EthernetInterface(state.eths[1], 'layer3'))
for x in state.eth_objs:
fw.add(x)
fw.create_type(network.EthernetInterface)
def setup_state_obj(self, fw, state):
state.obj = network.Zone(
testlib.random_name(), 'layer2', state.eths[0],
enable_user_identification=False,
include_acl=testlib.random_ip('/24'),
exclude_acl=testlib.random_ip('/24'),
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.mode = 'layer3'
state.obj.interface = state.eths[1]
state.obj.include_acl = [testlib.random_ip('/24') for x in range(2)]
state.obj.exclude_acl = [testlib.random_ip('/24') for x in range(2)]
def cleanup_dependencies(self, fw, state):
try:
fw.delete_type(network.EthernetInterface)
except Exception:
pass
class TestStaticMac(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.parent = None
state.eth_objs = []
state.eths = testlib.get_available_interfaces(fw, 2)
for eth in state.eths:
state.eth_objs.append(network.EthernetInterface(
eth, 'layer2'))
fw.add(state.eth_objs[-1])
state.eth_objs[0].create_similar()
state.parent = network.Vlan(
testlib.random_name(), state.eths)
fw.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
state.obj = network.StaticMac(
testlib.random_mac(),
state.eths[0],
)
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.interface = state.eths[1]
def cleanup_dependencies(self, fw, state):
try:
state.parent.delete()
except Exception:
pass
try:
state.eth_objs[0].delete_similar()
except Exception:
pass
class TestVlan(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_objs = []
state.eths = testlib.get_available_interfaces(fw, 2)
for eth in state.eths:
state.eth_objs.append(network.EthernetInterface(
eth, 'layer2'))
fw.add(state.eth_objs[-1])
state.eth_objs[0].create_similar()
state.vlan_interface = network.VlanInterface(
'vlan.{0}'.format(random.randint(100, 200)))
fw.add(state.vlan_interface)
state.vlan_interface.create()
def setup_state_obj(self, fw, state):
state.obj = network.Vlan(
testlib.random_name(), state.eths[0],
state.vlan_interface.uid,
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.interface = state.eths[1]
def cleanup_dependencies(self, fw, state):
try:
state.vlan_interface.delete()
except Exception:
pass
try:
state.eth_objs[0].delete_similar()
except Exception:
pass
class TestIPv6AddressOnEthernetInterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.parent = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.parent = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
state.obj = network.IPv6Address(
testlib.random_ipv6(),
False, True, False, True, 2420000, 604800, True, False)
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable_on_interface = True
state.obj.prefix = False
state.obj.anycast = True
def cleanup_dependencies(self, fw, state):
try:
state.parent.delete()
except Exception:
pass
class TestIPv6AddressOnLayer3Subinterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj)
state.eth_obj.create()
tag = random.randint(1, 4000)
state.parent = network.Layer3Subinterface(
'{0}.{1}'.format(state.eth, tag),
tag, testlib.random_ip('/24'))
state.eth_obj.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
state.obj = network.IPv6Address(
testlib.random_ipv6(),
False, True, False, True, 2420000, 604800, True, False)
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable_on_interface = True
state.obj.prefix = False
state.obj.anycast = True
def cleanup_dependencies(self, fw, state):
try:
state.parent.delete()
except Exception:
pass
try:
state.eth_obj.delete()
except Exception:
pass
# Interface - inherited by other interface objects
class TestArpOnEthernetInterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj)
state.eth_obj.create()
def setup_state_obj(self, fw, state):
state.obj = network.Arp(
testlib.random_ip(), '00:30:48:52:ab:cd')
state.eth_obj.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.hw_address = '00:30:48:52:12:9a'
def cleanup_dependencies(self, fw, state):
try:
state.eth_obj.delete()
except Exception:
pass
class TestArpOnSubinterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj)
state.eth_obj.create()
tag = random.randint(1, 4000)
state.parent = network.Layer3Subinterface(
'{0}.{1}'.format(state.eth, tag),
tag, testlib.random_ip('/24'))
state.eth_obj.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
state.obj = network.Arp(
testlib.random_ip(), testlib.random_mac())
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.hw_address = testlib.random_mac()
def cleanup_dependencies(self, fw, state):
try:
state.parent.delete()
except Exception:
pass
try:
state.eth_obj.delete()
except Exception:
pass
class TestVirtualWire(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_objs = []
state.eths = testlib.get_available_interfaces(fw, 3)
for eth in state.eths:
state.eth_objs.append(network.EthernetInterface(
eth, 'virtual-wire'))
fw.add(state.eth_objs[-1])
state.eth_objs[-1].create()
def setup_state_obj(self, fw, state):
state.obj = network.VirtualWire(
testlib.random_name(),
tag=random.randint(1, 4000),
interface1=state.eths[0],
interface2=state.eths[1],
multicast=True,
pass_through=False,
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.tag = random.randint(1, 4000)
state.obj.interface1 = state.eths[1]
state.obj.interface2 = state.eths[2]
state.obj.multicast = False
state.obj.pass_through = True
def cleanup_dependencies(self, fw, state):
for x in state.eth_objs:
try:
x.delete()
except Exception:
pass
# Subinterface - inherited by others
# AbstractSubinterface
class TestL3Subinterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.management_profile = network.ManagementProfile(
testlib.random_name(), ping=True)
state.eth = None
fw.add(state.management_profile)
state.management_profile.create()
state.eth = testlib.get_available_interfaces(fw)[0]
state.parent = network.EthernetInterface(
state.eth, 'layer3', ip=testlib.random_ip('/24'),
)
fw.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
tag = random.randint(1, 4000)
name = '{0}.{1}'.format(state.eth, tag)
state.obj = network.Layer3Subinterface(
name, tag, testlib.random_ip('/24'), False,
state.management_profile, random.randint(576, 1500),
True, None, 'This is my subeth',
random.randint(40, 300), random.randint(60, 300),
)
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.comment = 'Update the comment'
state.obj.ip = testlib.random_ip('/24')
def cleanup_dependencies(self, fw, state):
try:
state.management_profile.delete()
except Exception:
pass
try:
state.parent.delete()
except Exception:
pass
class TestL2Subinterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.parent = network.EthernetInterface(
state.eth, 'layer2',
)
fw.add(state.parent)
state.parent.create()
def setup_state_obj(self, fw, state):
tag = random.randint(1, 4000)
name = '{0}.{1}'.format(state.eth, tag)
state.obj = network.Layer2Subinterface(
name, tag, comment='This is my L2 subinterface',
)
state.parent.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.comment = 'Updated comment'
def cleanup_dependencies(self, fw, state):
try:
state.parent.delete()
except Exception:
pass
# PhysicalInterface - inherited by others
class TestL3EthernetInterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.management_profiles = []
state.eth = testlib.get_available_interfaces(fw)[0]
state.management_profiles = [
network.ManagementProfile(testlib.random_name(),
ping=bool(x)) for x in range(2)]
for x in state.management_profiles:
fw.add(x)
state.management_profiles[0].create_similar()
def setup_state_obj(self, fw, state):
state.obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'),
ipv6_enabled=False,
management_profile=state.management_profiles[0],
mtu=random.randint(600, 1500),
adjust_tcp_mss=True,
link_speed='auto',
link_duplex='auto',
link_state='auto',
comment='This is my interface',
ipv4_mss_adjust=random.randint(40, 300),
ipv6_mss_adjust=random.randint(60, 300),
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.management_profile = state.management_profiles[1]
state.obj.mtu = random.randint(600, 1500)
state.obj.ipv4_mss_adjust = random.randint(40, 300)
state.obj.ipv6_mss_adjust = random.randint(60, 300)
state.obj.comment = 'This is an update layer3 interface'
def cleanup_dependencies(self, fw, state):
try:
state.management_profiles[0].delete_similar()
except IndexError:
pass
class TestL2EthernetInterface(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.management_profiles = []
state.eth = testlib.get_available_interfaces(fw)[0]
state.management_profiles = [
network.ManagementProfile(testlib.random_name(),
ping=bool(x)) for x in range(2)]
for x in state.management_profiles:
fw.add(x)
state.management_profiles[0].create_similar()
def setup_state_obj(self, fw, state):
state.obj = network.EthernetInterface(
state.eth, 'layer2',
management_profile=state.management_profiles[0])
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.management_profile = state.management_profiles[1]
def cleanup_dependencies(self, fw, state):
try:
state.management_profiles[0].delete_similar()
except IndexError:
pass
# AggregateInterface
class TestVlanInterface(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.VlanInterface(
'vlan.{0}'.format(random.randint(20, 5000)),
testlib.random_ip('/24'),
mtu=random.randint(800, 1000),
adjust_tcp_mss=True,
comment='Vlan interface',
ipv4_mss_adjust=random.randint(100, 200),
ipv6_mss_adjust=random.randint(100, 200),
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.ip = None
state.obj.comment = 'Updated vlan'
state.obj.enable_dhcp = True
state.obj.create_dhcp_default_route = True
state.obj.dhcp_default_route_metric = random.randint(50, 200)
class TestLoopbackInterface(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.LoopbackInterface(
'loopback.{0}'.format(random.randint(20, 5000)),
testlib.random_ip(),
mtu=random.randint(800, 1000),
adjust_tcp_mss=True,
comment='Some loopback interface',
ipv4_mss_adjust=random.randint(100, 200),
ipv6_mss_adjust=random.randint(100, 200),
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.ip = testlib.random_ip()
state.obj.comment = 'Updated loopback'
class TestTunnelInterface(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.TunnelInterface(
'tunnel.{0}'.format(random.randint(20, 5000)),
testlib.random_ip('/24'),
mtu=random.randint(800, 1000),
comment='Underground interface',
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.ip = testlib.random_ip('/24')
state.obj.comment = 'Updated tunnel'
class TestStaticRoute(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj)
state.eth_obj.create()
state.vr = network.VirtualRouter(
testlib.random_name(), interface=state.eth)
fw.add(state.vr)
state.vr.create()
def setup_state_obj(self, fw, state):
state.obj = network.StaticRoute(
testlib.random_name(),
testlib.random_ip('/32'),
'ip-address',
testlib.random_ip(),
state.eth,
random.randint(10, 240),
random.randint(1, 65535),
)
state.vr.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.destination = testlib.random_ip('/32')
state.obj.nexthop_type = 'discard'
state.obj.nexthop = None
state.obj.interface = None
def cleanup_dependencies(self, fw, state):
try:
state.vr.delete()
except Exception:
pass
try:
state.eth_obj.delete()
except Exception:
pass
class TestStaticRouteV6(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'), ipv6_enabled=True)
fw.add(state.eth_obj)
state.eth_obj.create()
state.vr = network.VirtualRouter(
testlib.random_name(), interface=state.eth)
fw.add(state.vr)
state.vr.create()
def setup_state_obj(self, fw, state):
ip = testlib.random_ipv6('')
state.obj = network.StaticRouteV6(
testlib.random_name(),
destination=ip + '/64',
nexthop_type='ipv6-address',
nexthop=ip + '1',
interface=state.eth,
admin_dist=random.randint(100, 200),
metric=random.randint(1, 65535),
)
state.vr.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.destination = testlib.random_ipv6('/64')
state.obj.nexthop_type = 'discard'
state.obj.nexthop = None
state.obj.interface = None
def cleanup_dependencies(self, fw, state):
try:
state.vr.delete()
except Exception:
pass
try:
state.eth_obj.delete()
except Exception:
pass
class TestVirtualRouter(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.eth_obj = None
state.eth = testlib.get_available_interfaces(fw)[0]
state.eth_obj = network.EthernetInterface(
state.eth, 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj)
state.eth_obj.create()
def setup_state_obj(self, fw, state):
state.obj = network.VirtualRouter(
testlib.random_name(),
interface=state.eth,
ad_static=random.randint(10, 240),
ad_static_ipv6=random.randint(10, 240),
ad_ospf_int=random.randint(10, 240),
ad_ospf_ext=random.randint(10, 240),
ad_ospfv3_int=random.randint(10, 240),
ad_ospfv3_ext=random.randint(10, 240),
ad_ibgp=random.randint(10, 240),
ad_ebgp=random.randint(10, 240),
ad_rip=random.randint(10, 240),
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.ad_static = random.randint(10, 240)
state.obj.ad_rip = random.randint(10, 240)
def cleanup_dependencies(self, fw, state):
try:
state.eth_obj.delete()
except Exception:
pass
class MakeVirtualRouter(testlib.FwFlow):
WITH_OSPF = False
WITH_AREA = False
WITH_AUTH_PROFILE = False
WITH_AREA_INTERFACE = False
WITH_REDISTRIBUTION_PROFILE = False
WITH_BGP = False
WITH_BGP_ROUTING_OPTIONS = False
WITH_BGP_AUTH_PROFILE = False
WITH_BGP_PEER_GROUP = False
WITH_BGP_PEER = False
WITH_BGP_IMPORT_RULE = False
WITH_BGP_EXPORT_RULE = False
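    # The WITH_* flags above are toggled by subclasses to request optional
    # dependencies; create_dependencies() below only builds the OSPF/BGP
    # pieces whose flag is set, so each Test* subclass gets exactly the
    # supporting config it needs.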
def create_dependencies(self, fw, state):
state.eths = testlib.get_available_interfaces(fw, 2)
state.eth_obj_v4 = network.EthernetInterface(
state.eths[0], 'layer3', testlib.random_ip('/24'))
fw.add(state.eth_obj_v4)
state.eth_obj_v6 = network.EthernetInterface(
state.eths[1], 'layer3', ipv6_enabled=True)
fw.add(state.eth_obj_v6)
state.eth_obj_v4.create_similar()
state.vr = network.VirtualRouter(testlib.random_name(), state.eths)
fw.add(state.vr)
state.vr.create()
if self.WITH_REDISTRIBUTION_PROFILE:
some_ip = testlib.random_ip()
state.redist_profile = network.RedistributionProfile(
testlib.random_name(),
priority=random.randint(1, 255),
action='no-redist',
filter_type=['ospf', 'static', 'connect', 'bgp'],
filter_interface=random.choice(state.eths),
filter_destination=testlib.random_ip(),
filter_nexthop=testlib.random_ip(),
ospf_filter_pathtype=('intra-area', 'ext-1'),
ospf_filter_area=some_ip,
ospf_filter_tag=some_ip,
)
state.vr.add(state.redist_profile)
state.redist_profile.create()
if any((self.WITH_OSPF, self.WITH_AUTH_PROFILE,
self.WITH_AREA, self.WITH_AREA_INTERFACE)):
state.ospf = network.Ospf(
True, testlib.random_ip())
state.vr.add(state.ospf)
if self.WITH_AUTH_PROFILE:
state.auth = network.OspfAuthProfile(
testlib.random_name(), 'md5')
state.ospf.add(state.auth)
if self.WITH_AREA or self.WITH_AREA_INTERFACE:
state.area = network.OspfArea(testlib.random_ip())
state.ospf.add(state.area)
if self.WITH_AREA_INTERFACE:
state.iface = network.OspfAreaInterface(
state.eths[0], True, True, 'p2mp')
state.area.add(state.iface)
state.ospf.create()
if any((self.WITH_BGP, self.WITH_BGP_ROUTING_OPTIONS,
self.WITH_BGP_AUTH_PROFILE, self.WITH_BGP_PEER_GROUP, self.WITH_BGP_PEER,
self.WITH_BGP_IMPORT_RULE, self.WITH_BGP_EXPORT_RULE)):
state.bgp = network.Bgp(
enable=True,
router_id=testlib.random_ip(),
reject_default_route=True,
allow_redist_default_route=True,
install_route=True,
ecmp_multi_as=True,
enforce_first_as=True,
local_as=random.randint(1, 2000))
state.vr.add(state.bgp)
if self.WITH_BGP_AUTH_PROFILE:
state.bgp_auth = network.BgpAuthProfile(
testlib.random_name(), 'MD5')
state.bgp.add(state.bgp_auth)
state.bgp.apply()
if self.WITH_BGP_ROUTING_OPTIONS:
state.bgp_opts = network.BgpRoutingOptions(
as_format='2-byte')
state.bgp.add(state.bgp_opts)
state.bgp.apply()
if any((self.WITH_BGP_PEER_GROUP, self.WITH_BGP_PEER)):
state.pg = network.BgpPeerGroup(
name=testlib.random_name(),
enable=True,
aggregated_confed_as_path=True,
soft_reset_with_stored_info=True,
export_nexthop='resolve',
import_nexthop='original',
remove_private_as=True,
)
state.bgp.add(state.pg)
state.bgp.apply()
if self.WITH_BGP_PEER:
state.peer = network.BgpPeer(
name=testlib.random_name(),
enable=True,
peer_as=random.randint(1000, 1255),
local_interface=state.eths[0],
peer_address_ip=testlib.random_ip(),
)
state.pg.add(state.peer)
state.pg.apply()
if self.WITH_BGP_IMPORT_RULE:
state.import_rule = network.BgpPolicyImportRule(
name=testlib.random_name(),
enable=True,
)
state.bgp.add(state.import_rule)
state.bgp.apply()
if self.WITH_BGP_EXPORT_RULE:
state.export_rule = network.BgpPolicyExportRule(
name=testlib.random_name(),
enable=True,
)
state.bgp.add(state.export_rule)
state.bgp.apply()
state.bgp.create()
def cleanup_dependencies(self, fw, state):
try:
state.vr.delete()
except Exception:
pass
try:
state.eth_obj_v4.delete_similar()
except Exception:
pass
class TestRedistributionProfile(MakeVirtualRouter):
def setup_state_obj(self, fw, state):
some_ip = testlib.random_ip()
state.obj = network.RedistributionProfile(
testlib.random_name(),
priority=random.randint(1, 255),
action='no-redist',
filter_type=['ospf', 'static', 'connect'],
filter_interface=random.choice(state.eths),
filter_destination=testlib.random_ip(),
filter_nexthop=testlib.random_ip(),
ospf_filter_pathtype=('intra-area', 'ext-1'),
ospf_filter_area=some_ip,
ospf_filter_tag=some_ip,
)
state.vr.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.action = 'redist'
state.obj.filter_type = ('ospf', 'rip', 'bgp')
state.obj.ospf_filter_pathtype = ('inter-area', 'ext-2')
state.obj.bgp_filter_community = ('local-as', 'no-export')
class TestOspf(MakeVirtualRouter):
def setup_state_obj(self, fw, state):
state.obj = network.Ospf(
True, testlib.random_ip(), True, True, True,
2, 3, False, 300, False, False, 400)
state.vr.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.reject_default_route = False
state.obj.allow_redist_default_route = False
state.obj.rfc1583 = False
state.obj.spf_calculation_delay = 3
state.obj.lsa_interval = 4
state.obj.graceful_restart_enable = True
state.obj.gr_helper_enable = True
state.obj.gr_strict_lsa_checking = True
class TestOspfArea(MakeVirtualRouter):
WITH_OSPF = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfArea(
testlib.random_ip(), 'normal')
state.ospf.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.type = 'stub'
state.obj.accept_summary = True
state.obj.default_route_advertise = 'disable'
def test_05_stub_area_with_default_route_advertise(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.default_route_advertise = 'advertise'
state.obj.default_route_advertise_metric = 45
state.obj.apply()
def test_06_nssa_area_type_ext1(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.type = 'nssa'
state.obj.default_route_advertise_type = 'ext-1'
state.obj.apply()
def test_07_nssa_area_type_ext2(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.default_route_advertise_type = 'ext-2'
state.obj.apply()
class TestOspfRange(MakeVirtualRouter):
WITH_AREA = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfRange(testlib.random_ip(), 'advertise')
state.area.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.mode = 'suppress'
class TestOspfNssaExternalRange(MakeVirtualRouter):
WITH_AREA = True
def create_dependencies(self, fw, state):
super(TestOspfNssaExternalRange, self).create_dependencies(fw, state)
state.area.type = 'nssa'
state.area.apply()
def setup_state_obj(self, fw, state):
state.obj = network.OspfNssaExternalRange(
testlib.random_ip('/24'), 'advertise')
state.area.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.mode = 'suppress'
class TestOspfAreaInterface(MakeVirtualRouter):
WITH_AREA = True
WITH_AUTH_PROFILE = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfAreaInterface(
random.choice(state.eths), True, True, 'broadcast', 4096, 50,
12, 3, 4, 5, 6, state.auth.uid)
state.area.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.passive = False
state.obj.link_type = 'p2p'
def test_05_link_type_p2mp(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.enable = True
state.obj.link_type = 'p2mp'
state.obj.apply()
class TestOspfNeighbor(MakeVirtualRouter):
WITH_AREA_INTERFACE = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfNeighbor(testlib.random_ip(), 10)
state.iface.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.metric = 11
class TestOspfAuthProfile(MakeVirtualRouter):
WITH_OSPF = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfAuthProfile(
testlib.random_name(), 'password', 'secret')
state.ospf.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.password = 'secret2'
class TestOspfAuthProfileMd5(MakeVirtualRouter):
WITH_AUTH_PROFILE = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfAuthProfileMd5(
'1', 'secret1', False)
state.auth.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.preferred = True
def test_05_add_second_profile_not_preferred(self, fw, state_map):
state = self.sanity(fw, state_map)
o = network.OspfAuthProfileMd5('2', 'secret2', False)
state.auth.add(o)
o.create()
class TestOspfExportRules(MakeVirtualRouter):
WITH_OSPF = True
def setup_state_obj(self, fw, state):
state.obj = network.OspfExportRules(
testlib.random_netmask(),
'ext-2', testlib.random_ip(), 2048)
state.ospf.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.new_path_type = 'ext-1'
state.obj.metric = 5309
class TestBgp(MakeVirtualRouter):
def setup_state_obj(self, fw, state):
state.obj = network.Bgp(
enable=True,
router_id=testlib.random_ip(),
reject_default_route=True,
allow_redist_default_route=True,
install_route=True,
ecmp_multi_as=True,
enforce_first_as=True,
local_as=random.randint(1, 2000))
state.vr.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.reject_default_route = False
state.obj.allow_redist_default_route = False
state.obj.install_route = False
state.obj.ecmp_multi_as = False
state.obj.enforce_first_as = False
state.obj.local_as = 101
class TestBgpAuthProfile(MakeVirtualRouter):
WITH_BGP = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpAuthProfile(
testlib.random_name(), 'md5'
)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.secret = 'sha256'
class TestBgpRoutingOptions(MakeVirtualRouter):
WITH_BGP = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpRoutingOptions(
as_format='2-byte',
always_compare_med=True,
deterministic_med_comparison=True,
default_local_preference=10,
graceful_restart_enable=True,
gr_stale_route_time=10,
gr_local_restart_time=60,
gr_max_peer_restart_time=120,
reflector_cluster_id='192.168.19.104',
confederation_member_as=random.randint(1, 100),
aggregate_med=True,
)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.as_format = '4-byte'
state.obj.always_compare_med = False
state.obj.deterministic_med_comparison = False
state.obj.default_local_preference = False
state.obj.graceful_restart_enable = False
state.obj.gr_stale_route_time = 120
state.obj.gr_local_restart_time = 60
state.obj.gr_max_peer_restart_time = 10
state.obj.reflector_cluster_id = '192.168.19.14'
state.obj.confederation_member_as = '13634.10467'
state.obj.aggregate_med = False
# # unsupported configuration, test disabled
# class TestBgpOutboundRouteFilter(MakeVirtualRouter):
# WITH_BGP_ROUTING_OPTIONS = True
# def setup_state_obj(self, fw, state):
# state.obj = network.BgpOutboundRouteFilter(
# enable = True,
# max_received_entries = 100,
# cisco_prefix_mode = False,
# )
# state.bgp_opts.add(state.obj)
# def update_state_obj(self, fw, state):
# state.obj.enable = False
# state.obj.max_received_entries = 200
# state.obj.cisco_prefix_mode = True
class TestBgpDampeningProfile(MakeVirtualRouter):
WITH_BGP = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpDampeningProfile(
name=testlib.random_name(),
enable=True,
cutoff=random.randint(1, 3),
reuse=random.random(),
max_hold_time=random.randint(1, 3600),
decay_half_life_reachable=random.randint(1, 3600),
decay_half_life_unreachable=random.randint(1, 3600),
)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.cutoff = random.randint(1, 3)
state.obj.reuse = random.random()
state.obj.max_hold_time = random.randint(1, 3600)
state.obj.decay_half_life_reachable = random.randint(1, 3600)
state.obj.decay_half_life_unreachable = random.randint(1, 3600)
class TestBgpPeerGroup(MakeVirtualRouter):
WITH_BGP = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpPeerGroup(
name=testlib.random_name(),
enable=True,
aggregated_confed_as_path=True,
soft_reset_with_stored_info=True,
# # 'type'='ebgp',
export_nexthop='resolve',
import_nexthop='original',
remove_private_as=True
)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.aggregated_confed_as_path = False
state.obj.soft_reset_with_stored_info = False
state.obj.export_nexthop = 'use-self'
        state.obj.import_nexthop = 'use-peer'
state.obj.remove_private_as = False
class TestBgpPeer(MakeVirtualRouter):
WITH_BGP = True
WITH_BGP_AUTH_PROFILE = True
WITH_BGP_PEER_GROUP = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpPeer(
name=testlib.random_name(),
enable=True,
peer_as=random.randint(1000, 1255),
enable_mp_bgp=False,
address_family_identifier='ipv4',
subsequent_address_unicast=True,
subsequent_address_multicast=False,
local_interface=state.eths[0],
peer_address_ip=testlib.random_ip(),
connection_authentication=state.bgp_auth.name,
connection_keep_alive_interval=random.randint(25, 35),
connection_min_route_adv_interval=random.randint(25, 35),
connection_multihop=0,
connection_open_delay_time=0,
connection_hold_time=random.randint(85, 95),
connection_idle_hold_time=random.randint(5, 15),
connection_incoming_allow=True,
connection_outgoing_allow=True,
connection_incoming_remote_port=0,
connection_outgoing_local_port=0,
enable_sender_side_loop_detection=True,
reflector_client='non-client',
peering_type='unspecified',
# aggregated_confed_as_path=True,
max_prefixes=random.randint(4000, 6000),
# max_orf_entries=random.randint(4000, 6000),
# soft_reset_with_stored_info=True,
bfd_profile='Inherit-vr-global-setting'
)
state.pg.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.peer_as = random.randint(1000, 1255)
state.obj.enable_mp_bgp = True
state.obj.subsequent_address_multicast = True
state.obj.subsequent_address_unicast = False
state.obj.enable_mp_bgp = True
state.obj.local_interface = state.eths[1]
state.obj.connection_authentication = None
state.obj.connection_keep_alive_interval = random.randint(1, 1200)
state.obj.connection_min_route_adv_interval = random.randint(1, 600)
state.obj.connection_multihop = random.randint(0, 255)
state.obj.connection_open_delay_time = random.randint(0, 240)
state.obj.connection_hold_time = random.randint(3, 3600)
state.obj.connection_idle_hold_time = random.randint(1, 3600)
        state.obj.connection_incoming_allow = False
        state.obj.connection_outgoing_allow = False
        state.obj.connection_incoming_remote_port = random.randint(1025, 65535)
        state.obj.connection_outgoing_local_port = random.randint(1025, 65535)
        state.obj.enable_sender_side_loop_detection = False
        state.obj.reflector_client = 'client'
        state.obj.peering_type = 'bilateral'
        # state.obj.aggregated_confed_as_path = False
        state.obj.max_prefixes = random.randint(4000, 6000)
        # state.obj.max_orf_entries = random.randint(4000, 6000)
        # state.obj.soft_reset_with_stored_info = False
        state.obj.bfd_profile = None
class MakeBgpPolicyRule(MakeVirtualRouter):
WITH_BGP = True
WITH_BGP_PEER = True
WITH_BGP_PEER_GROUP = True
USE_IMPORT_RULE = False
USE_EXPORT_RULE = False
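    # Subclasses flip USE_IMPORT_RULE or USE_EXPORT_RULE so that the same
    # rule_spec below exercises both BgpPolicyImportRule and
    # BgpPolicyExportRule.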
def setup_state_obj(self, fw, state):
rule_spec = {
'name': testlib.random_name(),
'enable': True,
'used_by': state.pg.name,
# match_afi/match_safi are unsupported for testing
# 'match_afi': 'ip',
# 'match_safi': 'ip',
'match_route_table': 'unicast',
'match_nexthop': [testlib.random_ip('/32'), ],
'match_from_peer': state.peer.name,
'match_med': random.randint(0, 4294967295),
'match_as_path_regex': 'as-path-regex',
'match_community_regex': 'community-regex',
'match_extended_community_regex': 'ext-comm-regex',
'action': 'allow',
'action_local_preference': random.randint(0, 4294967295),
'action_med': random.randint(0, 4294967295),
'action_nexthop': testlib.random_ip(),
'action_origin': 'incomplete',
'action_as_path_limit': random.randint(1, 255),
'action_as_path_type': 'none',
}
if self.USE_IMPORT_RULE:
state.obj = network.BgpPolicyImportRule(**rule_spec)
elif self.USE_EXPORT_RULE:
state.obj = network.BgpPolicyExportRule(**rule_spec)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.match_route_table = 'both'
state.obj.match_nexthop = [testlib.random_ip('/32'), ]
state.obj.match_from_peer = None
state.obj.match_med = random.randint(0, 4294967295)
state.obj.match_as_path_regex = 'updated-as-path-regex'
state.obj.match_community_regex = 'updated-community-regex'
state.obj.match_extended_community_regex = 'updated-ext-comm-regex'
state.obj.action_local_preference = random.randint(0, 4294967295)
state.obj.action_med = random.randint(0, 4294967295)
state.obj.action_nexthop = testlib.random_ip()
state.obj.action_origin = 'incomplete'
state.obj.action_as_path_limit = random.randint(1, 255)
        state.obj.action_as_path_type = 'none'
def test_05_action_community_regex_argument(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.action = 'allow'
state.obj.action_community_type = 'remove-regex'
state.obj.action_community_argument = 'test-regex'
state.obj.apply()
def test_06_action_extended_community_regex_argument(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.action = 'allow'
state.obj.action_extended_community_type = 'remove-regex'
state.obj.action_extended_community_argument = 'test-regex'
state.obj.apply()
def test_07_action_deny(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.action = 'deny'
state.obj.apply()
class TestBgpPolicyImportRule(MakeBgpPolicyRule):
    """Define any Import specific tests here"""
    USE_IMPORT_RULE = True
class TestBgpPolicyExportRule(MakeBgpPolicyRule):
    """Define any Export specific tests here"""
    USE_EXPORT_RULE = True
class MakeBgpPolicyAddressPrefix(MakeVirtualRouter):
WITH_BGP = True
WITH_BGP_IMPORT_RULE = False
WITH_BGP_EXPORT_RULE = False
def setup_state_obj(self, fw, state):
state.obj = network.BgpPolicyAddressPrefix(
name=testlib.random_netmask(),
exact=True,
)
if self.WITH_BGP_IMPORT_RULE:
state.import_rule.add(state.obj)
elif self.WITH_BGP_EXPORT_RULE:
state.export_rule.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.exact = False
def test_05_multiple_prefixes(self, fw, state_map):
state = self.sanity(fw, state_map)
prefixes = [network.BgpPolicyAddressPrefix(
name=testlib.random_netmask(),
exact=random.choice([True, False])) for x in range(2)]
if self.WITH_BGP_IMPORT_RULE:
state.import_rule.extend(prefixes)
state.import_rule.apply()
elif self.WITH_BGP_EXPORT_RULE:
state.export_rule.extend(prefixes)
state.export_rule.apply()
class TestBgpPolicyImportRuleAddressPrefix(MakeBgpPolicyAddressPrefix):
WITH_BGP_IMPORT_RULE = True
class TestBgpPolicyExportRuleAddressPrefix(MakeBgpPolicyAddressPrefix):
WITH_BGP_EXPORT_RULE = True
class TestBgpPolicyConditionalAdvertisement(MakeVirtualRouter):
WITH_BGP = True
WITH_BGP_PEER = True
WITH_BGP_PEER_GROUP = True
def setup_state_obj(self, fw, state):
prefixes = [network.BgpPolicyAddressPrefix(
name=testlib.random_netmask()) for x in range(2)]
non_exist = network.BgpPolicyNonExistFilter(
name=testlib.random_name(), enable=False)
non_exist.extend(prefixes)
advert = network.BgpPolicyAdvertiseFilter(
name=testlib.random_name(), enable=False)
advert.extend(prefixes)
state.obj = network.BgpPolicyConditionalAdvertisement(
name=testlib.random_name(),
enable=True,
used_by=state.pg.name,
)
state.obj.add(non_exist)
state.obj.add(advert)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.used_by = None
class TestBgpPolicyAggregationAddress(MakeVirtualRouter):
WITH_BGP = True
def setup_state_obj(self, fw, state):
prefixes = [network.BgpPolicyAddressPrefix(
name=testlib.random_netmask(),
exact=random.choice([True, False])) for x in range(2)]
suppress = network.BgpPolicySuppressFilter(
name=testlib.random_name(), enable=False)
suppress.extend(prefixes)
advert = network.BgpPolicyAdvertiseFilter(
name=testlib.random_name(), enable=False)
advert.extend(prefixes)
state.obj = network.BgpPolicyAggregationAddress(
name=testlib.random_name(),
enable=True,
prefix=testlib.random_netmask(),
summary=False,
)
state.obj.add(suppress)
state.obj.add(advert)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
state.obj.prefix = testlib.random_netmask()
state.obj.summary = True
def test_05_attributes(self, fw, state_map):
state = self.sanity(fw, state_map)
state.obj.enable = True
state.obj.prefix = testlib.random_netmask()
state.obj.summary = True
state.obj.as_set = True
state.obj.attr_local_preference = random.randint(0, 4294967295)
state.obj.attr_med = random.randint(0, 4294967295)
state.obj.attr_nexthop = testlib.random_ip()
state.obj.attr_origin = 'incomplete'
state.obj.attr_as_path_limit = random.randint(1, 255)
        state.obj.attr_as_path_type = 'none'
class TestBgpRedistributionRule(MakeVirtualRouter):
WITH_BGP = True
WITH_REDISTRIBUTION_PROFILE = True
def setup_state_obj(self, fw, state):
state.obj = network.BgpRedistributionRule(
name=state.redist_profile.name,
enable=True, address_family_identifier='ipv4',
)
state.bgp.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.enable = False
class TestManagementProfile(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.ManagementProfile(
testlib.random_name(),
ping=True,
telnet=False,
ssh=True,
http=False,
http_ocsp=True,
https=False,
snmp=True,
response_pages=False,
userid_service=True,
userid_syslog_listener_ssl=False,
userid_syslog_listener_udp=True,
permitted_ip=['1.2.3.4', '5.6.7.8'],
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.permitted_ip = ['9.8.7.6', ]
state.obj.https = True
state.obj.http_ocsp = False
class TestIkeCryptoProfile(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.IkeCryptoProfile(
testlib.random_name(),
authentication=['sha256', ],
dh_group=['group1', ],
lifetime_minutes=42,
)
fw.add(state.obj)
state.obj.set_encryption('3des')
def update_state_obj(self, fw, state):
state.obj.dh_group = ['group5', 'group2']
state.obj.lifetime_minutes = None
state.obj.lifetime_hours = 4
state.obj.authentication_multiple = 3
state.obj.set_encryption(['3des', 'aes128'])
class TestIpsecCryptoProfile(testlib.FwFlow):
def setup_state_obj(self, fw, state):
state.obj = network.IpsecCryptoProfile(
testlib.random_name(),
ah_authentication=['md5', 'sha256'],
dh_group='group1',
lifetime_hours=4,
lifesize_gb=2,
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.ah_authentication = None
state.obj.esp_authentication = ['md5', 'sha512']
state.obj.lifetime_hours = None
state.obj.lifetime_days = 2
state.obj.lifesize_gb = None
state.obj.lifesize_tb = 1
state.obj.set_esp_encryption(['aes128', 'aes192', 'aes256'])
class TestIkeGateway(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.lbi = network.LoopbackInterface(
'loopback.{0}'.format(random.randint(5, 20)),
ip=[testlib.random_ip(), testlib.random_ip()],
)
fw.add(state.lbi)
state.lbi.create()
def setup_state_obj(self, fw, state):
state.obj = network.IkeGateway(
testlib.random_name(),
auth_type='pre-shared-key',
enable_dead_peer_detection=True,
enable_liveness_check=True,
enable_passive_mode=True,
ikev2_crypto_profile='default',
interface=state.lbi.name,
liveness_check_interval=5,
local_id_type='ipaddr',
local_id_value=testlib.random_ip(),
local_ip_address_type='ip',
local_ip_address=state.lbi.ip[0],
peer_ip_type='ip',
peer_ip_value=testlib.random_ip(),
pre_shared_key='secret',
version='ikev2-preferred',
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.disabled = True
state.obj.local_ip_address = state.lbi.ip[1]
state.obj.local_id_type = 'fqdn'
state.obj.local_id_value = 'example.com'
state.obj.peer_id_type = 'keyid'
state.obj.peer_id_value = '{0:04x}'.format(random.randint(1, 65535))
def cleanup_dependencies(self, fw, state):
try:
state.lbi.delete()
except Exception:
pass
class TestIkeIpv6Gateway(testlib.FwFlow):
def create_dependencies(self, fw, state):
if fw._version_info < (7, 0, 0):
raise ValueError('IkeGateway not supported for version < 7.0')
state.lbi = network.LoopbackInterface(
'loopback.{0}'.format(random.randint(5, 20)),
ipv6_enabled=True,
)
state.lbi.add(network.IPv6Address(testlib.random_ipv6()))
state.lbi.add(network.IPv6Address(testlib.random_ipv6()))
fw.add(state.lbi)
state.lbi.create()
def setup_state_obj(self, fw, state):
state.obj = network.IkeGateway(
testlib.random_name(),
auth_type='pre-shared-key',
enable_ipv6=True,
enable_liveness_check=True,
ikev2_crypto_profile='default',
interface=state.lbi.name,
liveness_check_interval=5,
local_id_type='ufqdn',
local_id_value='[email protected]',
local_ip_address_type='ip',
local_ip_address=state.lbi.children[0].address,
peer_id_type='keyid',
peer_id_value='{0:04x}'.format(random.randint(1, 65535)),
peer_ip_type='dynamic',
pre_shared_key='secret',
version='ikev2',
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.disabled = True
state.obj.local_ip_address = state.lbi.children[1].address
state.obj.enable_liveness_check = False
def cleanup_dependencies(self, fw, state):
try:
state.lbi.delete()
except Exception:
pass
class TestIpv4IpsecTunnel(testlib.FwFlow):
def create_dependencies(self, fw, state):
state.ti = network.TunnelInterface(
'tunnel.{0}'.format(random.randint(5, 50)),
ip=[testlib.random_ip(), testlib.random_ip()],
)
fw.add(state.ti)
state.lbi = network.LoopbackInterface(
'loopback.{0}'.format(random.randint(5, 20)),
ip=[testlib.random_ip(), testlib.random_ip()],
)
fw.add(state.lbi)
state.ike_gw = network.IkeGateway(
testlib.random_name(),
auth_type='pre-shared-key',
enable_dead_peer_detection=True,
enable_liveness_check=True,
enable_passive_mode=True,
ikev2_crypto_profile='default',
interface=state.lbi.name,
liveness_check_interval=5,
local_id_type='ipaddr',
local_id_value=testlib.random_ip(),
local_ip_address_type='ip',
local_ip_address=state.lbi.ip[0],
peer_ip_type='ip',
peer_ip_value=testlib.random_ip(),
pre_shared_key='secret',
version='ikev2-preferred',
)
fw.add(state.ike_gw)
state.ti.create()
state.lbi.create()
state.ike_gw.create()
def setup_state_obj(self, fw, state):
state.obj = network.IpsecTunnel(
testlib.random_name(),
tunnel_interface=state.ti.name,
type='auto-key',
ak_ike_gateway=state.ike_gw.name,
ak_ipsec_crypto_profile='default',
)
fw.add(state.obj)
def update_state_obj(self, fw, state):
state.obj.anti_replay = True
state.obj.copy_tos = True
state.obj.enable_tunnel_monitor = True
state.obj.tunnel_monitor_dest_ip = testlib.random_ip()
def test_05_add_ipv4_proxy_id(self, fw, state_map):
state = self.sanity(fw, state_map)
state.proxy_id = network.IpsecTunnelIpv4ProxyId(
testlib.random_name(),
local=testlib.random_netmask(),
remote=testlib.random_netmask(),
any_protocol=True,
)
state.obj.add(state.proxy_id)
state.proxy_id.create()
def cleanup_dependencies(self, fw, state):
for o in (state.ike_gw, state.lbi, state.ti):
try:
o.delete()
except Exception:
pass
|
jordonbiondo/cldoc | refs/heads/master | cldoc/generators/xml.py | 3 | # This file is part of cldoc. cldoc is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from cldoc.clang import cindex
from .generator import Generator
from cldoc import nodes
from cldoc import example
from cldoc import utf8
from xml.etree import ElementTree
import sys, os
from cldoc import fs
class Xml(Generator):
def generate(self, outdir):
if not outdir:
outdir = 'xml'
try:
fs.fs.makedirs(outdir)
except OSError:
pass
ElementTree.register_namespace('gobject', 'http://jessevdk.github.com/cldoc/gobject/1.0')
ElementTree.register_namespace('cldoc', 'http://jessevdk.github.com/cldoc/1.0')
self.index = ElementTree.Element('index')
self.written = {}
self.indexmap = {
self.tree.root: self.index
}
cm = self.tree.root.comment
if cm:
if cm.brief:
self.index.append(self.doc_to_xml(self.tree.root, cm.brief, 'brief'))
if cm.doc:
self.index.append(self.doc_to_xml(self.tree.root, cm.doc))
Generator.generate(self, outdir)
if self.options.report:
self.add_report()
self.write_xml(self.index, 'index.xml')
print('Generated `{0}\''.format(outdir))
def add_report(self):
from .report import Report
reportname = 'report'
while reportname + '.xml' in self.written:
reportname = '_' + reportname
page = Report(self.tree, self.options).generate(reportname)
elem = ElementTree.Element('report')
elem.set('name', 'Documentation generator')
elem.set('ref', reportname)
self.index.append(elem)
self.write_xml(page, reportname + '.xml')
def indent(self, elem, level=0):
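        # Recursively pretty-print the element tree in place by adjusting
        # text/tail whitespace; 'doc' elements are left untouched so that
        # documentation text keeps its exact formatting.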
i = "\n" + " " * level
if elem.tag == 'doc':
return
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
self.indent(e, level + 1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def write_xml(self, elem, fname):
self.written[fname] = True
elem.attrib['xmlns'] = 'http://jessevdk.github.com/cldoc/1.0'
tree = ElementTree.ElementTree(elem)
self.indent(tree.getroot())
f = fs.fs.open(os.path.join(self.outdir, fname), 'w')
tree.write(f, encoding='utf-8', xml_declaration=True)
f.write('\n')
f.close()
def is_page(self, node):
if node.force_page:
return True
if isinstance(node, nodes.Struct) and node.is_anonymous:
return False
if isinstance(node, nodes.Class):
for child in node.children:
if not (isinstance(child, nodes.Field) or \
isinstance(child, nodes.Variable) or \
isinstance(child, nodes.TemplateTypeParameter)):
return True
return False
pagecls = [nodes.Namespace, nodes.Category, nodes.Root]
for cls in pagecls:
if isinstance(node, cls):
return True
if isinstance(node, nodes.Typedef) and len(node.children) > 0:
return True
return False
def is_top(self, node):
if self.is_page(node):
return True
if node.parent == self.tree.root:
return True
return False
def refid(self, node):
if not node._refid is None:
return node._refid
parent = node
meid = node.qid
if not node.parent or (isinstance(node.parent, nodes.Root) and not self.is_page(node)):
return 'index#' + meid
# Find topmost parent
while not self.is_page(parent):
parent = parent.parent
if not node is None:
node._refid = parent.qid + '#' + meid
return node._refid
else:
return None
def add_ref_node_id(self, node, elem):
r = self.refid(node)
if not r is None:
elem.set('ref', r)
def add_ref_id(self, cursor, elem):
if not cursor:
return
if cursor in self.tree.cursor_to_node:
node = self.tree.cursor_to_node[cursor]
elif cursor.get_usr() in self.tree.usr_to_node:
node = self.tree.usr_to_node[cursor.get_usr()]
else:
return
self.add_ref_node_id(node, elem)
def type_to_xml(self, tp, parent=None):
elem = ElementTree.Element('type')
if tp.is_constant_array:
elem.set('size', str(tp.constant_array_size))
elem.set('class', 'array')
elem.append(self.type_to_xml(tp.element_type, parent))
elif tp.is_function:
elem.set('class', 'function')
result = ElementTree.Element('result')
result.append(self.type_to_xml(tp.function_result, parent))
elem.append(result)
args = ElementTree.Element('arguments')
elem.append(args)
for arg in tp.function_arguments:
args.append(self.type_to_xml(arg, parent))
else:
elem.set('name', tp.typename_for(parent))
if len(tp.qualifier) > 0:
elem.set('qualifier', tp.qualifier_string)
if tp.builtin:
elem.set('builtin', 'yes')
if tp.is_out:
elem.set('out', 'yes')
if tp.transfer_ownership != 'none':
elem.set('transfer-ownership', tp.transfer_ownership)
if tp.allow_none:
elem.set('allow-none', 'yes')
self.add_ref_id(tp.decl, elem)
return elem
def enumvalue_to_xml(self, node, elem):
elem.set('value', str(node.value))
def enum_to_xml(self, node, elem):
if not node.typedef is None:
elem.set('typedef', 'yes')
if node.isclass:
elem.set('class', 'yes')
def struct_to_xml(self, node, elem):
self.class_to_xml(node, elem)
if not node.typedef is None:
elem.set('typedef', 'yes')
def templatetypeparameter_to_xml(self, node, elem):
dt = node.default_type
if not dt is None:
d = ElementTree.Element('default')
d.append(self.type_to_xml(dt))
elem.append(d)
def templatenontypeparameter_to_xml(self, node, elem):
elem.append(self.type_to_xml(node.type))
def function_to_xml(self, node, elem):
if not (isinstance(node, nodes.Constructor) or
isinstance(node, nodes.Destructor)):
ret = ElementTree.Element('return')
if not node.comment is None and hasattr(node.comment, 'returns') and node.comment.returns:
ret.append(self.doc_to_xml(node, node.comment.returns))
tp = self.type_to_xml(node.return_type, node.parent)
ret.append(tp)
elem.append(ret)
for arg in node.arguments:
ret = ElementTree.Element('argument')
ret.set('name', arg.name)
ret.set('id', arg.qid)
if not node.comment is None and arg.name in node.comment.params:
ret.append(self.doc_to_xml(node, node.comment.params[arg.name]))
ret.append(self.type_to_xml(arg.type, node.parent))
elem.append(ret)
def method_to_xml(self, node, elem):
self.function_to_xml(node, elem)
if len(node.override) > 0:
elem.set('override', 'yes')
for ov in node.override:
ovelem = ElementTree.Element('override')
ovelem.set('name', ov.qid_to(node.qid))
self.add_ref_node_id(ov, ovelem)
elem.append(ovelem)
if node.virtual:
elem.set('virtual', 'yes')
if node.static:
elem.set('static', 'yes')
if node.abstract:
elem.set('abstract', 'yes')
def typedef_to_xml(self, node, elem):
elem.append(self.type_to_xml(node.type, node))
def typedef_to_xml_ref(self, node, elem):
elem.append(self.type_to_xml(node.type, node))
def variable_to_xml(self, node, elem):
elem.append(self.type_to_xml(node.type, node.parent))
def property_to_xml(self, node, elem):
elem.append(self.type_to_xml(node.type, node.parent))
def set_access_attribute(self, node, elem):
if node.access == cindex.CXXAccessSpecifier.PROTECTED:
elem.set('access', 'protected')
elif node.access == cindex.CXXAccessSpecifier.PRIVATE:
elem.set('access', 'private')
elif node.access == cindex.CXXAccessSpecifier.PUBLIC:
elem.set('access', 'public')
def process_bases(self, node, elem, bases, tagname):
for base in bases:
child = ElementTree.Element(tagname)
self.set_access_attribute(base, child)
child.append(self.type_to_xml(base.type, node))
if base.node and not base.node.comment is None and base.node.comment.brief:
child.append(self.doc_to_xml(base.node, base.node.comment.brief, 'brief'))
elem.append(child)
def process_subclasses(self, node, elem, subclasses, tagname):
for subcls in subclasses:
child = ElementTree.Element(tagname)
self.set_access_attribute(subcls, child)
self.add_ref_node_id(subcls, child)
child.set('name', subcls.qid_to(node.qid))
if not subcls.comment is None and subcls.comment.brief:
child.append(self.doc_to_xml(subcls, subcls.comment.brief, 'brief'))
elem.append(child)
def class_to_xml(self, node, elem):
self.process_bases(node, elem, node.bases, 'base')
self.process_bases(node, elem, node.implements, 'implements')
self.process_subclasses(node, elem, node.subclasses, 'subclass')
self.process_subclasses(node, elem, node.implemented_by, 'implementedby')
hasabstract = False
allabstract = True
for method in node.methods:
if method.abstract:
hasabstract = True
else:
allabstract = False
if hasabstract:
if allabstract:
elem.set('interface', 'true')
else:
elem.set('abstract', 'true')
def field_to_xml(self, node, elem):
elem.append(self.type_to_xml(node.type, node.parent))
def doc_to_xml(self, parent, doc, tagname='doc'):
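        # Build a <doc> (or <brief>) element from a parsed comment: plain text
        # accumulates into text/tail, example code becomes nested <code>
        # elements with per-token highlighting, and cross-references become
        # <ref> elements joined with ', ' and ' and '.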
doce = ElementTree.Element(tagname)
s = ''
last = None
for component in doc.components:
if isinstance(component, utf8.string):
s += component
elif isinstance(component, example.Example):
# Make highlighting
if last is None:
doce.text = s
else:
last.tail = s
s = ''
code = ElementTree.Element('code')
doce.append(code)
last = code
for item in component:
if item.classes is None:
s += item.text
else:
last.tail = s
s = ''
par = code
for cls in item.classes:
e = ElementTree.Element(cls)
par.append(e)
par = e
par.text = item.text
last = par
if last == code:
last.text = s
else:
last.tail = s
s = ''
last = code
else:
if last is None:
doce.text = s
else:
last.tail = s
s = ''
nds = component[0]
refname = component[1]
# Make multiple refs
for ci in range(len(nds)):
cc = nds[ci]
last = ElementTree.Element('ref')
if refname:
last.text = refname
else:
last.text = parent.qlbl_from(cc)
self.add_ref_node_id(cc, last)
if ci != len(nds) - 1:
if ci == len(nds) - 2:
last.tail = ' and '
else:
last.tail = ', '
doce.append(last)
if last is None:
doce.text = s
else:
last.tail = s
return doce
def call_type_specific(self, node, elem, fn):
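        # Walk the node's class hierarchy breadth-first looking for a handler
        # named '<classname>_<fn>' (e.g. 'enum_to_xml') and invoke the first
        # one found.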
clss = [node.__class__]
while len(clss) > 0:
cls = clss[0]
clss = clss[1:]
if cls == nodes.Node:
continue
nm = cls.__name__.lower() + '_' + fn
if hasattr(self, nm):
getattr(self, nm)(node, elem)
break
if cls != nodes.Node:
clss.extend(cls.__bases__)
def node_to_xml(self, node):
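        # Serialize a node to a full XML element: copy its props as attributes,
        # attach brief/doc comments, apply the type-specific handler, then
        # recurse into non-private children (children that get their own page
        # are emitted as references instead of inline).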
elem = ElementTree.Element(node.classname)
props = node.props
for prop in props:
if props[prop]:
elem.set(prop, props[prop])
if not node.comment is None and node.comment.brief:
elem.append(self.doc_to_xml(node, node.comment.brief, 'brief'))
if not node.comment is None and node.comment.doc:
elem.append(self.doc_to_xml(node, node.comment.doc))
self.call_type_specific(node, elem, 'to_xml')
for child in node.sorted_children():
if child.access == cindex.CXXAccessSpecifier.PRIVATE:
continue
self.refid(child)
if self.is_page(child):
chelem = self.node_to_xml_ref(child)
else:
chelem = self.node_to_xml(child)
elem.append(chelem)
return elem
def templated_to_xml_ref(self, node, element):
for child in node.sorted_children():
if not (isinstance(child, nodes.TemplateTypeParameter) or isinstance(child, nodes.TemplateNonTypeParameter)):
continue
element.append(self.node_to_xml(child))
def generate_page(self, node):
elem = self.node_to_xml(node)
self.write_xml(elem, node.qid.replace('::', '.') + '.xml')
def node_to_xml_ref(self, node):
elem = ElementTree.Element(node.classname)
props = node.props
# Add reference item to index
self.add_ref_node_id(node, elem)
if 'name' in props:
elem.set('name', props['name'])
if not node.comment is None and node.comment.brief:
elem.append(self.doc_to_xml(node, node.comment.brief, 'brief'))
self.call_type_specific(node, elem, 'to_xml_ref')
return elem
def generate_node(self, node):
# Ignore private stuff
if node.access == cindex.CXXAccessSpecifier.PRIVATE:
return
self.refid(node)
if self.is_page(node):
elem = self.node_to_xml_ref(node)
self.indexmap[node.parent].append(elem)
self.indexmap[node] = elem
self.generate_page(node)
elif self.is_top(node):
self.index.append(self.node_to_xml(node))
if isinstance(node, nodes.Namespace) or isinstance(node, nodes.Category):
# Go deep for namespaces and categories
Generator.generate_node(self, node)
elif isinstance(node, nodes.Class):
# Go deep, but only for inner classes
Generator.generate_node(self, node, lambda x: isinstance(x, nodes.Class))
# vi:ts=4:et
|
earshel/PokeyPyManager | refs/heads/master | POGOProtos/Inventory/ItemId_pb2.py | 7 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/ItemId.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/ItemId.proto',
package='POGOProtos.Inventory',
syntax='proto3',
serialized_pb=_b('\n!POGOProtos/Inventory/ItemId.proto\x12\x14POGOProtos.Inventory*\xc7\x05\n\x06ItemId\x12\x10\n\x0cITEM_UNKNOWN\x10\x00\x12\x12\n\x0eITEM_POKE_BALL\x10\x01\x12\x13\n\x0fITEM_GREAT_BALL\x10\x02\x12\x13\n\x0fITEM_ULTRA_BALL\x10\x03\x12\x14\n\x10ITEM_MASTER_BALL\x10\x04\x12\x0f\n\x0bITEM_POTION\x10\x65\x12\x15\n\x11ITEM_SUPER_POTION\x10\x66\x12\x15\n\x11ITEM_HYPER_POTION\x10g\x12\x13\n\x0fITEM_MAX_POTION\x10h\x12\x10\n\x0bITEM_REVIVE\x10\xc9\x01\x12\x14\n\x0fITEM_MAX_REVIVE\x10\xca\x01\x12\x13\n\x0eITEM_LUCKY_EGG\x10\xad\x02\x12\x1a\n\x15ITEM_INCENSE_ORDINARY\x10\x91\x03\x12\x17\n\x12ITEM_INCENSE_SPICY\x10\x92\x03\x12\x16\n\x11ITEM_INCENSE_COOL\x10\x93\x03\x12\x18\n\x13ITEM_INCENSE_FLORAL\x10\x94\x03\x12\x13\n\x0eITEM_TROY_DISK\x10\xf5\x03\x12\x12\n\rITEM_X_ATTACK\x10\xda\x04\x12\x13\n\x0eITEM_X_DEFENSE\x10\xdb\x04\x12\x13\n\x0eITEM_X_MIRACLE\x10\xdc\x04\x12\x14\n\x0fITEM_RAZZ_BERRY\x10\xbd\x05\x12\x14\n\x0fITEM_BLUK_BERRY\x10\xbe\x05\x12\x15\n\x10ITEM_NANAB_BERRY\x10\xbf\x05\x12\x15\n\x10ITEM_WEPAR_BERRY\x10\xc0\x05\x12\x15\n\x10ITEM_PINAP_BERRY\x10\xc1\x05\x12\x18\n\x13ITEM_SPECIAL_CAMERA\x10\xa1\x06\x12#\n\x1eITEM_INCUBATOR_BASIC_UNLIMITED\x10\x85\x07\x12\x19\n\x14ITEM_INCUBATOR_BASIC\x10\x86\x07\x12!\n\x1cITEM_POKEMON_STORAGE_UPGRADE\x10\xe9\x07\x12\x1e\n\x19ITEM_ITEM_STORAGE_UPGRADE\x10\xea\x07\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ITEMID = _descriptor.EnumDescriptor(
name='ItemId',
full_name='POGOProtos.Inventory.ItemId',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ITEM_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_POKE_BALL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_GREAT_BALL', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_ULTRA_BALL', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_MASTER_BALL', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_POTION', index=5, number=101,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_SUPER_POTION', index=6, number=102,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_HYPER_POTION', index=7, number=103,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_MAX_POTION', index=8, number=104,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_REVIVE', index=9, number=201,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_MAX_REVIVE', index=10, number=202,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_LUCKY_EGG', index=11, number=301,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCENSE_ORDINARY', index=12, number=401,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCENSE_SPICY', index=13, number=402,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCENSE_COOL', index=14, number=403,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCENSE_FLORAL', index=15, number=404,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_TROY_DISK', index=16, number=501,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_X_ATTACK', index=17, number=602,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_X_DEFENSE', index=18, number=603,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_X_MIRACLE', index=19, number=604,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_RAZZ_BERRY', index=20, number=701,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_BLUK_BERRY', index=21, number=702,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_NANAB_BERRY', index=22, number=703,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_WEPAR_BERRY', index=23, number=704,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_PINAP_BERRY', index=24, number=705,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_SPECIAL_CAMERA', index=25, number=801,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCUBATOR_BASIC_UNLIMITED', index=26, number=901,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_INCUBATOR_BASIC', index=27, number=902,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_POKEMON_STORAGE_UPGRADE', index=28, number=1001,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ITEM_ITEM_STORAGE_UPGRADE', index=29, number=1002,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=60,
serialized_end=771,
)
_sym_db.RegisterEnumDescriptor(_ITEMID)
ItemId = enum_type_wrapper.EnumTypeWrapper(_ITEMID)
ITEM_UNKNOWN = 0
ITEM_POKE_BALL = 1
ITEM_GREAT_BALL = 2
ITEM_ULTRA_BALL = 3
ITEM_MASTER_BALL = 4
ITEM_POTION = 101
ITEM_SUPER_POTION = 102
ITEM_HYPER_POTION = 103
ITEM_MAX_POTION = 104
ITEM_REVIVE = 201
ITEM_MAX_REVIVE = 202
ITEM_LUCKY_EGG = 301
ITEM_INCENSE_ORDINARY = 401
ITEM_INCENSE_SPICY = 402
ITEM_INCENSE_COOL = 403
ITEM_INCENSE_FLORAL = 404
ITEM_TROY_DISK = 501
ITEM_X_ATTACK = 602
ITEM_X_DEFENSE = 603
ITEM_X_MIRACLE = 604
ITEM_RAZZ_BERRY = 701
ITEM_BLUK_BERRY = 702
ITEM_NANAB_BERRY = 703
ITEM_WEPAR_BERRY = 704
ITEM_PINAP_BERRY = 705
ITEM_SPECIAL_CAMERA = 801
ITEM_INCUBATOR_BASIC_UNLIMITED = 901
ITEM_INCUBATOR_BASIC = 902
ITEM_POKEMON_STORAGE_UPGRADE = 1001
ITEM_ITEM_STORAGE_UPGRADE = 1002
DESCRIPTOR.enum_types_by_name['ItemId'] = _ITEMID
# @@protoc_insertion_point(module_scope)
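# Illustrative use of the generated enum wrapper defined above (a sketch only;
# the import path "POGOProtos.Inventory.ItemId_pb2" is an assumption based on
# the proto package name and may differ in a given build):
#
#   from POGOProtos.Inventory import ItemId_pb2
#
#   ItemId_pb2.ItemId.Name(1)                   # -> 'ITEM_POKE_BALL'
#   ItemId_pb2.ItemId.Value('ITEM_GREAT_BALL')  # -> 2
#   ItemId_pb2.ITEM_ULTRA_BALL                  # -> 3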
|
luiseiherrera/jsmd | refs/heads/master | blog/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
andrew-szymanski/gae_django | refs/heads/master | django/core/management/commands/sqlreset.py | 313 | from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_reset
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the DROP TABLE SQL, then the CREATE TABLE SQL, for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_reset(app, self.style, connections[options.get('database', DEFAULT_DB_ALIAS)])).encode('utf-8')
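# Example invocation (a sketch only; "blog" is a placeholder app label). The
# command only prints the generated SQL, it does not execute it:
#
#   python manage.py sqlreset blog
#   python manage.py sqlreset blog --database=default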
|
VanirAOSP/external_chromium_org | refs/heads/kk44 | PRESUBMIT_test.py | 24 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import unittest
import PRESUBMIT
class MockInputApi(object):
def __init__(self):
self.re = re
self.os_path = os.path
self.files = []
self.is_committing = False
def AffectedFiles(self):
return self.files
class MockOutputApi(object):
class PresubmitResult(object):
def __init__(self, message, items=None, long_text=''):
self.message = message
self.items = items
self.long_text = long_text
class PresubmitError(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'error'
class PresubmitPromptWarning(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'warning'
class PresubmitNotifyResult(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'notify'
class PresubmitPromptOrNotify(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'promptOrNotify'
class MockFile(object):
def __init__(self, local_path, new_contents):
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
def ChangedContents(self):
return self._changed_contents
def NewContents(self):
return self._new_contents
def LocalPath(self):
return self._local_path
class MockChange(object):
def __init__(self, changed_files):
self._changed_files = changed_files
def LocalPaths(self):
return self._changed_files
class IncludeOrderTest(unittest.TestCase):
def testSystemHeaderOrder(self):
scope = [(1, '#include <csystem.h>'),
(2, '#include <cppsystem>'),
(3, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(0, len(warnings))
def testSystemHeaderOrderMismatch1(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include <csystem.h>'),
(30, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('20' in warnings[0])
def testSystemHeaderOrderMismatch2(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include "acustom.h"'),
(30, '#include <csystem.h>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('30' in warnings[0])
def testSystemHeaderOrderMismatch3(self):
scope = [(10, '#include "acustom.h"'),
(20, '#include <csystem.h>'),
(30, '#include <cppsystem>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(2, len(warnings))
self.assertTrue('20' in warnings[0])
self.assertTrue('30' in warnings[1])
def testAlphabeticalOrderMismatch(self):
scope = [(10, '#include <csystem.h>'),
(15, '#include <bsystem.h>'),
(20, '#include <cppsystem>'),
(25, '#include <bppsystem>'),
(30, '#include "bcustom.h"'),
(35, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(3, len(warnings))
self.assertTrue('15' in warnings[0])
self.assertTrue('25' in warnings[1])
self.assertTrue('35' in warnings[2])
def testSpecialFirstInclude1(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude2(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude3(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude4(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/bar.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testSpecialFirstInclude5(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo-suffix.h', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude6(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo_win.h"',
'#include <set>',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_unittest_win.h', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testOrderAlreadyWrong(self):
scope = [(1, '#include "b.h"'),
(2, '#include "a.h"'),
(3, '#include "c.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [3])
self.assertEqual(0, len(warnings))
def testConflictAdded1(self):
scope = [(1, '#include "a.h"'),
(2, '#include "c.h"'),
(3, '#include "b.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('3' in warnings[0])
def testConflictAdded2(self):
scope = [(1, '#include "c.h"'),
(2, '#include "b.h"'),
(3, '#include "d.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testIfElifElseEndif(self):
mock_input_api = MockInputApi()
contents = ['#include "e.h"',
'#define foo',
'#include "f.h"',
'#undef foo',
'#include "e.h"',
'#if foo',
'#include "d.h"',
'#elif bar',
'#include "c.h"',
'#else',
'#include "b.h"',
'#endif',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testExcludedIncludes(self):
# #include <sys/...>'s can appear in any order.
mock_input_api = MockInputApi()
contents = ['#include <sys/b.h>',
'#include <sys/a.h>']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include <atlbase.h>',
'#include <aaa.h>']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "build/build_config.h"',
'#include "aaa.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testCheckOnlyCFiles(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
contents = ['#include <b.h>',
'#include <a.h>']
mock_file_cc = MockFile('something.cc', contents)
mock_file_h = MockFile('something.h', contents)
mock_file_other = MockFile('something.py', contents)
mock_input_api.files = [mock_file_cc, mock_file_h, mock_file_other]
warnings = PRESUBMIT._CheckIncludeOrder(mock_input_api, mock_output_api)
self.assertEqual(1, len(warnings))
self.assertEqual(2, len(warnings[0].items))
self.assertEqual('promptOrNotify', warnings[0].type)
def testUncheckableIncludes(self):
mock_input_api = MockInputApi()
contents = ['#include <windows.h>',
                '#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "gpu/command_buffer/gles_autogen.h"',
                '#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "gl_mock_autogen.h"',
                '#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "ipc/some_macros.h"',
                '#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
class VersionControlConflictsTest(unittest.TestCase):
def testTypicalConflict(self):
lines = ['<<<<<<< HEAD',
' base::ScopedTempDir temp_dir_;',
'=======',
' ScopedTempDir temp_dir_;',
'>>>>>>> master']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(3, len(errors))
self.assertTrue('1' in errors[0])
self.assertTrue('3' in errors[1])
self.assertTrue('5' in errors[2])
class BadExtensionsTest(unittest.TestCase):
def testBadRejFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ''),
MockFile('some/path/foo.cc.rej', ''),
MockFile('some/path2/bar.h.rej', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(2, len(results[0].items))
self.assertTrue('foo.cc.rej' in results[0].items[0])
self.assertTrue('bar.h.rej' in results[0].items[1])
def testBadOrigFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h.orig', ''),
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(1, len(results[0].items))
self.assertTrue('qux.h.orig' in results[0].items[0])
def testGoodFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
def testOnlyOwnersFiles(self):
mock_change = MockChange([
'some/path/OWNERS',
'A\Windows\Path\OWNERS',
])
results = PRESUBMIT.GetPreferredTrySlaves(None, mock_change)
self.assertEqual(0, len(results))
class InvalidOSMacroNamesTest(unittest.TestCase):
def testInvalidOSMacroNames(self):
lines = ['#if defined(OS_WINDOWS)',
' #elif defined(OS_WINDOW)',
' # if defined(OS_MACOSX) || defined(OS_CHROME)',
'# else // defined(OS_MAC)',
'#endif // defined(OS_MACOS)']
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(len(lines), len(errors))
self.assertTrue(':1 OS_WINDOWS' in errors[0])
self.assertTrue('(did you mean OS_WIN?)' in errors[0])
def testValidOSMacroNames(self):
lines = ['#if defined(%s)' % m for m in PRESUBMIT._VALID_OS_MACROS]
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(0, len(errors))
class CheckAddedDepsHaveTestApprovalsTest(unittest.TestCase):
def testDepsFilesToCheck(self):
changed_lines = [
'"+breakpad",',
'"+chrome/installer",',
'"+chrome/plugin/chrome_content_plugin_client.h",',
'"+chrome/utility/chrome_content_utility_client.h",',
'"+chromeos/chromeos_paths.h",',
'"+components/breakpad",',
'"+components/nacl/common",',
'"+content/public/browser/render_process_host.h",',
'"+grit", # For generated headers',
'"+grit/generated_resources.h",',
'"+grit/",',
'"+policy", # For generated headers and source',
'"+sandbox",',
'"+tools/memory_watcher",',
'"+third_party/lss/linux_syscall_support.h",',
]
files_to_check = PRESUBMIT._DepsFilesToCheck(re, changed_lines)
expected = set([
'breakpad/DEPS',
'chrome/installer/DEPS',
'chrome/plugin/DEPS',
'chrome/utility/DEPS',
'chromeos/DEPS',
'components/breakpad/DEPS',
'components/nacl/common/DEPS',
'content/public/browser/DEPS',
'policy/DEPS',
'sandbox/DEPS',
'tools/memory_watcher/DEPS',
'third_party/lss/DEPS',
])
    self.assertEqual(expected, files_to_check)
if __name__ == '__main__':
unittest.main()
|
jredd23/March_Madness_Style_Bracket | refs/heads/master | manage.py | 1 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncaa.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
overtherain/scriptfile | refs/heads/master | software/googleAppEngine/lib/django_1_3/tests/regressiontests/generic_views/views.py | 49 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
return {
'params': kwargs,
'key': 'value'
}
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
  paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
def get_queryset(self):
return Author.objects.all()
|
odejesush/tensorflow | refs/heads/master | tensorflow/python/ops/control_flow_ops.py | 6 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Control Flow Operations
TensorFlow provides several operations and classes that you can use to control
the execution of operations and add conditional dependencies to your graph.
@@identity
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
@@case
@@while_loop
## Logical Operators
TensorFlow provides several operations that you can use to add logical operators
to your graph.
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
## Comparison Operators
TensorFlow provides several operations that you can use to add comparison
operators to your graph.
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@where
## Debugging Operations
TensorFlow provides several operations that you can use to validate values and
debug your graph.
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import control_flow_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
NOTE: To ensure that Assert executes, one usually attaches a dependency:
```python
# Ensure maximum element of x is smaller or equal to 1
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
with tf.control_dependencies([assert_op]):
... code using x ...
```
Args:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
summarize: Print this many entries of each tensor.
name: A name for this operation (optional).
Returns:
assert_op: An `Operation` that, when executed, raises a
`tf.errors.InvalidArgumentError` if `condition` is not true.
"""
with ops.name_scope(name, "Assert", [condition, data]) as name:
xs = ops.convert_n_to_tensor(data)
if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
# As a simple heuristic, we assume that string and int32 are
# on host to avoid the need to use cond. If it is not case,
# we will pay the price copying the tensor to host memory.
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
else:
condition = ops.convert_to_tensor(condition, name="Condition")
def true_assert():
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
guarded_assert = cond(
condition, no_op, true_assert, name="AssertGuard")
return guarded_assert.op
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_array_ops._ref_identity(data, name=name)
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = array_ops.identity(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_next_iteration(data, name=name)
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = next_iteration(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
use_ref=True, use_input_shape=True, name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `data` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    use_input_shape: If true, set the shape of the result to the static shape
      of `data`.
    name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access
result = ref_enter(data, frame_name, is_constant, parallel_iterations,
name=name)
else:
result = enter(data, frame_name, is_constant, parallel_iterations,
name=name)
if use_input_shape:
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(data.values, frame_name, is_constant,
parallel_iterations=parallel_iterations,
use_input_shape=use_input_shape, name=name)
indices = enter(data.indices, frame_name, is_constant,
parallel_iterations, name="indices")
if use_input_shape:
indices.set_shape(data.indices.get_shape())
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = enter(dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = enter(data.dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def exit(data, name=None):
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Args:
data: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_control_flow_ops._ref_exit(data, name)
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops._exit(dense_shape, name)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.dense_shape, name)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
"""Forwards `data` to an output determined by `pred`.
  If `pred` is true, the `data` input is forwarded to `output_true`;
  otherwise the data goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
dtype: Optional element type for the returned tensor. If missing,
           the type is inferred from the type of `data`.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded
to `output_true`, otherwise it goes to `output_false`.
"""
with ops.name_scope(name, "Switch", [data, pred]) as name:
data = ops.internal_convert_to_tensor_or_indexed_slices(
data, dtype=dtype, name="data", as_ref=True)
pred = ops.convert_to_tensor(pred, name="pred")
if isinstance(data, ops.Tensor):
return gen_control_flow_ops._switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
dense_shape, pred, name="dense_shape")
else:
dense_shape_f, dense_shape_t = None, None
return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
ops.IndexedSlices(val_t, ind_t, dense_shape_t))
else:
dense_shape = data.dense_shape
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
data.dense_shape, pred, name="dense_shape")
return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
  If `pred` is true, the `data` input is forwarded to `output_true`;
  otherwise the data goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
name: A name for this operation (optional).
Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
`output_true`, otherwise it goes to `output_false`.
Raises:
TypeError: if data is not a Tensor or IndexedSlices
"""
data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
# NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
# addresses the following scenario.
#
# Assume you execute Optimizer.apply_gradients() in a branch of a cond().
#
# 1. The update op is created inside a `with ops.colocate(var):` block
#
# 2. Some tensor `data` is captured and a switch is created in a
# `with ops.colocate_with(data):` block.
#
# with ops.colocate_with(var):
# with ops.colocate_with(data):
# op = ...
#
# var and data may be pinned to different devices, so we want to ops
# created within ops.colocate_with(data) to ignore the existing stack.
with ops.colocate_with(data, ignore_existing=True):
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_switch(data, pred, name=name)
return switch(data, pred, name=name)
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
before merging.
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If any of the inputs is None, or inputs are IndexedSlices and
some but not all have a dense_shape property.
"""
if any([inp is None for inp in inputs]):
raise ValueError("At least one of the merge inputs is None: %s" % inputs)
with ops.name_scope(name, "Merge", inputs) as name:
inputs = [ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)
for inp in inputs]
if all([isinstance(v, ops.Tensor) for v in inputs]):
if all([v.dtype._is_ref_dtype for v in inputs]): # pylint: disable=protected-access
return gen_control_flow_ops._ref_merge(inputs, name)
else:
return gen_control_flow_ops._merge(inputs, name)
elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
return (sparse_tensor.SparseTensor(indices, values, dense_shape),
chosen_index)
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape is not None for inp in inputs):
if any(inp.dense_shape is None for inp in inputs):
raise ValueError("Either all merged IndexedSlices must have a "
"dense_shape, or none must have a dense_shape.")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
else:
dense_shape = None
return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
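# A minimal sketch of how switch() and merge() compose into a data-dependent
# branch, mirroring what cond() builds internally (`x` is an illustrative
# placeholder tensor, not part of this module):
#
#   pred = math_ops.greater(x, 0)
#   x_false, x_true = switch(x, pred)
#   # Only the taken branch receives data, so only one of these ops runs.
#   doubled = x_false * 2
#   incremented = x_true + 1
#   result, chosen_index = merge([doubled, incremented])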
def _convert_tensorarrays_to_flows(tensors_or_tensor_arrays):
return [ta.flow if isinstance(ta, tensor_array_ops.TensorArray)
else ta
for ta in tensors_or_tensor_arrays]
def _make_tensor_array(ta, t_or_flow):
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype, handle=ta.handle, flow=t_or_flow,
infer_shape=ta._infer_shape)
new_ta._element_shape = ta._element_shape # pylint: disable=protected-access
return new_ta
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d"
% (len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
_make_tensor_array(ta, t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray)
else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)]
def _IsLoopConstantEnter(op):
"""Return true iff op is a loop invariant."""
is_enter = (op.type == "Enter" or op.type == "RefEnter")
return is_enter and op.get_attr("is_constant")
def _GetLoopConstantEnter(value):
"""Return the enter op if we can infer `value` to be a loop invariant."""
id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
op = value.op
while op.type in id_ops:
op = op.inputs[0].op
return op if _IsLoopConstantEnter(op) else None
def _GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context()
if IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
def _ShapeLessThanOrEqual(shape1, shape2):
if shape2.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim2.value is not None and dim1.value != dim2.value:
return False
return True
def _SetShapeInvariants(input_vars, enter_vars, shapes):
"""Set the shapes of the tensors in `enter_vars` to `shapes`.
Args:
input_vars: A list of tensors that are inputs to `enter_vars`.
enter_vars: A list of tensors whose shapes will be set.
shapes: A (possibly nested) list of shapes.
Raises:
ValueError: If any tensor in `enter_vars` has a less specific shape
than its corresponding shape in `shapes`.
"""
if shapes is None:
return
flat_shapes = nest.flatten(shapes)
if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
# Check that the shapes of the inputs are less than the shape invariants,
# and set the shapes of `enter_vars` to the shape invariants.
for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):
if isinstance(var, ops.Tensor):
if not _ShapeLessThanOrEqual(inp.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the loop variable. It enters the loop "
"with shape %s, but the specified shape invariant is %s."
% (inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the values tensor of this IndexedSlices. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.values.name, inp.values.get_shape(), shape))
var.values.set_shape(shape)
var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))
if var.dense_shape is not None:
var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))
else:
if not _ShapeLessThanOrEqual(inp.dense_shape.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the shape tensor of this SparseTensor. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.dense_shape.name, inp.dense_shape.get_shape(), shape))
var.values.set_shape(tensor_shape.TensorShape([None]))
var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))
var.dense_shape.set_shape(shape)
def _EnforceShapeInvariant(merge_var, next_var):
  """Check if the shapes of the loop variables are invariants.
  Args:
    merge_var: The tensor representing the initial value of the loop variable.
    next_var: The tensor representing the value of the loop variable after
      one loop iteration.
  Raises:
    ValueError: If `merge_var` has a more specific shape than its
      corresponding `next_var`.
"""
if isinstance(merge_var, ops.Tensor):
m_shape = merge_var.get_shape()
n_shape = next_var.get_shape()
if not _ShapeLessThanOrEqual(n_shape, m_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape %s, but has shape %s after one iteration. "
"Provide shape invariants using either the `shape_invariants` "
"argument of tf.while_loop or set_shape() on the loop variables."
% (merge_var.name, m_shape, n_shape))
else:
    if not isinstance(merge_var,
                      (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(merge_var))
    if isinstance(merge_var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = tensor_shape.TensorShape(None)
if merge_var.dense_shape is not None:
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = tensor_shape.TensorShape(None)
if next_var.dense_shape is not None:
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):
if not _ShapeLessThanOrEqual(n_values_shape, m_values_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either the "
"`shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
else:
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or
not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either "
"the `shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
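# A minimal sketch of the `shape_invariants` argument that the error messages
# above refer to (illustrative only; while_loop is defined elsewhere in this
# module, and the loop body is a placeholder):
#
#   i0 = constant_op.constant(0)
#   acc0 = array_ops.zeros([1])
#   _, acc = while_loop(
#       lambda i, acc: i < 5,
#       # acc changes size every iteration, so its invariant must be [None].
#       lambda i, acc: (i + 1, array_ops.zeros([i + 2])),
#       [i0, acc0],
#       shape_invariants=[i0.get_shape(), tensor_shape.TensorShape([None])])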
def _AddNextAndBackEdge(m, v):
"""Add NextIteration and back edge from v to m."""
if isinstance(m, ops.Tensor):
v = ops.convert_to_tensor(v)
v = _NextIteration(v)
m.op._update_input(1, v) # pylint: disable=protected-access
elif isinstance(m, ops.IndexedSlices):
# pylint: disable=protected-access
v = math_ops._as_indexed_slices(v, optimize=False)
v = _NextIteration(v)
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
# pylint: enable=protected-access
if m.dense_shape is not None:
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, sparse_tensor.SparseTensor):
if not isinstance(v, sparse_tensor.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
m.dense_shape.op._update_input(1, v.dense_shape)
# pylint: enable=protected-access
else:
raise TypeError("Type %s not supported" % type(m))
return v
class GradLoopState(object):
"""The state used for constructing the gradient graph for a while loop.
We create a GradLoopState for each while loop in forward and its
corresponding while loop in backprop. This gives us access to both
the forward and the backprop WhileContexts.
During the construction of gradient graph, any time when we detect
a forward value that is needed for backprop, we create a history
accumulator and add it to `history_map`. Any time when we backprop
a loop switch op (in _SwitchGrad), we add the grad merge op in
`switch_map`.
"""
def __init__(self, forward_ctxt, outer_grad_state):
# The grad loop state for the outer while loop.
self._outer_grad_state = None
# The while loop context for forward.
self._forward_context = None
# The loop counter added by AddForwardLoopCounter. It is the value
# of the loop counter for the next iteration.
self._forward_index = None
# A sync op for forward.
self._forward_sync = None
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackPropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
# A sync op for backprop.
self._grad_sync = None
# Information needed by backprop.
self._history_map = {}
self._switch_map = {}
self._unused_exits = []
self._deferred_exits = []
self._forward_loop_exits = list(forward_ctxt.loop_exits)
self._pending_exits_count = len(forward_ctxt.loop_exits)
self._outer_grad_state = outer_grad_state
if outer_grad_state:
outer_forward_ctxt = outer_grad_state.forward_context
else:
outer_forward_ctxt = forward_ctxt.outer_context
# Add the forward loop counter.
if outer_forward_ctxt: outer_forward_ctxt.Enter()
cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
self._forward_context = forward_ctxt
self._forward_index = forward_index
# Add the backprop WhileContext, and the backprop loop counter.
if outer_grad_state:
# This is a nested loop. Remember the iteration counts for each
# execution of this inner loop.
outer_forward_ctxt.AddName(cnt.name)
history_cnt = outer_grad_state.AddForwardAccumulator(cnt)
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
real_cnt = outer_grad_state.AddBackPropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
if outer_forward_ctxt: outer_forward_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
@property
def outer_grad_state(self):
"""The grad loop state for outer loop."""
return self._outer_grad_state
@property
def forward_context(self):
"""The while loop context for forward."""
return self._forward_context
@property
def forward_index(self):
"""The loop index of forward loop."""
return self._forward_index
@property
def forward_sync(self):
"""A control trigger node for synchronization in the forward loop.
One main use is to keep the push ops of a stack executed in the
iteration order.
"""
if self._forward_sync is None:
with ops.control_dependencies(None):
self._forward_sync = control_trigger(name="f_sync")
self._forward_sync._set_control_flow_context(self._forward_context)
self._forward_index.op._add_control_input(self._forward_sync)
return self._forward_sync
@property
def grad_context(self):
"""The corresponding WhileContext for gradient."""
return self._grad_context
@property
def grad_index(self):
"""The loop index of backprop loop."""
return self._grad_index
@property
def grad_sync(self):
"""A control trigger node for synchronization in the grad loop.
One main use is to keep the pop ops of a stack executed in the
iteration order.
"""
if self._grad_sync is None:
with ops.control_dependencies(None):
self._grad_sync = control_trigger(name="b_sync")
self._grad_sync._set_control_flow_context(self._grad_context)
self._grad_index.op._add_control_input(self._grad_sync)
return self._grad_sync
@property
def history_map(self):
"""The map that records all the tensors needed for backprop."""
return self._history_map
@property
def switch_map(self):
"""The map that records all the Switch ops for the while loop."""
return self._switch_map
@property
def unused_exits(self):
"""The list of "unused" exits."""
return self._unused_exits
@property
def deferred_exits(self):
"""The list of "deferred" exits."""
return self._deferred_exits
@property
def forward_loop_exits(self):
"""The list of exits of the forward loop."""
return self._forward_loop_exits
@property
def pending_exits_count(self):
"""The number of exits we expect to see but haven't."""
return self._pending_exits_count
@pending_exits_count.setter
def pending_exits_count(self, cnt):
"""Set the pending count to cnt."""
self._pending_exits_count = cnt
def AddForwardAccumulator(self, value, dead_branch=False):
"""Add an accumulator for each forward tensor that is needed in backprop.
This is added to the forward loop at the first time when a tensor
in the forward loop is used by backprop gradient computation loop.
We create an accumulator that accumulates the value of tensor at each
iteration. Called in the control flow context where gradients() is called.
The pseudocode is:
```
acc = stack();
while (_pivot) {
acc = stack_push(acc, value);
}
```
We make sure that the stack push op in one iteration is executed before
next iteration. This is achieved by adding a control edge from
`forward_index.op.inputs[0].op` to the push op, and another control
edge from the push op to either `forward_index.op` or `forward_sync`.
Args:
value: The source tensor in forward that is to be accumulated.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The stack that contains the accumulated history of the tensor.
Raises:
TypeError: For internal errors involving the value condition context.
"""
curr_ctxt = ops.get_default_graph()._get_control_flow_context()
with ops.control_dependencies(None):
if curr_ctxt: curr_ctxt.Enter()
with ops.colocate_with(value):
# pylint: disable=protected-access
acc = gen_data_flow_ops._stack(value.dtype.base_dtype, name="f_acc")
# pylint: enable=protected-access
if curr_ctxt: curr_ctxt.Exit()
# Make acc available in the forward context.
enter_acc = self.forward_context.AddValue(acc)
# Add the stack_push op in the context of value.op.
swap_enabled = self.forward_context.swap_memory
value_ctxt = _GetOutputContext(value.op)
if value_ctxt == self.forward_context:
# value is not nested in the forward context.
self.forward_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
self.forward_context.Exit()
# Protect stack push and order it before forward_index.
self.forward_index.op._add_control_input(push.op)
else:
# value is in a cond context within the forward context.
if not isinstance(value_ctxt, CondContext):
raise TypeError(
"value_ctxt is not a CondContext: %s" % value_ctxt)
if dead_branch:
# The special case for creating a zero tensor for a dead
# branch of a switch. See ControlFlowState.ZerosLike().
value_ctxt.outer_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.outer_context.Exit()
push.op._set_control_flow_context(value_ctxt)
else:
value_ctxt.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.Exit()
# Protect stack push and order it before forward_sync.
self.forward_sync._add_control_input(push.op)
# Order stack push after the successor of forward_index
add_op = self.forward_index.op.inputs[0].op
push.op._add_control_input(add_op)
return acc
def AddBackPropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
This is added to the backprop loop. Called in the grad context to
get the value of an accumulated value. The stack pop op must be guarded
by the pred of the controlling cond.
Args:
history_value: The history (a stack) of a value.
value: The value that is pushed onto the stack.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The current value (the top of the stack).
"""
history_ctxt = history_value.op._get_control_flow_context()
# Find the cond context that controls history_value if any.
cond_ctxt = None
value_ctxt = value.op._get_control_flow_context()
while value_ctxt and value_ctxt != history_ctxt:
if isinstance(value_ctxt, CondContext):
cond_ctxt = value_ctxt
break
value_ctxt = value_ctxt.outer_context
with ops.control_dependencies(None):
self.grad_context.Enter()
if cond_ctxt:
# Guard stack pop with a switch if it is controlled by a cond.
grad_state = self
pred = None
while pred is None and grad_state:
pred = grad_state.history_map.get(cond_ctxt.pred.name)
grad_state = grad_state.outer_grad_state
if pred is None:
pred = cond_ctxt.pred
branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
history_value = _SwitchRefOrTensor(history_value, pred)[branch]
pop = gen_data_flow_ops._stack_pop(history_value, value.dtype.base_dtype)
pop.set_shape(value.get_shape())
self.grad_context.Exit()
parallel_iterations = self.grad_context.parallel_iterations
if parallel_iterations > 1:
# All pops are ordered after pivot_for_body and before grad_sync.
self.grad_sync._add_control_input(pop.op)
return pop
def GetRealValue(self, value):
"""Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history.
"""
assert value.op.type not in ["Variable", "VariableV2"]
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = _GetLoopConstantEnter(cur_value)
if enter_op:
# Special case: cur_value comes from a constant Enter node.
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
# We are now outside all nested loops for this gradient(),
# so `value` is a loop invariant and there is no need to
            # save the history of value. Just make cur_value enter
# the right control flow context.
real_value = self._grad_context.AddValue(cur_value)
break
else:
# Record the history of this value in forward_ctxt.
# TODO(yuanbyu): Avoid recording constants.
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackPropAccumulatedValue(history_value,
cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
"""Maintain the mapping from the loops to their grad states."""
def __init__(self):
self._map = {} # maps forward loop context to GradLoopState
def GetGradState(self, op, before):
"""Return the grad state for this op if it's in a forward loop context."""
if before and IsLoopExit(op):
forward_ctxt = op._get_control_flow_context()
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
else:
forward_ctxt = _GetWhileContext(op)
if forward_ctxt:
return self._map.get(forward_ctxt)
return None
def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
"""Process all the "unused" loop exits.
The "unused" exits of the loops are added to `unused_exits`. An exit is
unused if its pending_count is 0. If there is an exit with real gradient,
all these deferred exits will enter the backprop loop with zero gradient.
Otherwise, they will enter the backprop loop with None. As an example,
people often write:
```
v1, _ = tf.while_loop(p, b, [x1, x2])
result = gradients(v1, x1)
```
The exit node for x2 is not included by the betweenness analysis. But we
need to backprop x2 if x2 is involved in computing v1.
Args:
pending_count: The number of backprop inputs for every op.
to_ops_set: The set of ops for ys in gradients(ys, xs)
Returns:
The set of unused loop exits that we know at this point we need
to backprop.
"""
loop_exits = []
for _, grad_state in self._map.items():
for y in grad_state.forward_loop_exits:
# pylint: disable=protected-access
if pending_count[y.op._id] == 0:
grad_state.pending_exits_count -= 1
if y.op._id not in to_ops_set:
grad_state.unused_exits.append(y)
if grad_state.pending_exits_count == 0:
loop_exits.extend(grad_state.unused_exits)
# pylint: enable=protected-access
return loop_exits
def EnterGradWhileContext(self, op, before):
"""Enter the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Enter()
def ExitGradWhileContext(self, op, before):
"""Exit the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Exit()
def AddWhileContext(self, op, between_op_list, between_ops):
"""Add the grad state for the while loop that op belongs to.
Note that op is an Exit, and this method must be called in
the control flow context where gradients() is called.
Note that this method modifies `between_op_list` and `between_ops`.
"""
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# This is a new while loop so create a grad state for it.
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
grad_state = GradLoopState(forward_ctxt, outer_grad_state)
self._map[forward_ctxt] = grad_state
# We need to include all exits of a loop for backprop.
for loop_exit in grad_state.forward_loop_exits:
if not between_ops[loop_exit.op._id]:
between_ops[loop_exit.op._id] = True
between_op_list.append(loop_exit.op)
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
A zero tensor of the same shape as val.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
def ZerosLike(self, op, index):
"""Create zeros_like for the specified output of an op.
If op is in a while loop that is part of gradients(), this method
must be called in its grad loop context.
Args:
op: A tensorflow operation.
index: the index for a specific output of the op.
Returns:
A zero tensor of the same shape as op.outputs[index].
"""
if IsLoopSwitch(op): return None
dead_branch = IsSwitch(op)
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# op is not in a while loop that is part of gradients().
return ZerosLikeOutsideLoop(op, index)
op_ctxt = op._get_control_flow_context()
val = ops.convert_to_tensor(op.outputs[index], name="tensor")
shape = val.get_shape()
if shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor with
# the right shape in the grad loop context.
result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
if dead_branch:
# op is a cond switch. Guard the zero tensor with a switch.
pred = grad_state.history_map.get(op_ctxt.pred.name)
branch = op_ctxt.branch
result = _SwitchRefOrTensor(result, pred)[1 - branch]
else:
# Unknown shape so keep a history of the shape at runtime.
if dead_branch:
# Need to add a special switch to guard the value.
pred = op_ctxt.pred
branch = op_ctxt.branch
op_ctxt.outer_context.Enter()
val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.outer_context.Exit()
val.op._set_control_flow_context(op_ctxt)
zeros_shape.op._set_control_flow_context(op_ctxt)
else:
op_ctxt.Enter()
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.Exit()
# Add forward accumulator for shape.
grad_state.grad_context.Exit()
history_zeros_shape = grad_state.AddForwardAccumulator(
zeros_shape, dead_branch=dead_branch)
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
def PostProcessing(self):
"""Perform postprocessing at the end of gradients().
We have created the gradient graph at this point. So this function
can be used to perform any postprocessing on the gradient graph.
We currently perform the following postprocessing:
1. Patch the gradient graph if the output of a loop variable
doesn't depend on its input.
"""
for _, grad_state in self._map.items():
for _, b_merge in grad_state.switch_map.items():
if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
# The value of this loop variable at iteration i+1 doesn't
# depend on its value at iteration i. So use zeros as the
# gradients for all iterations > 0.
dtype = b_merge.op.inputs[0].dtype
shape = b_merge.op.inputs[0].get_shape()
# pylint: disable=protected-access
if shape.is_fully_defined():
grad_state.grad_context.Enter()
# Create a zeros and use it for iterations > 0.
grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
else:
# Create a zeros in the outer grad context.
outer_grad_ctxt = grad_state.grad_context.outer_context
if outer_grad_ctxt: outer_grad_ctxt.Enter()
enter_grad_op = b_merge.op.inputs[0].op
enter_grad = enter_grad_op.inputs[0]
grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
grad_val = array_ops.zeros(grad_shape)
if outer_grad_ctxt: outer_grad_ctxt.Exit()
# Use the zeros for iterations > 0.
grad_state.grad_context.Enter()
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
b_merge.op._update_input(1, next_grad_val)
# pylint: enable=protected-access
def MaybeCreateControlFlowState(between_op_list, between_ops,
colocate_gradients_with_ops):
"""Create the state for all the while loops involved in one gradients().
We create a ControlFlowState when there are while loops involved in
gradients(). In gradients(), control flow logic is only invoked when
the ControlFlowState is not None.
Note that this method modifies `between_op_list` and `between_ops`.
"""
loop_state = None
for op in between_op_list:
if IsLoopExit(op):
if loop_state is None:
loop_state = ControlFlowState()
if colocate_gradients_with_ops:
with ops.colocate_with(op):
loop_state.AddWhileContext(op, between_op_list, between_ops)
else:
loop_state.AddWhileContext(op, between_op_list, between_ops)
return loop_state
def IsSwitch(op):
"""Return true if `op` is a Switch."""
return op.type == "Switch" or op.type == "RefSwitch"
def IsLoopExit(op):
"""Return true if `op` is an Exit."""
return op.type == "Exit" or op.type == "RefExit"
def IsLoopSwitch(op):
"""Return true if `op` is the Switch for a while loop."""
if IsSwitch(op):
ctxt = op._get_control_flow_context()
return ctxt and isinstance(ctxt, WhileContext)
return False
def ZerosLikeOutsideLoop(op, index):
"""Create zeros_like for the specified output of an op."""
val = op.outputs[index]
if not IsSwitch(op):
return array_ops.zeros_like(val, optimize=False)
else:
op_ctxt = op._get_control_flow_context()
pred = op_ctxt.pred
branch = op_ctxt.branch
switch_val = switch(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
return array_ops.zeros(zeros_shape, dtype=val.dtype)
class ControlFlowContext(object):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
"""
def __init__(self, values_def=None, import_scope=None):
self._outer_context = ops.get_default_graph()._get_control_flow_context()
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def,
import_scope=import_scope)
else:
# Values that have been already seen in this context.
self._values = set()
# Values referenced by but external to this context.
self._external_values = {}
def _init_values_from_proto(self, values_def, import_scope=None):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
self._external_values[k] = g.as_graph_element(
ops.prepend_name_scope(v, import_scope))
op_names = set([op.split(":")[0]
for op in self._values - set(self._external_values)])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(ops.prepend_name_scope(
op, import_scope))._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
def _to_proto(self, export_scope=None):
"""Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend(
[ops.strip_name_scope(v, export_scope)
for v in sorted(self._values)])
for k, v in self._external_values.items():
values_def.external_values[k] = ops.strip_name_scope(
v.name, export_scope)
return values_def
@staticmethod
def _from_proto(values_def, import_scope=None):
"""Returns a `ControlFlowContext` created from `values_def`."""
return ControlFlowContext(values_def=values_def,
import_scope=import_scope)
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
for x in result:
self._outer_context.AddName(x.name)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _IsInOuterContext(self, op):
op_ctxt = _GetOutputContext(op)
outer_ctxt = self.outer_context
while outer_ctxt != op_ctxt:
if outer_ctxt is None:
return False
outer_ctxt = outer_ctxt.outer_context
return True
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs = op.control_inputs
else:
internal_control_inputs = []
for x in op.control_inputs:
ctxt = _GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
if len(internal_control_inputs) != len(op.control_inputs):
del op.control_inputs[:]
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs
# pylint: enable=protected-access
class CondContext(ControlFlowContext):
"""The context for the conditional construct."""
def __init__(self, pred=None, pivot=None, branch=None,
name="cond_text", context_def=None, import_scope=None):
"""Creates a `CondContext`.
Args:
pred: The `boolean` tensor for the conditional predicate.
pivot: The predicate tensor in this branch.
branch: 0 or 1 representing this branch.
name: Name of the `CondContext` python object.
context_def: Optional `CondContextDef` protocol buffer to initialize the
`CondContext` object from.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
"""
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
# Initializes the default fields.
ControlFlowContext.__init__(self)
self._pred = pred # The boolean tensor for the cond predicate
self._pivot = pivot # The predicate tensor in this branch
self._branch = branch # 0 or 1 representing this branch
# Values considered to have been already seen in this context.
self._values.add(pred.name)
self._values.add(pivot.name)
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def pred(self):
return self._pred
@property
def pivot(self):
return self._pivot
@property
def branch(self):
return self._branch
@property
def grad_state(self):
if self.GetWhileContext():
return self.GetWhileContext().grad_state
return None
@property
def back_prop(self):
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def to_proto(self, export_scope=None):
"""Converts a `CondContext` to a `CondContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `CondContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.pred_name = ops.strip_name_scope(
self._pred.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_proto(
export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `CondContext` object created from `context_def`."""
return CondContext(context_def=context_def,
import_scope=import_scope)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context. This is needed in
# particular for nested conds.
result = self._external_values.get(val.name)
result = val if result is None else result
else:
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
with ops.control_dependencies(None):
result = _SwitchRefOrTensor(result, self._pred)[self._branch]
result.op.graph.prevent_fetching(result.op)
# pylint: disable=protected-access
result.op._set_control_flow_context(self)
# pylint: enable=protected-access
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context."""
if not op.inputs:
# Remove any external control dependency on this op
self._RemoveExternalControlEdges(op)
# pylint: disable=protected-access
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
# pylint: disable=protected-access
op._update_input(index, real_x)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _ProcessOutputTensor(self, val):
"""Process an output tensor of a conditional branch."""
real_val = val
if val.name not in self._values:
# Handle the special case of lambda: x
self._values.add(val.name)
if self._outer_context:
real_val = self._outer_context.AddValue(val)
self._values.add(real_val.name)
real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
self._external_values[val.name] = real_val
else:
external_val = self._external_values.get(val.name)
if external_val is not None:
real_val = external_val
return real_val
def BuildCondBranch(self, fn):
"""Add the subgraph defined by fn() to the graph."""
r = fn()
original_r = r
result = []
if r is not None:
if not isinstance(r, list) and not isinstance(r, _basetuple):
r = [r]
original_r = [original_r]
r = _convert_tensorarrays_to_flows(r)
for v in r:
real_v = v
if isinstance(v, ops.Operation):
# Use pivot as the proxy for this op.
real_v = with_dependencies([v], self._pivot)
else:
if isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
dense_shape = v.dense_shape
if dense_shape is not None:
dense_shape = self._ProcessOutputTensor(dense_shape)
real_v = ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.dense_shape)
real_v = sparse_tensor.SparseTensor(indices, values, dense_shape)
else:
real_v = self._ProcessOutputTensor(v)
result.append(real_v)
return original_r, result
def cond(pred, fn1, fn2, name=None):
"""Return either fn1() or fn2() based on the boolean predicate `pred`.
`fn1` and `fn2` both return lists of output tensors. `fn1` and `fn2` must have
the same non-zero number and type of outputs.
Note that the conditional execution applies only to the operations defined in
fn1 and fn2. Consider the following simple program:
```python
z = tf.multiply(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
If x < y, the `tf.add` operation will be executed and `tf.square`
operation will not be executed. Since z is needed for at least one
branch of the cond, the `tf.multiply` operation is always executed, unconditionally.
Although this behavior is consistent with the dataflow model of TensorFlow,
it has occasionally surprised some users who expected lazier semantics.
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `fn1` or `fn2`. If the callables
return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `fn1` or `fn2` is not callable.
ValueError: if `fn1` and `fn2` do not return the same number of tensors, or
return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.multiply(x, 17)
def f2(): return tf.add(y, 23)
r = tf.cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
with ops.name_scope(name, "cond", [pred]) as name:
if not callable(fn1):
raise TypeError("fn1 must be callable.")
if not callable(fn2):
raise TypeError("fn2 must be callable.")
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool")
p_2, p_1 = switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = CondContext(pred, pivot_1, branch=1)
context_t.Enter()
orig_res, res_t = context_t.BuildCondBranch(fn1)
context_t.ExitResult(res_t)
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = CondContext(pred, pivot_2, branch=0)
context_f.Enter()
_, res_f = context_f.BuildCondBranch(fn2)
context_f.ExitResult(res_f)
context_f.Exit()
# Add the final merge to the graph.
if len(res_t) != len(res_f):
raise ValueError("fn1 and fn2 must return the same number of results.")
if not res_t:
raise ValueError("fn1 and fn2 must return at least one result.")
for x, y in zip(res_f, res_t):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, sparse_tensor.SparseTensor) and
isinstance(y, sparse_tensor.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
raise ValueError("Outputs of fn1 and fn2 must have the same type: "
"%s, %s" % (val_x.dtype.name, val_y.dtype.name))
merges = [merge([x[0], x[1]])[0] for x in zip(res_f, res_t)]
merges = _convert_flows_to_tensorarrays(orig_res, merges)
# Add to collections
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
return merges[0] if len(merges) == 1 else merges
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self, parallel_iterations=10, back_prop=True, swap_memory=False,
name="while_context", grad_state=None, context_def=None,
import_scope=None):
""""Creates a `WhileContext`.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
grad_state: The gradient loop state.
context_def: Optional `WhileContextDef` protocol buffer to initialize
the `WhileContext` python object from.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
"""
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._init_from_args(parallel_iterations, back_prop, swap_memory,
name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, parallel_iterations, back_prop, swap_memory,
name):
"""Creates a new `WhileContext` from arguments.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("`parallel_iterations` must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_pred_name, import_scope))
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_body_name, import_scope))
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
# The list of exit tensors for loop variables.
self._loop_exits = [g.as_graph_element(
ops.prepend_name_scope(exit_name, import_scope))
for exit_name in context_def.loop_exit_names]
super(WhileContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self, export_scope=None):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(
self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(
self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
if self._loop_exits:
context_def.loop_exit_names.extend(
[ops.strip_name_scope(l.name, export_scope)
for l in self._loop_exits])
context_def.values_def.MergeFrom(
super(WhileContext, self)._to_proto(
export_scope=export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
"""
return WhileContext(context_def=context_def,
import_scope=import_scope)
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
if val.name not in self._values:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = _GetWhileContext(val.op)
if IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(result, self._name, is_constant=True,
parallel_iterations=self._parallel_iterations)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
if op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context.
In the case that op has only external data inputs, we remove all of its
external control inputs so all its inputs are in the same while loop
context. This is valid because op now has an Enter input that has all
the right control dependency.
"""
if not op.inputs:
# Remove any external control dependency on this op
control_inputs = self._RemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x)
# Remove any external control dependency on this op.
self._RemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _MaybeAddControlDependency(self, op):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
if op.control_inputs:
return False
for x in op.inputs:
if not _IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(n, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.loop_exits.append(total_iterations)
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackPropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(count, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
pred = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(pred, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.subtract(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
self.loop_exits.append(final_zero)
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackPropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context: self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
value = op.inputs[0]
if (isinstance(self.outer_context, WhileContext) and
self.outer_context.grad_state is not None):
# We are in a nested while loop.
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
if self.outer_context: self.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
if self.outer_context: self.outer_context.Exit()
acc._shape = grad.get_shape() # pylint: disable=protected-access
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(acc, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
acc_result = exit(switch_acc_false, name="b_acc")
self.loop_exits.append(acc_result)
self.ExitResult([acc_result])
return acc_result
def AddBackPropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equivalent of AddBackPropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context: self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(1)] + values.get_shape().dims[1:])
if self.outer_context: self.outer_context.Enter()
values_acc = constant_op.constant(0, values.dtype, shape=values_shape,
name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context: self.outer_context.Enter()
shape_acc = constant_op.constant(0, dense_shape.dtype,
shape=dense_shape.get_shape())
if self.outer_context: self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(op.inputs[0], optimize=False),
optimize=False)
if self.outer_context: self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
enter_acc = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc") for x in init_acc]
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [
array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(
math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
acc_exits = [exit(x[0], name="b_acc") for x in switch_acc]
self.loop_exits.extend(acc_exits)
self.ExitResult(acc_exits)
return ops.IndexedSlices(
indices=acc_exits[0], values=acc_exits[1],
dense_shape=acc_exits[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, ops.Tensor):
self._values.add(x.name)
else:
self._values.add(x.values.name)
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, sparse_tensor.SparseTensor):
dense_shape = x.dense_shape
else:
raise TypeError("Type %s not supported" % type(x))
if dense_shape is not None:
self._values.add(dense_shape.name)
def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,
shape_invariants):
"""Core: Add the loop termination condition and body to the graph."""
flat_loop_vars = nest.flatten(original_loop_vars)
# Let the context know the loop variables so the loop variables
# would be added in the outer contexts properly.
self._InitializeValues(loop_vars)
real_vars = loop_vars
if self._outer_context:
real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
with ops.control_dependencies(None):
enter_vars = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=(shape_invariants is None))
for x in real_vars]
if self._outer_context:
control_pivot = self._outer_context.GetControlPivot().op
for var in enter_vars:
if _IsLoopConstantEnter(var.op.inputs[0].op):
# pylint: disable=protected-access
var.op._add_control_input(control_pivot)
# pylint: enable=protected-access
_SetShapeInvariants(real_vars, enter_vars, shape_invariants)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext(enter_vars)
self._InitializeValues(enter_vars)
merge_vars = [merge([x, x])[0] for x in enter_vars]
self._pivot_for_pred = merge_vars[0]
# Build the graph for pred.
merge_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))
packed_vars = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=merge_vars_with_tensor_arrays)
c = ops.convert_to_tensor(pred(*packed_vars))
self._pivot = loop_cond(c, name="LoopCond")
switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]
# Build the graph for body.
vars_for_body = [_Identity(x[1]) for x in switch_vars]
self._pivot_for_body = vars_for_body[0]
# Convert TensorArray flow variables inside the context back into
# their associated TensorArrays for calling the body.
vars_for_body_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))
packed_vars_for_body = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=vars_for_body_with_tensor_arrays)
body_result = body(*packed_vars_for_body)
if not nest.is_sequence(body_result):
body_result = [body_result]
# Compare the structure types of input and output of body.
# For backwards compatibility, the first layer is forced to a list
# during this comparison, because inputs are typically lists and
# outputs of the body are typically tuples.
nest.assert_same_structure(list(packed_vars_for_body), list(body_result))
# Store body_result to keep track of TensorArrays returned by body
original_body_result = body_result
# Convert TensorArrays returned by body into their flow variables
flat_result = nest.flatten(body_result)
result = _convert_tensorarrays_to_flows(flat_result)
result = ops.convert_n_to_tensor_or_indexed_slices(result)
# Add NextIteration and the back edges to complete the loop.
if len(merge_vars) != len(result):
raise ValueError("Number of inputs and outputs of body must match "
"loop_vars: %d, %d" % (len(merge_vars), len(result)))
next_vars = []
for m, v in zip(merge_vars, result):
next_vars.append(_AddNextAndBackEdge(m, v))
# Add the exit ops.
exit_vars = [exit(x[0]) for x in switch_vars]
self._loop_exits = exit_vars
# Make sure the shapes of loop outputs are correct.
for m_var, n_var in zip(merge_vars, next_vars):
if isinstance(m_var, ops.Tensor):
_EnforceShapeInvariant(m_var, n_var)
# Exit the loop.
self.ExitResult(exit_vars)
return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
original_loop_vars = loop_vars
flat_loop_vars = nest.flatten(loop_vars)
# Convert TensorArrays to their flow variables
loop_vars = _convert_tensorarrays_to_flows(flat_loop_vars)
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
self.Exit()
flat_result = nest.flatten(original_body_result)
# Convert TensorArray flow variables outside the context back into
# their associated TensorArrays for returning to caller.
exit_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_result, exit_vars))
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
return (packed_exit_vars[0] if len(exit_vars) == 1
else packed_exit_vars)
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
# pylint: disable=protected-access
for e in enters:
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape
if shape is not None:
xs.append(shape)
for x in xs:
inp_op = x.op.inputs[0]
control_inputs = graph._control_dependencies_for_inputs([inp_op])
outer_control_inputs = [op for op in control_inputs
if self._IsInOuterContext(op)]
x.op._set_control_flow_context(self)
x.op._add_control_inputs(outer_control_inputs)
graph._record_op_seen_by_control_dependencies(x.op)
# pylint: enable=protected-access
def while_loop(cond, body, loop_vars, shape_invariants=None,
parallel_iterations=10, back_prop=True, swap_memory=False,
name=None):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
returning a (possibly nested) tuple, namedtuple or list of tensors of the same
arity (length and structure) and types as `loop_vars`. `loop_vars` is a
(possibly nested) tuple, namedtuple or list of tensors that is passed to both
`cond` and `body`. `cond` and `body` both take as many arguments as there are
`loop_vars`.
While `cond` evaluates to true, `body` is executed.
In addition to regular Tensors or IndexedSlices, the body may accept and
return TensorArray objects. The flows of the TensorArray objects will
be appropriately forwarded between loops and during gradient calculations.
For correctness, `tf.while_loop()` strictly enforces shape invariants for
the loop variables. A shape invariant is a (possibly partial) shape that
is unchanged across the iterations of the loop. An error will be raised
if the shape of a loop variable after an iteration is determined to be more
general than or incompatible with its shape invariant. For example, a shape
of [11, None] is more general than a shape of [11, 17], and [11, 21] is not
compatible with [11, 17]. By default (if the argument `shape_invariants` is
not specified), it is assumed that the initial shape of each tensor in
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
[`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape)
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariant for
SparseTensor and IndexedSlices are treated specially as follows:
a) If a loop variable is a SparseTensor, the shape invariant must be
TensorShape([r]) where r is the rank of the dense tensor represented
by the sparse tensor. It means the shapes of the three tensors of the
SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here
is the shape of the SparseTensor.dense_shape property. It must be the shape of
a vector.
b) If a loop variable is an IndexedSlices, the shape invariant must be
a shape invariant of the values tensor of the IndexedSlices. It means
the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],
[shape.ndims]).
`while_loop` implements non-strict semantics, enabling multiple iterations
to run in parallel. The maximum number of parallel iterations can be
controlled by `parallel_iterations`, which gives users some control over
memory consumption and execution order. For correct programs, `while_loop`
should return the same result for any parallel_iterations > 0.
For training, TensorFlow remembers the tensors that are produced in the
forward inference but needed in back propagation. These tensors can be a
main source of memory consumption and often cause OOM problems when training
on GPUs. When the flag swap_memory is true, we swap out these tensors from
GPU to CPU. This for example allows us to train RNN models with very long
sequences and large batches.
Args:
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,
`Tensor`, and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Returns:
The output tensors for the loop variables after the loop. When the length
of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlices, and when
the length of `loop_vars` is greater than 1 it returns a list.
Raises:
TypeError: if `cond` or `body` is not callable.
ValueError: if `loop_vars` is empty.
Example:
```python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
```
Example with nesting and a namedtuple:
```python
import collections
Pair = collections.namedtuple('Pair', 'j, k')
ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))
c = lambda i, p: i < 10
b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))
ijk_final = tf.while_loop(c, b, ijk_0)
```
Example using shape_invariants:
```python
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
```
"""
with ops.name_scope(name, "while", loop_vars) as name:
if not loop_vars:
raise ValueError("No loop variables provided")
if not callable(cond):
raise TypeError("cond must be callable.")
if not callable(body):
raise TypeError("body must be callable.")
if parallel_iterations < 1:
raise TypeError("parallel_iterations must be a positive integer.")
if shape_invariants is not None:
nest.assert_same_structure(loop_vars, shape_invariants)
context = WhileContext(parallel_iterations, back_prop, swap_memory, name)
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, context)
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
return result
def _AsTensorList(x, p):
"""Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
"""
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_indexed_slices(v)
if isinstance(v, ops.Tensor):
l.append(array_ops.identity(v))
else:
l.append(ops.IndexedSlices(array_ops.identity(v.values),
array_ops.identity(v.indices)))
return l
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." %
(x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be
consumed externally only after some other dependencies have run
first. This function returns a tensor with the same value as `output_tensor`,
but one that is only produced after all operations in `dependencies` have run.
Note that this means there is no guarantee that the original `output_tensor`
itself will be evaluated after any `dependencies` have run.
See also `tuple` and `group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
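Example showing how the returned tensor gates its consumers on the
dependencies (a minimal sketch assuming the TF 1.x Python API; all names
below are illustrative and call this module's `with_dependencies` directly):
```python
import tensorflow as tf
counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)
loss = tf.constant(3.0)
# Evaluating `gated_loss` also forces `increment` to run first; evaluating
# `loss` directly would not.
gated_loss = with_dependencies([increment], loss)
```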
"""
with ops.name_scope(name, "control_dependency",
list(dependencies) + [output_tensor]) as name:
with ops.colocate_with(output_tensor):
with ops.control_dependencies(dependencies):
output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
if isinstance(output_tensor, ops.Tensor):
return _Identity(output_tensor, name=name)
else:
return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
def _GroupControlDeps(dev, deps, name=None):
with ops.control_dependencies(deps):
if dev is None:
return no_op(name=name)
else:
with ops.device(dev):
return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
When this op finishes, all ops in `input` have finished. This op has no
output.
See also `tuple` and `with_dependencies`.
Args:
*inputs: Zero or more tensors to group.
**kwargs: Optional parameters to pass when constructing the NodeDef.
name: A name for this operation (optional).
Returns:
An Operation that executes all its inputs.
Raises:
ValueError: If an unknown keyword argument is provided.
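Example showing that running the returned op runs all of its inputs
(a minimal sketch assuming the TF 1.x Python API; all names below are
illustrative):
```python
import tensorflow as tf
a = tf.Variable(1)
b = tf.Variable(2)
update_a = tf.assign(a, 10)
update_b = tf.assign(b, 20)
# `updates` is an Operation with no output; running it runs both assigns.
updates = tf.group(update_a, update_b)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(updates)
  print(sess.run([a, b]))  # [10, 20]
```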
"""
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
with ops.name_scope(name, "group_deps", inputs) as name:
# Grouping no inputs means do nothing
if not inputs:
return no_op(name=name)
# Sorts *inputs according to their devices.
ops_on_device = {} # device -> operations specified on the device.
for inp in inputs:
dev = inp.device
if dev in ops_on_device:
ops_on_device[dev].append(inp)
else:
ops_on_device[dev] = [inp]
if len(ops_on_device) == 1:
# 1-level tree. The root node is the returned NoOp node.
(dev, deps), = ops_on_device.items()
return _GroupControlDeps(dev, deps, name=name)
# 2-level tree. The root node is the returned NoOp node.
# deps contains 1 NoOp node for each device.
deps = []
def device_key(dev):
"""A sort key that allows None to be compared to strings."""
return "" if dev is None else dev
for dev in sorted(six.iterkeys(ops_on_device), key=device_key):
deps.append(_GroupControlDeps(dev, ops_on_device[dev]))
with ops.control_dependencies(deps):
return no_op(name=name)
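# Illustrative sketch (not part of the original module and never called here):
# `group` fuses several side-effecting ops into a single op with no output.
# `update_ops` is a hypothetical iterable of ops.
def _group_usage_sketch(update_ops):
  # Running the returned op runs every op in `update_ops`.
  return group(*update_ops)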
def tuple(tensors, name=None, control_inputs=None):
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
argument, except that the value of each tensor is only returned after the
values of all tensors have been computed.
`control_inputs` contains additional ops that have to finish before this op
finishes, but whose outputs are not returned.
This can be used as a "join" mechanism for parallel computations: all the
argument tensors can be computed in parallel, but the values of any tensor
returned by `tuple` are only available after all the parallel computations
are done.
See also `group` and `with_dependencies`.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
Returns:
Same as `tensors`.
Raises:
ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
with ops.name_scope(name, "tuple", tensors) as name:
gating_ops = [t.op for t in tensors if t is not None]
if control_inputs:
for c in control_inputs:
if isinstance(c, ops.Tensor):
c = c.op
elif not isinstance(c, ops.Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
gating_ops.append(c)
# Note that in order to ensure ordering in the pbtxt, we must take care to
# ensure the order here.
gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.
if not gating_ops:
raise ValueError("Must have at least one Tensor: %s" % tensors)
gate = group(*gating_ops)
tpl = []
for t in tensors:
if t is not None:
tpl.append(with_dependencies([gate], t))
else:
tpl.append(None)
return tpl
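# Illustrative sketch (not part of the original module and never called here):
# `tuple` acts as a join point: the returned tensors only yield their values
# once both inputs (which may be computed in parallel) have finished.
# `a` and `b` are hypothetical tensors.
def _tuple_usage_sketch(a, b):
  gated_a, gated_b = tuple([a, b])
  return gated_a, gated_b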
def case(pred_fn_pairs, default, exclusive=False, name="case"):
"""Create a case operation.
The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
Each pair contains a boolean scalar tensor and a python callable that
creates the tensors to be returned if the boolean evaluates to True.
`default` is a callable generating a list of tensors. All the callables
in `pred_fn_pairs` as well as `default` should return the same number
and types of tensors.
If `exclusive==True`, all predicates are evaluated, and an exception is
thrown if more than one of the predicates evaluates to `True`.
If `exclusive==False`, execution stops at the first predicate which
evaluates to True, and the tensors generated by the corresponding function
are returned immediately. If none of the predicates evaluate to True, this
operation returns the tensors generated by `default`.
Example 1:
Pseudocode:
```
if (x < y) return 17;
else return 23;
```
Expressions:
```
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
r = case([(tf.less(x, y), f1)], default=f2)
```
Example 2:
Pseudocode:
```
if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
if (x < y) return 17;
else if (x > z) return 23;
else return -1;
```
Expressions:
```
x = tf.constant(0)
y = tf.constant(1)
z = tf.constant(2)
def f1(): return tf.constant(17)
def f2(): return tf.constant(23)
def f3(): return tf.constant(-1)
r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
default=f3, exclusive=True)
```
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: A callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
pfp = pred_fn_pairs # For readability
if not (isinstance(pfp, list) or isinstance(pfp, _basetuple)
or isinstance(pfp, dict)):
raise TypeError("fns must be a list, tuple, or dict")
if isinstance(pfp, dict):
pfp = pfp.items()
if not exclusive:
logging.warn("%s: Provided dictionary of predicate/fn pairs, but "
"exclusive=False. Order of conditional tests is "
"not guaranteed.", name)
for tup in pfp:
if not isinstance(tup, _basetuple) or len(tup) != 2:
raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
pred, fn = tup
if pred.dtype != dtypes.bool:
raise TypeError("pred must be of type bool: %s", pred.name)
if not callable(fn):
raise TypeError("fn for pred %s must be callable." % pred.name)
if not callable(default):
raise TypeError("default must be callable.")
preds, fns = map(list, zip(*pfp))
with ops.name_scope(name, "case", [preds]):
if not preds:
return default()
not_preds = []
for i, p in enumerate(preds):
with ops.name_scope("not_%d" % i):
not_preds.append(math_ops.logical_not(p))
and_not_preds = [constant_op.constant(True, name="always_true")]
for i, notp in enumerate(not_preds):
with ops.name_scope("and_not_%d" % i):
and_not_preds.append(math_ops.logical_and(and_not_preds[-1], notp))
# preds = [p1, p2, p3]
# fns = [f1, f2, f3]
# not_preds = [~p1, ~p2, ~p3]
# and_not_preds = [True, ~p1, ~p1 & ~p2, ~p1 & ~p2 & ~p3]
# case_preds = [p1,
# p2 & ~p1,
# p3 & ~p2 & ~p1,
# ~p3 & ~p2 & ~p1]
case_preds = []
for i, (p, and_not_p_prev) in enumerate(zip(preds, and_not_preds[:-1])):
with ops.name_scope("case_%d" % i):
case_preds.append(math_ops.logical_and(p, and_not_p_prev))
with ops.name_scope("case_none_are_true"):
case_preds.append(and_not_preds[-1])
# Create an empty tensor, or list, with the right type and shape
with ops.name_scope("case_create_empty"):
dummy_value = default()
def _correct_empty(v):
if isinstance(v, ops.Operation):
return no_op()
elif v.dtype == dtypes.string:
return array_ops.constant("")
else:
return array_ops.constant(v.dtype.as_numpy_dtype())
if isinstance(dummy_value, collections.Sequence):
dummy_type = type(dummy_value)
empty = lambda: dummy_type(_correct_empty(v) for v in dummy_value)
else:
empty = lambda: _correct_empty(dummy_value)
# case_sequence = [
# cond(~p3 & ~p2 & ~p1, default, empty),
# cond(p3 & ~p2 & ~p1, f3, lambda: case_sequence[0]),
# cond(p2 & ~p1, f2, lambda: case_sequence[1]),
# cond(p1, f1, lambda: case_sequence[2])
# ]
#
# And the return value will be case_sequence[-1]
def _build_case():
all_fns = [fn for fn in fns]
all_fns.append(default)
prev_case = None
for i, (cp, fn) in enumerate(list(zip(case_preds, all_fns))[::-1]):
prev_case = cond(
cp, fn,
empty if i == 0 else lambda: prev_case,
name="If_%d" % i)
return prev_case
if exclusive:
preds_c = array_ops.stack(preds, name="preds_c")
num_true_conditions = math_ops.reduce_sum(
math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
at_most_one_true_condition = math_ops.less(
num_true_conditions, constant_op.constant(2, name="two_true_conds"))
error_msg = [
("More than one condition evaluated as True but "
"exclusive=True. Conditions: (%s), Values:"
% ", ".join([p.name for p in preds])),
preds_c]
with ops.control_dependencies([
Assert(condition=at_most_one_true_condition,
data=error_msg, summarize=len(preds))]):
case_seq = _build_case()
else:
case_seq = _build_case()
return case_seq
ops.register_proto_function(ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
|
2014cdbg17/2015cd_midterm2 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/atexit.py | 743 | """allow programmer to define multiple exit functions to be executed upon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
pass
def _clear(*args,**kw):
"""_clear() -> None
Clear the list of previously registered exit functions."""
pass
def _run_exitfuncs(*args,**kw):
"""_run_exitfuncs() -> None
Run all registered exit functions."""
pass
def register(*args,**kw):
"""register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator."""
pass
def unregister(*args,**kw):
"""unregister(func) -> None
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered"""
pass
|
mazvv/travelcrm | refs/heads/master | travelcrm/models/task.py | 1 | # -*-coding: utf-8-*-
from datetime import timedelta
from sqlalchemy import (
Column,
Integer,
String,
DateTime,
Table,
ForeignKey,
)
from sqlalchemy.orm import relationship, backref
from ..models import (
DBSession,
Base,
)
from ..lib import EnumIntType
from ..lib.utils.common_utils import translate as _
task_resource = Table(
'task_resource',
Base.metadata,
Column(
'task_id',
Integer,
ForeignKey(
'task.id',
ondelete='restrict',
onupdate='cascade',
name='fk_task_id_task_resource',
),
primary_key=True,
),
Column(
'resource_id',
Integer,
ForeignKey(
'resource.id',
ondelete='restrict',
onupdate='cascade',
name='fk_resource_id_task_resource',
),
primary_key=True,
)
)
task_upload = Table(
'task_upload',
Base.metadata,
Column(
'task_id',
Integer,
ForeignKey(
'task.id',
ondelete='restrict',
onupdate='cascade',
name='fk_task_id_task_upload',
),
primary_key=True,
),
Column(
'upload_id',
Integer,
ForeignKey(
'upload.id',
ondelete='restrict',
onupdate='cascade',
name='fk_upload_id_task_upload',
),
primary_key=True,
)
)
class Task(Base):
__tablename__ = 'task'
STATUS = (
('new', _(u'new')),
('enquiry', _(u'enquiry')),
('in_work', _(u'in work')),
('ready', _(u'ready')),
)
id = Column(
Integer,
autoincrement=True,
primary_key=True
)
resource_id = Column(
Integer,
ForeignKey(
'resource.id',
name="fk_resource_id_task",
ondelete='restrict',
onupdate='cascade',
),
nullable=False,
)
title = Column(
String(length=128),
nullable=False,
)
deadline = Column(
DateTime(timezone=True),
nullable=False,
)
reminder = Column(
Integer,
)
descr = Column(
String,
)
status = Column(
EnumIntType(STATUS),
default='new',
nullable=False,
)
resource = relationship(
'Resource',
backref=backref(
'task',
uselist=False,
cascade="all,delete"
),
foreign_keys=[resource_id],
cascade="all,delete",
uselist=False,
)
task_resource = relationship(
'Resource',
secondary=task_resource,
backref=backref(
'tasks',
uselist=True,
lazy='dynamic',
),
uselist=False,
)
uploads = relationship(
'Upload',
secondary=task_upload,
backref=backref(
'task',
uselist=False
),
cascade="all,delete",
uselist=True,
)
@classmethod
def get(cls, id):
if id is None:
return None
return DBSession.query(cls).get(id)
@classmethod
def by_resource_id(cls, resource_id):
if resource_id is None:
return None
return (
DBSession.query(cls).filter(cls.resource_id == resource_id).first()
)
@property
def reminder_datetime(self):
if self.reminder:
return self.deadline - timedelta(minutes=self.reminder)
|
cernops/rally | refs/heads/master | tests/unit/rally_jobs/test_jobs.py | 12 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import traceback
import mock
import yaml
from rally import api
from rally.common.plugin import discover
from rally.task import engine
from tests.unit import test
class RallyJobsTestCase(test.TestCase):
rally_jobs_path = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "rally-jobs")
@mock.patch("rally.task.engine.BenchmarkEngine"
"._validate_config_semantic")
def test_schema_is_valid(
self, mock_benchmark_engine__validate_config_semantic):
discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))
for filename in ["rally.yaml", "rally-neutron.yaml",
"rally-zaqar.yaml", "rally-designate.yaml"]:
full_path = os.path.join(self.rally_jobs_path, filename)
with open(full_path) as task_file:
try:
args_file = os.path.join(
self.rally_jobs_path,
filename.rsplit(".", 1)[0] + "_args.yaml")
args = {}
if os.path.exists(args_file):
args = yaml.safe_load(open(args_file).read())
if not isinstance(args, dict):
raise TypeError(
"args file %s must be dict in yaml or json "
"presenatation" % args_file)
task = api.Task.render_template(task_file.read(), **args)
task = yaml.safe_load(task)
eng = engine.BenchmarkEngine(task, mock.MagicMock())
eng.validate()
except Exception:
print(traceback.format_exc())
self.fail("Wrong task input file: %s" % full_path)
|
imron/scalyr-agent-2 | refs/heads/master | scalyr_agent/compat.py | 1 | # Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
if False: # NOSONAR
from typing import Union, Tuple, Any, Generator, Iterable, Optional
import sys
import struct
import os
import subprocess
import six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY_post_equal_279 = sys.version_info >= (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)
PY3_post_equal_37 = PY3 and sys.version_info >= (3, 7)
# NOTE: ssl.match_hostname was added in Python 2.7.9 so for earlier versions, we need to use
# version from backports package
if PY2_pre_279 or PY3_pre_32:
try:
from backports.ssl_match_hostname import (
match_hostname as ssl_match_hostname,
) # NOQA
from backports.ssl_match_hostname import CertificateError # NOQA
except ImportError:
# NOTE: We should never come here in real life. If we do, it indicates we messed up package
# creation and / or path mangling in scalyr_init().
raise Exception(
"Missing backports.ssl_match_hostname module, hostname verification can't "
"be performed"
)
else:
# ssl module in Python 2 >= 2.7.9 and Python 3 >= 3.2 includes match hostname function
from ssl import match_hostname as ssl_match_hostname # NOQA
from ssl import CertificateError # type: ignore # NOQA
def custom_any(iterable):
if sys.version_info[:2] > (2, 4):
return any(iterable)
else:
for element in iterable:
if element:
return True
return False
def custom_all(iterable):
if sys.version_info[:2] > (2, 4):
return all(iterable)
else:
for element in iterable:
if not element:
return False
return True
def custom_defaultdict(default_type):
if sys.version_info[:2] > (2, 4):
from collections import defaultdict
return defaultdict(default_type)
else:
class DefaultDict(dict):
def __getitem__(self, key):
if key not in self:
dict.__setitem__(self, key, default_type())
return dict.__getitem__(self, key)
return DefaultDict()
if six.PY2:
class EnvironUnicode(object):
"""Just a wrapper for os.environ, to convert its items to unicode in python2."""
def __getitem__(self, item):
value = os.environ[item]
return six.ensure_text(value)
def get(self, item, default=None):
value = os.environ.get(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def pop(self, item, default=None):
value = os.environ.pop(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def __setitem__(self, key, value):
key = six.ensure_text(key)
value = six.ensure_text(value)
os.environ[key] = value
@staticmethod
def _iterable_elements_to_unicode_generator(iterable):
# type: (Iterable) -> Generator[Union[Tuple, Any]]
"""Generator that gets values from original iterable and converts its 'str' values to 'unicode'"""
for element in iterable:
if type(element) is tuple:
yield tuple(
v.decode("utf-8", "replace")
if type(v) is six.binary_type
else v
for v in element
)
else:
yield six.ensure_text(element)
def iteritems(self):
return self._iterable_elements_to_unicode_generator(
six.iteritems(os.environ)
)
def items(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.items())
)
def iterkeys(self):
return self._iterable_elements_to_unicode_generator(
six.iterkeys(os.environ)
)
def keys(self):
return list(self._iterable_elements_to_unicode_generator(os.environ.keys()))
def itervalues(self):
return self._iterable_elements_to_unicode_generator(
six.itervalues(os.environ)
)
def values(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.values())
)
def copy(self):
return dict(self.items())
def __iter__(self):
return self.iterkeys()
def os_getenv_unicode(name, default=None):
"""The same logic as in os.environ, but with None check."""
result = os.getenv(name, default)
if result is not None:
result = six.ensure_text(result)
return result
os_environ_unicode = EnvironUnicode()
else:
os_environ_unicode = os.environ
os_getenv_unicode = os.getenv
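# Illustrative sketch (not part of the original module and never called here):
# os_environ_unicode behaves like os.environ but always yields text (unicode)
# values on both Python 2 and Python 3. "SCALYR_EXAMPLE_VAR" is a made-up name.
def _os_environ_unicode_usage_sketch():
    value = os_environ_unicode.get("SCALYR_EXAMPLE_VAR", "default")
    # `value` is six.text_type on both Python versions.
    return value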
# 2->TODO struct.pack|unpack, does not accept unicode as format string.
# see more: https://python-future.org/stdlib_incompatibilities.html#struct-pack
# to avoid conversion of format string on every struct.pack call, we can monkey patch it here.
if sys.version_info[:3] < (2, 7, 7):
def python_unicode_pack_unpack_wrapper(f):
def _pack_unpack(format_str, *args):
"""wrapper for struct.pack function that converts unicode format string to 'str'"""
binary_format_str = six.ensure_binary(format_str)
return f(binary_format_str, *args)
return _pack_unpack
struct_pack_unicode = python_unicode_pack_unpack_wrapper(struct.pack)
struct_unpack_unicode = python_unicode_pack_unpack_wrapper(struct.unpack)
else:
struct_pack_unicode = struct.pack
struct_unpack_unicode = struct.unpack
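# Illustrative sketch (not part of the original module and never called here):
# struct_pack_unicode/struct_unpack_unicode accept a text (unicode) format
# string even on Python versions where struct requires a byte string.
def _struct_pack_unicode_usage_sketch():
    packed = struct_pack_unicode(u">I", 42)
    return struct_unpack_unicode(u">I", packed)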
def which(executable):
# type: (str) -> Optional[str]
"""
Search for the provided executable in PATH and return path to it if found.
"""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
full_path = os.path.join(path, executable)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def find_executable(executable):
# type: (str) -> Optional[str]
"""
Wrapper around distutils.spawn.find_executable which is not available in some default Python 3
installations where full blown python3-distutils package is not installed.
"""
try:
from distutils.spawn import find_executable as distutils_find_executable
except ImportError:
# Likely Ubuntu 18.04 where python3-distutils package is not present (default behavior)
return which(executable)
return distutils_find_executable(executable)
def subprocess_check_output(cmd, *args, **kwargs):
"""
Wrapper around subprocess.check_output which is not available under Python 2.6.
"""
if sys.version_info < (2, 7, 0):
output = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs
).communicate()[0]
else:
output = subprocess.check_output(cmd, *args, **kwargs)
return output
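# Illustrative sketch (not part of the original module and never called here):
# the wrapper is invoked exactly like subprocess.check_output; on Python 2.6
# it falls back to Popen/communicate. The command below is a harmless example.
def _subprocess_check_output_usage_sketch():
    return subprocess_check_output(["echo", "hello"])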
|
mojoboss/scikit-learn | refs/heads/master | sklearn/neighbors/__init__.py | 306 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest']
|
toslunar/chainerrl | refs/heads/master | chainerrl/links/sequence.py | 1 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import chainer
from chainerrl.recurrent import RecurrentChainMixin
try:
# For Python 3.5 and later
from inspect import Parameter
from inspect import signature
except Exception:
from funcsigs import Parameter
from funcsigs import signature
def accept_variable_arguments(func):
for param in signature(func).parameters.values():
if param.kind in (Parameter.VAR_POSITIONAL,
Parameter.VAR_KEYWORD):
return True
return False
class Sequence(chainer.ChainList, RecurrentChainMixin):
"""Sequential callable Link that consists of other Links."""
def __init__(self, *layers):
self.layers = list(layers)
links = [layer for layer in layers if isinstance(layer, chainer.Link)]
# Cache the signatures because it might be slow
self.argnames = [set(signature(layer).parameters)
for layer in layers]
self.accept_var_args = [accept_variable_arguments(layer)
for layer in layers]
super().__init__(*links)
def __call__(self, x, **kwargs):
h = x
for layer, argnames, accept_var_args in zip(self.layers,
self.argnames,
self.accept_var_args):
if accept_var_args:
layer_kwargs = kwargs
else:
layer_kwargs = {k: v for k, v in kwargs.items()
if k in argnames}
h = layer(h, **layer_kwargs)
return h
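# Illustrative sketch (not part of the original module and never called here):
# a Sequence chains links and functions, forwarding keyword arguments only to
# layers whose signatures accept them. The layer sizes below are arbitrary.
def _sequence_usage_sketch():
    import chainer.functions as F
    import chainer.links as L
    return Sequence(L.Linear(None, 64), F.relu, L.Linear(64, 10))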
|
amyvmiwei/qt-creator | refs/heads/master | tests/system/suite_editors/tst_revert_changes/test.py | 1 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
import __builtin__
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
originalSources = os.path.abspath(os.path.join(os.getcwd(), "..", "shared", "simplePlainCPP"))
def init():
global homeShortCut, endShortCut
if platform.system() == "Darwin":
homeShortCut = "<Ctrl+Left>"
endShortCut = "<End>"
else:
homeShortCut = "<Home>"
endShortCut = "<Ctrl+End>"
def main():
folder = prepareTemplate(originalSources)
if folder == None:
test.fatal("Could not prepare test files - leaving test")
return
proFile = os.path.join(folder, "testfiles.pro")
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
openQmakeProject(proFile)
fileModifications = {"testfiles.testfiles\\.pro":__modifyProFile__,
"testfiles.Headers.testfile\\.h":__modifyHeader__,
"testfiles.Sources.testfile\\.cpp":__modifySource__,
"testfiles.Sources.main\\.cpp":None}
for fileName, modification in fileModifications.iteritems():
__modifyFile__(fileName, modification)
test.log("Reverting all files...")
fileModifications = dict(zip(fileModifications.keys(),
(__builtin__.bool(v) for v in fileModifications.values())))
revertChanges(fileModifications)
invokeMenuItem("File", "Exit")
def __modifyFile__(fileName, modificationFunc):
simpleFName = simpleFileName(fileName)
test.log("Opening file '%s'" % simpleFName)
openDocument(fileName)
if modificationFunc:
test.log("Modifying file '%s'" % simpleFName)
modificationFunc()
else:
test.log("Leaving file '%s' unmodified." % simpleFName)
# add some stuff to pro file
def __modifyProFile__():
proEditorStr = ":Qt Creator_TextEditor::TextEditorWidget"
addConfig = ["", "CONFIG += thread", "",
"lessThan(QT_VER_MAJ, 4) | lessThan(QT_VER_MIN, 7) {",
" error(Qt 4.7 or newer is required but version $$[QT_VERSION] was detected.)",
"}"]
addFile = [" \\", " not_existing.cpp"]
if placeCursorToLine(proEditorStr, "CONFIG -= qt"):
typeLines(proEditorStr, addConfig)
if placeCursorToLine(proEditorStr, "testfile.cpp"):
typeLines(proEditorStr, addFile)
# re-order some stuff inside header
def __modifyHeader__():
global cppEditorStr, homeShortCut, endShortCut
if placeCursorToLine(cppEditorStr, "class.+", True):
type(cppEditorStr, homeShortCut)
markText(cppEditorStr, "Down", 5)
invokeMenuItem("Edit", "Cut")
type(cppEditorStr, endShortCut)
type(cppEditorStr, "<Return>")
invokeMenuItem("Edit", "Paste")
# remove some stuff from source
def __modifySource__():
global cppEditorStr, homeShortCut
if placeCursorToLine(cppEditorStr, "void function1(int a);"):
type(cppEditorStr, homeShortCut)
markText(cppEditorStr, "Down")
type(cppEditorStr, "<Delete>")
if placeCursorToLine(cppEditorStr, "bool function1(int a) {"):
type(cppEditorStr, homeShortCut)
markText(cppEditorStr, "Down", 4)
type(cppEditorStr, "<Delete>")
def revertChanges(files):
for f,canRevert in files.iteritems():
simpleName = simpleFileName(f)
test.log("Trying to revert changes for '%s'" % simpleName)
if openDocument(f):
fileMenu = findObject("{name='QtCreator.Menu.File' title='File' type='QMenu'}")
for menuItem in object.children(fileMenu):
if str(menuItem.text) == 'Revert "%s" to Saved' % simpleName:
if (test.compare(canRevert, menuItem.enabled, "Verifying whether MenuItem "
"'Revert to Saved' has expected state (%s)"
% str(canRevert)) and canRevert):
invokeMenuItem('File', 'Revert "%s" to Saved' % simpleName)
clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
compareFileToOriginal(simpleName)
test.log("Reverted changes inside %s" % simpleName)
else:
test.fail("Could not open %s for reverting changes" % simpleName)
def compareFileToOriginal(fileName):
global originalSources
currentContent = str(waitForObject(getEditorForFileSuffix(fileName)).plainText)
origFile = open(os.path.join(originalSources, fileName), "r")
originalContent = origFile.read()
origFile.close()
test.compare(originalContent, currentContent,
"Comparing original to reverted file content for '%s'" % fileName)
|
OmairAJ/Plagia | refs/heads/master | local/matchReduce.py | 1 | #matchReduce.py
# python matchReduce.py -d text.m
import os
import sys
import string
import argparse
import fileinput
## Casefold text
def casefold(text):
text = text.lower()
text = text.translate(string.maketrans("",""), string.punctuation)
text = text.split()
text = filter(None, text)
return text
def uniqifier(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
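## Illustrative sketch (not part of the original script and never called here):
## uniqifier keeps the first occurrence of each element and preserves order,
## e.g. uniqifier([1, 2, 1, 3]) returns [1, 2, 3].
def _uniqifier_example():
    return uniqifier([1, 2, 1, 3])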
## Command-line arguments parser
parser = argparse.ArgumentParser(description="Index documents for contextual n-grams based plagiarism detection. Only text files with .m extension are accepted for indexing.")
parser.add_argument("-d", action="store", dest="Document", type=str, help="Document to index")
parser.add_argument("-v", action="version", version="%(prog)s 1.0")
parserResults = parser.parse_args()
documentOpen = parserResults.Document
if (documentOpen is None):
print "This application requires an index file with .m extesion to function."
print "\n"
sys.exit()
else:
documentPath = os.path.dirname(documentOpen)
documentName = casefold(os.path.splitext(os.path.basename(documentOpen))[0])[0]
documentExtension = casefold(os.path.splitext(os.path.basename(documentOpen))[1])[0]
documentFile = documentName + "." + documentExtension
if documentExtension != "m":
print "This application only accepts plain text files with .m extension."
print "\n"
sys.exit()
documentRead = open(documentOpen, "r")
indexList = sorted(documentRead.readlines())
keys = []
metas = []
current_word = None
current_count = 0
current_meta = []
word = None
documentSavePath = "matches/"
if not os.path.exists(documentSavePath): os.makedirs(documentSavePath)
documentExport = open(documentSavePath + documentName + ".txt","w")
for i in range (len(indexList)):
line = indexList[i].strip()
word, count, meta = line.split('\t', 2)
try:
count = int(count)
except ValueError:
continue
if current_word == word:
current_count += count
current_meta.append(meta)
else:
if current_word:
if current_count > 1:
key = [current_word, current_count, current_meta]
keys.append(key)
current_count = count
current_word = word
current_meta = [meta]
if current_word == word:
if current_count > 1:
key = [current_word, current_count, current_meta]
keys.append(key)
for i in range (len(keys)):
for j in range (len(keys[i][2])):
documentMeta = uniqifier(casefold(keys[i][2][j].replace('txt', '')))
if documentName == documentMeta[2]:
print '%s\t%s\t%s' % (keys[i][0], keys[i][1], keys[i][2])
documentExport.write('%s\t%s\t%s\n' % (keys[i][0], keys[i][1], keys[i][2]))
documentExport.close()
|
superberny70/pelisalacarta | refs/heads/develop | python/main-classic/channels/zpeliculas.py | 2 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para zpeliculas
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
import urllib
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
def mainlist(item):
logger.info()
itemlist = []
#itemlist.append( Item(channel=item.channel, action="destacadas" , title="Destacadas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Últimas peliculas", url="http://www.zpeliculas.com/", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
#itemlist.append( Item(channel=item.channel, action="sugeridas" , title="Películas sugeridas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="generos" , title="Por género", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append( Item(channel=item.channel, action="alfabetico" , title="Listado alfabético", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append( Item(channel=item.channel, action="search" , title="Buscador", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="peliculas" , title="A", url="http://www.zpeliculas.com/cat/a", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="B", url="http://www.zpeliculas.com/cat/b", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="C", url="http://www.zpeliculas.com/cat/c", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="D", url="http://www.zpeliculas.com/cat/d", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="E", url="http://www.zpeliculas.com/cat/e", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="F", url="http://www.zpeliculas.com/cat/f", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="G", url="http://www.zpeliculas.com/cat/g", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="H", url="http://www.zpeliculas.com/cat/h", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="I", url="http://www.zpeliculas.com/cat/i", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="J", url="http://www.zpeliculas.com/cat/j", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="K", url="http://www.zpeliculas.com/cat/k", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="L", url="http://www.zpeliculas.com/cat/l", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="M", url="http://www.zpeliculas.com/cat/m", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="N", url="http://www.zpeliculas.com/cat/n", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="O", url="http://www.zpeliculas.com/cat/o", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="P", url="http://www.zpeliculas.com/cat/p", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Q", url="http://www.zpeliculas.com/cat/q", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="R", url="http://www.zpeliculas.com/cat/r", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="S", url="http://www.zpeliculas.com/cat/s", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="T", url="http://www.zpeliculas.com/cat/t", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="U", url="http://www.zpeliculas.com/cat/u", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="V", url="http://www.zpeliculas.com/cat/v", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="W", url="http://www.zpeliculas.com/cat/w", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="X", url="http://www.zpeliculas.com/cat/x", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Y", url="http://www.zpeliculas.com/cat/y", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Z", url="http://www.zpeliculas.com/cat/z", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="0", url="http://www.zpeliculas.com/cat/0", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="1", url="http://www.zpeliculas.com/cat/1", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="2", url="http://www.zpeliculas.com/cat/2", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="3", url="http://www.zpeliculas.com/cat/3", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="4", url="http://www.zpeliculas.com/cat/4", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="5", url="http://www.zpeliculas.com/cat/5", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="6", url="http://www.zpeliculas.com/cat/6", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="7", url="http://www.zpeliculas.com/cat/7", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="8", url="http://www.zpeliculas.com/cat/8", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="9", url="http://www.zpeliculas.com/cat/9", viewmode="movie"))
return itemlist
def generos(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Acción", url="http://www.zpeliculas.com/peliculas/p-accion/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Animación", url="http://www.zpeliculas.com/peliculas/p-animacion/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Aventura", url="http://www.zpeliculas.com/peliculas/p-aventura/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Biografía", url="http://www.zpeliculas.com/peliculas/p-biografia/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Bélico", url="http://www.zpeliculas.com/peliculas/p-belico/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Ciencia Ficción", url="http://www.zpeliculas.com/peliculas/p-cienciaficcion/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Comedia", url="http://www.zpeliculas.com/peliculas/p-comedia/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Crimen", url="http://www.zpeliculas.com/peliculas/p-crimen/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Drama", url="http://www.zpeliculas.com/peliculas/p-drama/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Fantasía", url="http://www.zpeliculas.com/peliculas/p-fantasia/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Histórico", url="http://www.zpeliculas.com/peliculas/p-historico/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Intriga", url="http://www.zpeliculas.com/peliculas/p-intriga/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Musical", url="http://www.zpeliculas.com/peliculas/p-musical/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Romántica", url="http://www.zpeliculas.com/peliculas/p-romantica/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Terror", url="http://www.zpeliculas.com/peliculas/p-terror/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Thriller", url="http://www.zpeliculas.com/peliculas/p-thriller/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Western", url="http://www.zpeliculas.com/peliculas/p-western/", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Otros", url="http://www.zpeliculas.com/peliculas/p-otros/", viewmode="movie"))
return itemlist
def search(item,texto):
try:
post = urllib.urlencode({"story": texto, "do": "search", "subaction": "search", "x": "0", "y": "0"})
data = scrapertools.cache_page("http://www.zpeliculas.com",post=post)
patron = '<div class="leftpane">(.*?)<div class="clear"'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for match in matches:
scrapedtitle = scrapertools.find_single_match(match,'<div class="shortname">([^<]+)</div>')
scrapedurl = scrapertools.find_single_match(match,'<a href="([^"]+)"')
scrapedthumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
scrapedyear = scrapertools.find_single_match(match,'<div class="year"[^>]+>([^<]+)</div>')
scrapedidioma = scrapertools.find_single_match(match,'title="Idioma">([^<]+)</div>')
scrapedcalidad = scrapertools.find_single_match(match,'<div class="shortname"[^<]+</div[^<]+<div[^>]+>([^<]+)</div>')
title = scrapedtitle + ' ('+scrapedyear+') ['+scrapedidioma+'] ['+scrapedcalidad+']'
url = scrapedurl
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://www.zpeliculas.com"
elif categoria == 'infantiles':
item.url = "http://www.zpeliculas.com/peliculas/p-animacion/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].extra == "next_page":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
# Descarga la página
body = scrapertools.cachePage(item.url)
data = scrapertools.get_match(body,'<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">')
'''
<div class="leftpane">
<div class="movieposter" title="Descargar Sólo los amantes sobreviven">
<a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html"><img src="http://i.imgur.com/NBPgXrp.jpg" width="110" height="150" alt="Sólo los amantes sobreviven" title="Descargar Sólo los amantes sobreviven" /></a>
<div class="shortname">Sólo los amantes sobreviven</div>
<div class="BDRip">BDRip</div>
</div>
</div>
<div class="rightpane">
<div style="display:block;overflow:hidden;">
<h2 class="title" title="Sólo los amantes sobreviven"><a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html">Sólo los amantes sobreviven</a></h2>
<div style="height:105px; overflow:hidden;">
<div class="small">
<div class="cats" title="Genero"><a href="http://www.zpeliculas.com/peliculas/p-drama/">Drama</a>, <a href="http://www.zpeliculas.com/peliculas/p-fantasia/">Fantasia</a>, <a href="http://www.zpeliculas.com/peliculas/p-romantica/">Romantica</a></div>
<div class="year" title="Año">2013</div>
<div class="ESP" title="Idioma">ESP</div>
<div class="FA" title="Sólo los amantes sobreviven FA Official Website"><a href="http://www.filmaffinity.com/es/film851633.html" target="_blank" title="Sólo los amantes sobreviven en filmaffinity">Sólo los amantes sobreviven en FA</a></div>
</div>
</div>
<div class="clear" style="height:2px;"></div>
<div style="float:right">
'''
patron = '<div class="leftpane">(.*?)<div style="float\:right">'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for match in matches:
scrapedurl = scrapertools.find_single_match(match,'<a href="([^"]+)"')
scrapedthumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
scrapedtitle = scrapertools.find_single_match(match,'<div class="shortname">([^<]+)')
scrapedcalidad = scrapertools.find_single_match(match,'<div class="shortname">[^<]+</div[^<]+<div class="[^"]+">([^<]+)')
scrapedyear = scrapertools.find_single_match(match,'<div class="year[^>]+>([^<]+)')
scrapedidioma = scrapertools.find_single_match(match,'<div class="year[^>]+>[^<]+</div[^<]+<div class[^>]+>([^<]+)')
contentTitle = scrapertools.htmlclean(scrapedtitle)
#logger.info("title="+scrapedtitle)
title = contentTitle + ' ('+scrapedyear+') ['+scrapedidioma+'] ['+scrapedcalidad+']'
#title = scrapertools.htmlclean(title)
url = scrapedurl
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, hasContentDetails=True, contentTitle=contentTitle, contentThumbnail=thumbnail, fanart=thumbnail,
contentType="movie", context=["buscar_trailer"]))
next_page = scrapertools.find_single_match(body,'<a href="([^"]+)">Siguiente')
if next_page!="":
itemlist.append( Item(channel=item.channel, action="peliculas" , title=">> Página siguiente" , url=next_page, thumbnail="", plot="", show="", viewmode="movie", fanart=thumbnail, extra="next_page"))
return itemlist
def destacadas(item):
logger.info()
# Descarga la página
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data,'<div id="sliderwrapper">(.*?)<div class="genreblock">')
'''
<div class="imageview view-first">
<a href="/templates/mytopV2/blockpro/noimage-full.png" onclick="return hs.expand(this)"><img src="http://i.imgur.com/H4d96Wn.jpg" alt="Ocho apellidos vascos"></a>
<div class="mask">
<h2><a href="/peliculas/p-comedia/1403-ocho-apellidos-vascos.html" title="Ocho apellidos vascos">Ocho apellidos vascos</a></h2>
</div>
'''
patron = '<div class="imageview view-first">.*?<a href=.*?>.*?src="(.*?)" alt="(.*?)"></a>.*?<h2><a href="(.*?)".*?</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist = []
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
logger.info("title="+scrapedtitle)
title = scrapedtitle
title = scrapertools.htmlclean(title)
url = "http://www.zpeliculas.com" + scrapedurl
thumbnail = scrapedthumbnail
plot = ""
plot = unicode( plot, "iso-8859-1" , errors="replace" ).encode("utf-8")
logger.debug("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
def sugeridas(item):
logger.info()
# Descarga la página
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data,'<ul class="links">(.*?)</ul>')
'''
<li><a href="/peliculas/p-accion/425-instinto-asesino.html" title="Descargar Instinto asesino (The Crew)"><span class="movie-name">Instinto asesino (The Crew)</span><img src="http://i.imgur.com/1xXLz.jpg" width="102" height="138" alt="Instinto asesino (The Crew)" title="Descargar Instinto asesino (The Crew)" /></a></li>
'''
patron = '<li>.*?<a href="(.*?)".*?"movie-name">(.*?)</span><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
logger.info("title="+scrapedtitle)
title = scrapedtitle
title = scrapertools.htmlclean(title)
url = "http://www.zpeliculas.com" + scrapedurl
thumbnail = scrapedthumbnail
plot = ""
plot = unicode( plot, "iso-8859-1" , errors="replace" ).encode("utf-8")
logger.debug("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
def findvideos(item):
logger.info("item="+item.tostring())
# Descarga la página para obtener el argumento
data = scrapertools.cachePage(item.url)
item.plot = scrapertools.find_single_match(data,'<div class="contenttext">([^<]+)<').strip()
item.contentPlot = item.plot
logger.info("plot="+item.plot)
return servertools.find_video_items(item=item,data=data)
|
AnimeshSinha1309/WebsiteEdunet | refs/heads/master | WebsiteEdunet/env/Lib/site-packages/django/template/defaultfilters.py | 23 | """Default variable filters."""
from __future__ import unicode_literals
import random as random_module
import re
import warnings
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from pprint import pformat
from django.conf import settings
from django.utils import formats, six
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs, linebreaks,
remove_tags, strip_tags, urlize as _urlize,
)
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_for_escaping, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
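# Illustrative sketch (not part of Django's source and not registered as a real
# filter): @stringfilter coerces the first positional argument to text before
# the filter body runs, so the example below never sees bytes.
@stringfilter
def _example_reverse_filter(value):
    return value[::-1]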
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncates HTML after a certain number of chars.
Argument: Number of chars to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Converts URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
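# Illustrative usage sketch (added comment, not part of the original filter).
# The argument mirrors Python slice syntax, so assuming a hypothetical
# template variable ``mylist = [1, 2, 3, 4, 5]``:
#   {{ mylist|slice:":2" }}   ->  [1, 2]
#   {{ mylist|slice:"1:4" }}  ->  [2, 3, 4]
#   {{ mylist|slice:"::2" }}  ->  [1, 3, 5]
# An unparseable argument leaves the value unchanged.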
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
            ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
        And it is converted to:
            ['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if not isinstance(next_item, six.string_types):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(force_text(item)), sublist))
return '\n'.join(output)
value, converted = convert_old_style_list(value)
if converted:
warnings.warn(
"The old style syntax in `unordered_list` is deprecated and will "
"be removed in Django 1.10. Use the the new format instead.",
RemovedInDjango110Warning)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
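# Illustrative behaviour sketch (added comment, not part of the original
# filter):
#   get_digit(12345, 1)  ->  5       (right-most digit)
#   get_digit(12345, 3)  ->  3
#   get_digit(12345, 6)  ->  0       (past the left-most digit)
#   get_digit(12345, 0)  ->  12345   (argument < 1 fails silently)
#   get_digit('abc', 1)  ->  'abc'   (non-integer input is returned unchanged)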
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                      if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
if bytes < KB:
value = ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
elif bytes < MB:
value = ugettext("%s KB") % filesize_number_format(bytes / KB)
elif bytes < GB:
value = ugettext("%s MB") % filesize_number_format(bytes / MB)
elif bytes < TB:
value = ugettext("%s GB") % filesize_number_format(bytes / GB)
elif bytes < PB:
value = ugettext("%s TB") % filesize_number_format(bytes / TB)
else:
value = ugettext("%s PB") % filesize_number_format(bytes / PB)
return avoid_wrapping(value)
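# Illustrative behaviour sketch (added comment, not part of the original
# filter). Ignoring locale-specific number formatting and the non-breaking
# spaces inserted by avoid_wrapping, the filter yields roughly:
#   filesizeformat(1023)       ->  "1023 bytes"
#   filesizeformat(1024)       ->  "1.0 KB"
#   filesizeformat(1536)       ->  "1.5 KB"
#   filesizeformat(1024 ** 2)  ->  "1.0 MB"
#   filesizeformat("oops")     ->  "0 bytes"   (invalid input fails silently)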
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if float(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, force_text(e, errors="replace"))
|
0x0mar/memex-explorer | refs/heads/master | source/apps/crawl_space/tests/test_crawl_model.py | 1 | from __future__ import unicode_literals
# Test
from memex.test_utils.unit_test_utils import UnitTestSkeleton, form_errors
from django.test import TestCase
from django.db import IntegrityError
from django.core.files.uploadedfile import SimpleUploadedFile
# App
from base.forms import AddProjectForm
from base.models import Project
from test_crawl import assert_form_errors
class TestAddDataModelView(UnitTestSkeleton):
@classmethod
def setUpClass(cls):
super(TestAddDataModelView, cls).setUpClass()
cls.test_project = Project(
name = u"Model Test",
description = "Test Project Description")
cls.test_project.save()
@property
def slugs(self):
return dict(slugs=dict(
project_slug="model-test"))
def get_model_file(self):
return SimpleUploadedFile('pageclassifier.model', bytes('This is a model file.\n'), 'utf-8')
def get_features_file(self):
return SimpleUploadedFile('pageclassifier.features', bytes('This is a features file.\n'), 'utf-8')
def test_add_model_page(self):
response = self.get('base:crawl_space:add_crawl_model', **self.slugs)
assert 'crawl_space/add_crawl_model.html' in response.template_name
def test_add_model_no_data(self):
response = self.post('base:crawl_space:add_crawl_model', **self.slugs)
assert_form_errors(response, 'name', 'model', 'features')
def test_add_model_no_name(self):
response = self.post('base:crawl_space:add_crawl_model',
{
'model': self.get_model_file(),
'features': self.get_features_file(),
},
**self.slugs)
assert_form_errors(response, 'name')
def test_add_model_no_model(self):
response = self.post('base:crawl_space:add_crawl_model',
{
'name': 'Test Model',
'features': self.get_features_file(),
},
**self.slugs)
assert_form_errors(response, 'model')
def test_add_model_no_features(self):
response = self.post('base:crawl_space:add_crawl_model',
{
'name': 'Test Model',
'model': self.get_model_file(),
},
**self.slugs)
assert_form_errors(response, 'features')
def test_add_model_success(self):
response = self.post('base:crawl_space:add_crawl_model',
{
'name': 'Test Model',
'model': self.get_model_file(),
'features': self.get_features_file(),
},
**self.slugs)
|
hortonworks/hortonworks-sandbox | refs/heads/master | desktop/core/ext-py/Django-1.2.3/django/contrib/localflavor/sk/forms.py | 344 | """
Slovak-specific form helpers
"""
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class SKRegionSelect(Select):
"""
    A select widget with a list of Slovak regions as choices.
"""
def __init__(self, attrs=None):
from sk_regions import REGION_CHOICES
super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
"""
    A select widget with a list of Slovak districts as choices.
"""
def __init__(self, attrs=None):
from sk_districts import DISTRICT_CHOICES
super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
"""
A form field that validates its input as Slovak postal code.
    Valid format is XXXXX or XXX XX, where X represents a digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'),
}
def __init__(self, *args, **kwargs):
super(SKPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(SKPostalCodeField, self).clean(value)
return v.replace(' ', '')
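    # Illustrative behaviour sketch (added comment, not part of the original
    # field): for valid input, clean() strips the optional space, e.g.
    # u'842 15' and u'84215' both clean to u'84215'; anything not matching the
    # pattern raises a ValidationError with the message above.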
|
yro/buttsworth | refs/heads/master | chatterbot/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
chirilo/mozillians | refs/heads/master | vendor-local/lib/python/tablib/packages/xlwt3/ExcelFormula.py | 44 | from . import ExcelFormulaParser, ExcelFormulaLexer
import struct
from .antlr import ANTLRException
class Formula(object):
__slots__ = ["__s", "__parser", "__sheet_refs", "__xcall_refs"]
def __init__(self, s):
try:
self.__s = s
lexer = ExcelFormulaLexer.Lexer(s)
self.__parser = ExcelFormulaParser.Parser(lexer)
self.__parser.formula()
self.__sheet_refs = self.__parser.sheet_references
self.__xcall_refs = self.__parser.xcall_references
except ANTLRException as e:
# print e
raise ExcelFormulaParser.FormulaParseException("can't parse formula " + s)
def get_references(self):
return self.__sheet_refs, self.__xcall_refs
def patch_references(self, patches):
for offset, idx in patches:
self.__parser.rpn = self.__parser.rpn[:offset] + struct.pack('<H', idx) + self.__parser.rpn[offset+2:]
def text(self):
return self.__s
def rpn(self):
'''
Offset Size Contents
0 2 Size of the following formula data (sz)
2 sz Formula data (RPN token array)
[2+sz] var. (optional) Additional data for specific tokens
'''
return struct.pack("<H", len(self.__parser.rpn)) + self.__parser.rpn
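    # Illustrative sketch (added comment, not part of the original class): the
    # leading size field is a 2-byte little-endian unsigned short, e.g. a
    # 10-byte token array is prefixed with struct.pack('<H', 10) == b'\n\x00',
    # followed by the raw RPN token bytes.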
|
juliusbierk/scikit-image | refs/heads/master | skimage/viewer/tests/test_plugins.py | 35 | # -*- coding: utf-8 -*-
import numpy as np
import skimage
import skimage.data as data
from skimage.filters.rank import median
from skimage.morphology import disk
from skimage.viewer import ImageViewer, has_qt
from skimage.viewer.plugins.base import Plugin
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins import (
LineProfile, Measure, CannyPlugin, LabelPainter, Crop, ColorHistogram,
PlotPlugin)
from numpy.testing import assert_equal, assert_allclose, assert_almost_equal
from numpy.testing.decorators import skipif
from skimage._shared._warnings import expected_warnings
def setup_line_profile(image, limits='image'):
viewer = ImageViewer(skimage.img_as_float(image))
plugin = LineProfile(limits=limits)
viewer += plugin
return plugin
@skipif(not has_qt)
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_allclose(scan_data.max(), 0.9176, rtol=1e-3)
assert_allclose(scan_data.mean(), 0.2812, rtol=1e-3)
@skipif(not has_qt)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea(), limits=None)
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 750)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 0.772, rtol=1e-3)
assert_allclose(scan_data.mean(), 0.4359, rtol=1e-3)
@skipif(not has_qt)
def test_line_profile_dynamic():
"""Test a line profile updating after an image transform"""
image = data.coins()[:-50, :] # shave some off to make the line lower
image = skimage.img_as_float(image)
viewer = ImageViewer(image)
lp = LineProfile(limits='dtype')
viewer += lp
line = lp.get_profiles()[-1][0]
assert line.size == 129
assert_almost_equal(np.std(viewer.image), 0.208, 3)
assert_almost_equal(np.std(line), 0.229, 3)
assert_almost_equal(np.max(line) - np.min(line), 0.725, 1)
with expected_warnings(['precision loss']):
viewer.image = skimage.img_as_float(median(image,
selem=disk(radius=3)))
line = lp.get_profiles()[-1][0]
assert_almost_equal(np.std(viewer.image), 0.198, 3)
assert_almost_equal(np.std(line), 0.220, 3)
assert_almost_equal(np.max(line) - np.min(line), 0.639, 1)
@skipif(not has_qt)
def test_measure():
image = data.camera()
viewer = ImageViewer(image)
m = Measure()
viewer += m
m.line_changed([(0, 0), (10, 10)])
assert_equal(str(m._length.text), '14.1')
assert_equal(str(m._angle.text[:5]), '135.0')
@skipif(not has_qt)
def test_canny():
image = data.camera()
viewer = ImageViewer(image)
c = CannyPlugin()
viewer += c
canny_edges = viewer.show(False)
viewer.close()
edges = canny_edges[0][0]
assert edges.sum() == 2852
@skipif(not has_qt)
def test_label_painter():
image = data.camera()
moon = data.moon()
viewer = ImageViewer(image)
lp = LabelPainter()
viewer += lp
assert_equal(lp.radius, 5)
lp.label = 1
assert_equal(str(lp.label), '1')
lp.label = 2
assert_equal(str(lp.paint_tool.label), '2')
assert_equal(lp.paint_tool.radius, 5)
lp._on_new_image(moon)
assert_equal(lp.paint_tool.shape, moon.shape)
@skipif(not has_qt)
def test_crop():
image = data.camera()
viewer = ImageViewer(image)
c = Crop()
viewer += c
c.crop((0, 100, 0, 100))
assert_equal(viewer.image.shape, (101, 101))
@skipif(not has_qt)
def test_color_histogram():
image = skimage.img_as_float(data.load('color.png'))
viewer = ImageViewer(image)
ch = ColorHistogram(dock='right')
viewer += ch
    assert_almost_equal(viewer.image.std(), 0.352, 3)
    ch.ab_selected((0, 100, 0, 100))
    assert_almost_equal(viewer.image.std(), 0.325, 3)
@skipif(not has_qt)
def test_plot_plugin():
viewer = ImageViewer(data.moon())
plugin = PlotPlugin(image_filter=lambda x: x)
viewer += plugin
assert_equal(viewer.image, data.moon())
plugin._update_original_image(data.coins())
assert_equal(viewer.image, data.coins())
viewer.close()
@skipif(not has_qt)
def test_plugin():
img = skimage.img_as_float(data.moon())
viewer = ImageViewer(img)
def median_filter(img, radius=3):
with expected_warnings(['precision loss']):
return median(img, selem=disk(radius=radius))
plugin = Plugin(image_filter=median_filter)
viewer += plugin
plugin += Slider('radius', 1, 5)
assert_almost_equal(np.std(viewer.image), 12.556, 3)
plugin.filter_image()
assert_almost_equal(np.std(viewer.image), 12.931, 3)
plugin.show()
plugin.close()
plugin.clean_up()
img, _ = plugin.output()
assert_equal(img, viewer.image)
|
keakon/Doodle | refs/heads/master | doodle/core/models/tag.py | 1 | # -*- coding: utf-8 -*-
from itertools import izip
from doodle.config import CONFIG
from doodle.core.property import IntegerProperty, StringProperty
from .base_model import JSONModel, SimpleModel
class Tag(SimpleModel):
@classmethod
def get_all(cls):
names = cls.redis_client.smembers(cls.KEY)
return [unicode(name, 'utf-8') for name in names]
@classmethod
def add(cls, name):
cls.redis_client.sadd(cls.KEY, name)
@classmethod
def exists(cls, name):
return cls.redis_client.sismember(cls.KEY, name)
@classmethod
def get_count(cls, name):
return cls.redis_client.zcard(TagArticle.KEY % name)
@classmethod
def get_counts(cls):
names = cls.get_all()
if names:
with cls.redis_client.pipeline(transaction=False) as pipe:
for name in names:
pipe.zcard(TagArticle.KEY % name)
counts = pipe.execute()
return dict(izip(names, counts))
return {}
class TagArticle(JSONModel):
KEY = 'TagArticle:%s'
tag = StringProperty()
article_id = IntegerProperty()
time = IntegerProperty()
def _get_watching_keys(self, inserting=False):
return [self.KEY % self.tag]
def _save_self(self, redis_client, inserting=False):
key = self.KEY % self.tag
if self.time:
redis_client.zadd(key, {self.article_id: self.time})
else:
redis_client.zrem(key, self.article_id)
@classmethod
def get_article_ids(cls, tag_name, cursor=None, limit=CONFIG.ARTICLES_PER_PAGE):
redis_client = cls.redis_client
key = cls.KEY % tag_name
if cursor is None:
return redis_client.zrevrange(key, 0, limit - 1, withscores=True, score_cast_func=int)
else:
return redis_client.zrevrangebyscore(key, '(%d' % cursor, 0, 0, limit, withscores=True, score_cast_func=int)
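    # Illustrative note (added comment, not part of the original model): the
    # '(%d' prefix makes the cursor score exclusive, so a hypothetical call
    #   TagArticle.get_article_ids('python', cursor=1400000000, limit=10)
    # issues ZREVRANGEBYSCORE TagArticle:python (1400000000 0 WITHSCORES
    # LIMIT 0 10, i.e. the next page of article ids strictly older than the
    # cursor timestamp.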
@classmethod
def get_articles(cls, category_name, cursor=None, limit=CONFIG.ARTICLES_PER_PAGE):
article_ids_with_time = cls.get_article_ids(category_name, cursor)
if article_ids_with_time:
from .article import Article
return Article.get_articles_and_next_cursor(article_ids_with_time, limit=limit)
return [], None
|
FreskyZ/fff-lang | refs/heads/master | scripts/bump-version.py | 1 | from common import solution
import sys
if len(sys.argv) < 2:
print("require 2 args")
exit()
solution.set_version(sys.argv[1]) |
da1z/intellij-community | refs/heads/master | bin/linux/printenv.py | 24 | #!/usr/bin/env python
# Dumps environment variables into specified file.
# Format: zero-separated "name=value" pairs in platform encoding.
import os
import sys
if len(sys.argv) != 2:
raise Exception('Exactly one argument expected')
f = open(sys.argv[1], 'wb')
try:
for key, value in os.environ.items():
s = '%s=%s\0' % (key, value)
f.write(s.encode('utf-8'))
finally:
f.close()
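# Illustrative consumer sketch (added comment, not part of this script),
# assuming a hypothetical reader of the dump written above:
#   data = open(dump_path, 'rb').read().decode('utf-8')
#   env = dict(chunk.split('=', 1) for chunk in data.split('\0') if chunk)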
|
benjello/openfisca-core | refs/heads/master | openfisca_core/taxscales.py | 3 | # -*- coding: utf-8 -*-
from __future__ import division
from bisect import bisect_left, bisect_right
import copy
import logging
import itertools
import numpy as np
from numpy import maximum as max_, minimum as min_
from .tools import empty_clone
log = logging.getLogger(__name__)
class AbstractTaxScale(object):
"""Abstract class for various types of tax scales (amount-based tax scales, rate-based tax scales)
French translations:
* base: assiette
* bracket: tranche
* rate: taux
* tax scale: barème
* threshold: seuil
"""
name = None
option = None
thresholds = None
unit = None
def __init__(self, name = None, option = None, unit = None):
self.name = name or 'Untitled TaxScale'
if option is not None:
self.option = option
self.thresholds = []
if unit is not None:
self.unit = unit
def __eq__(self, other):
raise NotImplementedError('Method "__eq__" is not implemented for {}'.format(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('Method "__ne__" is not implemented for {}'.format(self.__class__.__name__))
def __str__(self):
raise NotImplementedError('Method "__str__" is not implemented for {}'.format(self.__class__.__name__))
def calc(self, base):
raise NotImplementedError('Method "calc" is not implemented for {}'.format(self.__class__.__name__))
def copy(self):
new = empty_clone(self)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
class AbstractRateTaxScale(AbstractTaxScale):
"""Abstract class for various types of rate-based tax scales (marginal rate, linear average rate)"""
rates = None
def __init__(self, name = None, option = None, unit = None):
super(AbstractRateTaxScale, self).__init__(name = name, option = option, unit = unit)
self.rates = []
def __str__(self):
return '\n'.join(itertools.chain(
['{}: {}'.format(self.__class__.__name__, self.name)],
(
'- {} {}'.format(threshold, rate)
for threshold, rate in itertools.izip(self.thresholds, self.rates)
),
))
def add_bracket(self, threshold, rate):
if threshold in self.thresholds:
i = self.thresholds.index(threshold)
self.rates[i] += rate
else:
i = bisect_left(self.thresholds, threshold)
self.thresholds.insert(i, threshold)
self.rates.insert(i, rate)
def multiply_rates(self, factor, inplace = True, new_name = None):
if inplace:
assert new_name is None
for i, rate in enumerate(self.rates):
self.rates[i] = rate * factor
return self
new_tax_scale = self.__class__(new_name or self.name, option = self.option, unit = self.unit)
for threshold, rate in itertools.izip(self.thresholds, self.rates):
new_tax_scale.thresholds.append(threshold)
new_tax_scale.rates.append(rate * factor)
return new_tax_scale
def multiply_thresholds(self, factor, decimals = None, inplace = True, new_name = None):
if inplace:
assert new_name is None
for i, threshold in enumerate(self.thresholds):
if decimals is not None:
self.thresholds[i] = np.around(threshold * factor, decimals = decimals)
else:
self.thresholds[i] = threshold * factor
return self
new_tax_scale = self.__class__(new_name or self.name, option = self.option, unit = self.unit)
for threshold, rate in itertools.izip(self.thresholds, self.rates):
if decimals is not None:
new_tax_scale.thresholds.append(np.around(threshold * factor, decimals = decimals))
else:
new_tax_scale.thresholds.append(threshold * factor)
new_tax_scale.rates.append(rate)
return new_tax_scale
class AmountTaxScale(AbstractTaxScale):
amounts = None
def __init__(self, name = None, option = None, unit = None):
super(AmountTaxScale, self).__init__(name = name, option = option, unit = unit)
self.amounts = []
def __str__(self):
return '\n'.join(itertools.chain(
['{}: {}'.format(self.__class__.__name__, self.name)],
(
'- {} {}'.format(threshold, amount)
for threshold, amount in itertools.izip(self.thresholds, self.amounts)
),
))
def add_bracket(self, threshold, amount):
if threshold in self.thresholds:
i = self.thresholds.index(threshold)
self.amounts[i] += amount
else:
i = bisect_left(self.thresholds, threshold)
self.thresholds.insert(i, threshold)
self.amounts.insert(i, amount)
def calc(self, base):
base1 = np.tile(base, (len(self.thresholds), 1)).T
thresholds1 = np.tile(np.hstack((self.thresholds, np.inf)), (len(base), 1))
a = max_(min_(base1, thresholds1[:, 1:]) - thresholds1[:, :-1], 0)
return np.dot(self.amounts, a.T > 0)
class LinearAverageRateTaxScale(AbstractRateTaxScale):
def calc(self, base):
if len(self.rates) == 1:
return base * self.rates[0]
tiled_base = np.tile(base, (len(self.thresholds) - 1, 1)).T
tiled_thresholds = np.tile(self.thresholds, (len(base), 1))
bracket_dummy = (tiled_base >= tiled_thresholds[:, :-1]) * (tiled_base < tiled_thresholds[:, 1:])
rates_array = np.array(self.rates)
thresholds_array = np.array(self.thresholds)
rate_slope = (rates_array[1:] - rates_array[:-1]) / (thresholds_array[1:] - thresholds_array[:-1])
average_rate_slope = np.dot(bracket_dummy, rate_slope.T)
bracket_average_start_rate = np.dot(bracket_dummy, rates_array[:-1])
bracket_threshold = np.dot(bracket_dummy, thresholds_array[:-1])
log.info("bracket_average_start_rate : {}".format(bracket_average_start_rate))
log.info("average_rate_slope: {}".format(average_rate_slope))
return base * (bracket_average_start_rate + (base - bracket_threshold) * average_rate_slope)
def to_marginal(self):
marginal_tax_scale = MarginalRateTaxScale(name = self.name, option = self.option, unit = self.unit)
previous_I = 0
previous_threshold = 0
for threshold, rate in itertools.izip(self.thresholds[1:], self.rates[1:]):
if threshold != float('Inf'):
I = rate * threshold
marginal_tax_scale.add_bracket(previous_threshold, (I - previous_I) / (threshold - previous_threshold))
previous_I = I
previous_threshold = threshold
marginal_tax_scale.add_bracket(previous_threshold, rate)
return marginal_tax_scale
class MarginalRateTaxScale(AbstractRateTaxScale):
def add_tax_scale(self, tax_scale):
        if tax_scale.thresholds:  # avoid issues with empty tax scales
for threshold_low, threshold_high, rate in itertools.izip(tax_scale.thresholds[:-1],
tax_scale.thresholds[1:], tax_scale.rates):
self.combine_bracket(rate, threshold_low, threshold_high)
        self.combine_bracket(tax_scale.rates[-1], tax_scale.thresholds[-1]) # handle the last threshold
def calc(self, base, factor = 1, round_base_decimals = None):
base1 = np.tile(base, (len(self.thresholds), 1)).T
if isinstance(factor, (float, int)):
factor = np.ones(len(base)) * factor
thresholds1 = np.outer(factor, np.array(self.thresholds + [np.inf]))
if round_base_decimals is not None:
thresholds1 = np.round(thresholds1, round_base_decimals)
a = max_(min_(base1, thresholds1[:, 1:]) - thresholds1[:, :-1], 0)
if round_base_decimals is None:
return np.dot(self.rates, a.T)
else:
r = np.tile(self.rates, (len(base), 1))
b = np.round(a, round_base_decimals)
return np.round(r * b, round_base_decimals).sum(axis = 1)
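    # Illustrative worked example (added comment, not part of the original
    # method), for a hypothetical scale with brackets 0 -> 0%, 1000 -> 10% and
    # 2000 -> 20%:
    #   calc(np.array([1500.]))  ->  [ 50.]   (0.10 * (1500 - 1000))
    #   calc(np.array([2500.]))  ->  [200.]   (0.10 * 1000 + 0.20 * 500)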
def combine_bracket(self, rate, threshold_low = 0, threshold_high = False):
# Insert threshold_low and threshold_high without modifying rates
if threshold_low not in self.thresholds:
index = bisect_right(self.thresholds, threshold_low) - 1
self.add_bracket(threshold_low, self.rates[index])
if threshold_high and threshold_high not in self.thresholds:
index = bisect_right(self.thresholds, threshold_high) - 1
self.add_bracket(threshold_high, self.rates[index])
# Use add_bracket to add rates where they belongs
i = self.thresholds.index(threshold_low)
if threshold_high:
j = self.thresholds.index(threshold_high) - 1
else:
j = len(self.thresholds) - 1
while i <= j:
self.add_bracket(self.thresholds[i], rate)
i += 1
def inverse(self):
"""Returns a new instance of MarginalRateTaxScale
        Inverts a tax scale: given thresholds and rates expressed as a function
        of gross income, returns a tax scale whose thresholds and rates are
        expressed in terms of net income.
        If net = gross - BarmMar(gross, B)
        then gross = BarmMar(net, B.inverse())
        threshold: gross income threshold
        taxable threshold: taxable/declared income threshold
        theta: y-intercept of the segments between consecutive thresholds in a
        representation of taxable income as a piecewise linear function
        of gross income
"""
# Actually 1/(1-global-rate)
inverse = self.__class__(name = self.name + "'", option = self.option, unit = self.unit)
taxable_threshold = 0
for threshold, rate in itertools.izip(self.thresholds, self.rates):
if threshold == 0:
previous_rate = 0
theta = 0
            # Compute the taxable income threshold of the current bracket.
taxable_threshold = (1 - previous_rate) * threshold + theta
inverse.add_bracket(taxable_threshold, 1 / (1 - rate))
theta = (rate - previous_rate) * threshold + theta
previous_rate = rate
return inverse
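    # Illustrative worked example (added comment, not part of the original
    # method): for a hypothetical single-bracket scale B with a 20% rate above
    # 0, net = gross - B.calc(gross) = 0.8 * gross; B.inverse() then holds one
    # bracket (0, 1.25), so B.inverse().calc(np.array([80.])) -> [100.].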
def scale_tax_scales(self, factor):
"""Scale all the MarginalRateTaxScales in the node."""
assert isinstance(factor, (float, int))
scaled_tax_scale = self.copy()
return scaled_tax_scale.multiply_thresholds(factor)
def to_average(self):
average_tax_scale = LinearAverageRateTaxScale(name = self.name, option = self.option, unit = self.unit)
average_tax_scale.add_bracket(0, 0)
if self.thresholds:
I = 0
previous_threshold = self.thresholds[0]
previous_rate = self.rates[0]
for threshold, rate in itertools.islice(itertools.izip(self.thresholds, self.rates), 1, None):
I += previous_rate * (threshold - previous_threshold)
average_tax_scale.add_bracket(threshold, I / threshold)
previous_threshold = threshold
previous_rate = rate
average_tax_scale.add_bracket(float('Inf'), rate)
return average_tax_scale
|
joshfriend/sqlalchemy-utils | refs/heads/master | sqlalchemy_utils/types/color.py | 1 | import six
from sqlalchemy import types
from sqlalchemy_utils.exceptions import ImproperlyConfigured
from .scalar_coercible import ScalarCoercible
colour = None
try:
import colour
except ImportError:
pass
class ColorType(types.TypeDecorator, ScalarCoercible):
"""
    ColorType provides a way of saving Color objects (from the colour_ package)
    into the database. ColorType saves Color objects as strings on the way in and
converts them back to objects when querying the database.
::
from colour import Color
from sqlalchemy_utils import ColorType
class Document(Base):
__tablename__ = 'document'
id = sa.Column(sa.Integer, autoincrement=True)
name = sa.Column(sa.Unicode(50))
background_color = sa.Column(ColorType)
document = Document()
document.background_color = Color('#F5F5F5')
session.commit()
Querying the database returns Color objects:
::
document = session.query(Document).first()
document.background_color.hex
# '#f5f5f5'
.. _colour: https://github.com/vaab/colour
"""
STORE_FORMAT = u'hex'
impl = types.Unicode(20)
python_type = colour.Color
def __init__(self, max_length=20, *args, **kwargs):
# Fail if colour is not found.
if colour is None:
raise ImproperlyConfigured(
"'colour' package is required to use 'ColorType'"
)
super(ColorType, self).__init__(*args, **kwargs)
self.impl = types.Unicode(max_length)
def process_bind_param(self, value, dialect):
if value and isinstance(value, colour.Color):
return six.text_type(getattr(value, self.STORE_FORMAT))
return value
def process_result_value(self, value, dialect):
if value:
return colour.Color(value)
return value
def _coerce(self, value):
if value is not None and not isinstance(value, colour.Color):
return colour.Color(value)
return value
|
Cantera/cantera-svn | refs/heads/master | interfaces/cython/cantera/mixmaster/Units/power.py | 4 | from .SI import watt, kilo
#
# Definitions of common power units
# Data taken from Appendix F of Halliday, Resnick, Walker, "Fundamentals of Physics",
# fourth edition, John Willey and Sons, 1993
kilowatt = kilo * watt
horsepower = 745.7 * watt
|
AuyaJackie/odoo | refs/heads/8.0 | addons/mass_mailing/models/mass_mailing.py | 68 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
import json
import random
from openerp import tools
from openerp.exceptions import Warning
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.tools import ustr
from openerp.osv import osv, fields
class MassMailingCategory(osv.Model):
"""Model of categories of mass mailing, i.e. marketing, newsletter, ... """
_name = 'mail.mass_mailing.category'
_description = 'Mass Mailing Category'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
}
class MassMailingList(osv.Model):
"""Model of a contact list. """
_name = 'mail.mass_mailing.list'
_order = 'name'
_description = 'Mailing List'
def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, 0)
Contacts = self.pool.get('mail.mass_mailing.contact')
for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context):
result[group['list_id'][0]] = group['list_id_count']
return result
_columns = {
'name': fields.char('Mailing List', required=True),
'contact_nbr': fields.function(
_get_contact_nbr, type='integer',
string='Number of Contacts',
),
}
class MassMailingContact(osv.Model):
"""Model of a contact. This model is different from the partner model
because it holds only some basic information: name, email. The purpose is to
be able to deal with large contact list to email without bloating the partner
base."""
_name = 'mail.mass_mailing.contact'
_inherit = 'mail.thread'
_description = 'Mass Mailing Contact'
_order = 'email'
_rec_name = 'email'
_columns = {
'name': fields.char('Name'),
'email': fields.char('Email', required=True),
'create_date': fields.datetime('Create Date'),
'list_id': fields.many2one(
'mail.mass_mailing.list', string='Mailing List',
ondelete='cascade', required=True,
),
'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'),
}
def _get_latest_list(self, cr, uid, context={}):
lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context)
return lid and lid[0] or False
_defaults = {
'list_id': _get_latest_list
}
def get_name_email(self, name, context):
name, email = self.pool['res.partner']._parse_partner_name(name, context=context)
if name and not email:
email = name
if email and not name:
name = email
return name, email
def name_create(self, cr, uid, name, context=None):
name, email = self.get_name_email(name, context=context)
rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
def add_to_list(self, cr, uid, name, list_id, context=None):
name, email = self.get_name_email(name, context=context)
rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
def message_get_default_recipients(self, cr, uid, ids, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False}
return res
class MassMailingStage(osv.Model):
"""Stage for mass mailing campaigns. """
_name = 'mail.mass_mailing.stage'
_description = 'Mass Mailing Campaign Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Name', required=True, translate=True),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'sequence': 0,
}
class MassMailingCampaign(osv.Model):
"""Model of mass mailing campaigns. """
_name = "mail.mass_mailing.campaign"
_description = 'Mass Mailing Campaign'
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing campaign """
results = {}
cr.execute("""
SELECT
c.id as campaign_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied ,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing_campaign c
ON (c.id = s.mass_mailing_campaign_id)
WHERE
c.id IN %s
GROUP BY
c.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('campaign_id')] = row
total = row['total'] or 1
row['delivered'] = row['sent'] - row['bounced']
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
_columns = {
'name': fields.char('Name', required=True),
'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True),
'user_id': fields.many2one(
'res.users', 'Responsible',
required=True,
),
'category_ids': fields.many2many(
'mail.mass_mailing.category', 'mail_mass_mailing_category_rel',
'category_id', 'campaign_id', string='Categories'),
'mass_mailing_ids': fields.one2many(
'mail.mass_mailing', 'mass_mailing_campaign_id',
'Mass Mailings',
),
'unique_ab_testing': fields.boolean(
'AB Testing',
            help='If checked, recipients will be mailed only once, allowing to send '
                 'various mailings in a single campaign to test the effectiveness '
                 'of the mailings.'),
'color': fields.integer('Color Index'),
# stat fields
'total': fields.function(
_get_statistics, string='Total',
type='integer', multi='_get_statistics'
),
'scheduled': fields.function(
_get_statistics, string='Scheduled',
type='integer', multi='_get_statistics'
),
'failed': fields.function(
_get_statistics, string='Failed',
type='integer', multi='_get_statistics'
),
'sent': fields.function(
_get_statistics, string='Sent Emails',
type='integer', multi='_get_statistics'
),
'delivered': fields.function(
_get_statistics, string='Delivered',
type='integer', multi='_get_statistics',
),
'opened': fields.function(
_get_statistics, string='Opened',
type='integer', multi='_get_statistics',
),
'replied': fields.function(
_get_statistics, string='Replied',
type='integer', multi='_get_statistics'
),
'bounced': fields.function(
_get_statistics, string='Bounced',
type='integer', multi='_get_statistics'
),
'received_ratio': fields.function(
_get_statistics, string='Received Ratio',
type='integer', multi='_get_statistics',
),
'opened_ratio': fields.function(
_get_statistics, string='Opened Ratio',
type='integer', multi='_get_statistics',
),
'replied_ratio': fields.function(
_get_statistics, string='Replied Ratio',
type='integer', multi='_get_statistics',
),
}
def _get_default_stage_id(self, cr, uid, context=None):
stage_ids = self.pool['mail.mass_mailing.stage'].search(cr, uid, [], limit=1, context=context)
return stage_ids and stage_ids[0] or False
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'stage_id': lambda self, *args: self._get_default_stage_id(*args),
}
def get_recipients(self, cr, uid, ids, model=None, context=None):
"""Return the recipients of a mailing campaign. This is based on the statistics
        built for each mailing. """
Statistics = self.pool['mail.mail.statistics']
res = dict.fromkeys(ids, False)
for cid in ids:
domain = [('mass_mailing_campaign_id', '=', cid)]
if model:
domain += [('model', '=', model)]
stat_ids = Statistics.search(cr, uid, domain, context=context)
res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context))
return res
class MassMailing(osv.Model):
""" MassMailing models a wave of emails for a mass mailign campaign.
A mass mailing is an occurence of sending emails. """
_name = 'mail.mass_mailing'
_description = 'Mass Mailing'
# number of periods for tracking mail_mail statistics
_period_number = 6
_order = 'sent_date DESC'
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, date_begin, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
                                                    'tooltip': (str) bar_column_tooltip,
}
]
"""
date_begin = date_begin.date()
section_result = [{'value': 0,
'tooltip': ustr((date_begin + relativedelta.relativedelta(days=i)).strftime('%d %B %Y')),
} for i in range(0, self._period_number)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
field = obj._fields.get(groupby_field.split(':')[0])
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if field.type == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern).date()
timedelta = relativedelta.relativedelta(group_begin_date, date_begin)
section_result[timedelta.days] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field)}
return section_result
def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None):
""" Get the daily statistics of the mass mailing. This is done by a grouping
on opened and replied fields. Using custom format in context, we obtain
results for the next 6 days following the mass mailing date. """
obj = self.pool['mail.mail.statistics']
res = {}
for mailing in self.browse(cr, uid, ids, context=context):
res[mailing.id] = {}
date = mailing.sent_date if mailing.sent_date else mailing.create_date
date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1)
date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)]
res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context))
domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)]
res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context))
return res
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing """
results = {}
cr.execute("""
SELECT
m.id as mailing_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.sent is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing m
ON (m.id = s.mass_mailing_id)
WHERE
m.id IN %s
GROUP BY
m.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('mailing_id')] = row
total = row['total'] or 1
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
def _get_mailing_model(self, cr, uid, context=None):
res = []
for model_name in self.pool:
model = self.pool[model_name]
if hasattr(model, '_mail_mass_mailing') and getattr(model, '_mail_mass_mailing'):
res.append((model._name, getattr(model, '_mail_mass_mailing')))
res.append(('mail.mass_mailing.contact', _('Mailing List')))
return res
# indirections for inheritance
_mailing_model = lambda self, *args, **kwargs: self._get_mailing_model(*args, **kwargs)
_columns = {
'name': fields.char('Subject', required=True),
'email_from': fields.char('From', required=True),
'create_date': fields.datetime('Creation Date'),
'sent_date': fields.datetime('Sent Date', oldname='date', copy=False),
'body_html': fields.html('Body'),
'attachment_ids': fields.many2many(
'ir.attachment', 'mass_mailing_ir_attachments_rel',
'mass_mailing_id', 'attachment_id', 'Attachments'
),
'mass_mailing_campaign_id': fields.many2one(
'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
ondelete='set null',
),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', required=True, copy=False,
),
'color': fields.related(
'mass_mailing_campaign_id', 'color',
type='integer', string='Color Index',
),
# mailing options
'reply_to_mode': fields.selection(
[('thread', 'In Document'), ('email', 'Specified Email Address')],
string='Reply-To Mode', required=True,
),
'reply_to': fields.char('Reply To', help='Preferred Reply-To Address'),
# recipients
'mailing_model': fields.selection(_mailing_model, string='Recipients Model', required=True),
'mailing_domain': fields.char('Domain', oldname='domain'),
'contact_list_ids': fields.many2many(
'mail.mass_mailing.list', 'mail_mass_mailing_list_rel',
string='Mailing Lists',
),
'contact_ab_pc': fields.integer(
'AB Testing percentage',
help='Percentage of the contacts that will be mailed. Recipients will be taken randomly.'
),
# statistics data
'statistics_ids': fields.one2many(
'mail.mail.statistics', 'mass_mailing_id',
'Emails Statistics',
),
'total': fields.function(
_get_statistics, string='Total',
type='integer', multi='_get_statistics',
),
'scheduled': fields.function(
_get_statistics, string='Scheduled',
type='integer', multi='_get_statistics',
),
'failed': fields.function(
_get_statistics, string='Failed',
type='integer', multi='_get_statistics',
),
'sent': fields.function(
_get_statistics, string='Sent',
type='integer', multi='_get_statistics',
),
'delivered': fields.function(
_get_statistics, string='Delivered',
type='integer', multi='_get_statistics',
),
'opened': fields.function(
_get_statistics, string='Opened',
type='integer', multi='_get_statistics',
),
'replied': fields.function(
_get_statistics, string='Replied',
type='integer', multi='_get_statistics',
),
'bounced': fields.function(
_get_statistics, string='Bounced',
type='integer', multi='_get_statistics',
),
'received_ratio': fields.function(
_get_statistics, string='Received Ratio',
type='integer', multi='_get_statistics',
),
'opened_ratio': fields.function(
_get_statistics, string='Opened Ratio',
type='integer', multi='_get_statistics',
),
'replied_ratio': fields.function(
_get_statistics, string='Replied Ratio',
type='integer', multi='_get_statistics',
),
# daily ratio
'opened_daily': fields.function(
_get_daily_statistics, string='Opened',
type='char', multi='_get_daily_statistics',
),
'replied_daily': fields.function(
_get_daily_statistics, string='Replied',
type='char', multi='_get_daily_statistics',
)
}
def default_get(self, cr, uid, fields, context=None):
res = super(MassMailing, self).default_get(cr, uid, fields, context=context)
if 'reply_to_mode' in fields and not 'reply_to_mode' in res and res.get('mailing_model'):
if res['mailing_model'] in ['res.partner', 'mail.mass_mailing.contact']:
res['reply_to_mode'] = 'email'
else:
res['reply_to_mode'] = 'thread'
return res
_defaults = {
'state': 'draft',
'email_from': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'reply_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'mailing_model': 'mail.mass_mailing.contact',
'contact_ab_pc': 100,
'mailing_domain': [],
}
#------------------------------------------------------
# Technical stuff
#------------------------------------------------------
def copy_data(self, cr, uid, id, default=None, context=None):
mailing = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_('%s (copy)') % mailing.name)
return super(MassMailing, self).copy_data(cr, uid, id, default, context=context)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
""" Override read_group to always display all states. """
if groupby and groupby[0] == "state":
# Default result structure
# states = self._get_state_list(cr, uid, context=context)
states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')]
read_group_all_states = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('state', '=', state_value)],
'state': state_value,
'state_count': 0,
} for state_value, state_name in states]
# Get standard results
read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
# Update standard results with default results
result = []
for state_value, state_name in states:
res = filter(lambda x: x['state'] == state_value, read_group_res)
if not res:
res = filter(lambda x: x['state'] == state_value, read_group_all_states)
res[0]['state'] = [state_value, state_name]
result.append(res[0])
return result
else:
return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
#------------------------------------------------------
# Views & Actions
#------------------------------------------------------
def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None):
value = {}
if mailing_model == 'mail.mass_mailing.contact':
mailing_list_ids = set()
for item in list_ids:
if isinstance(item, (int, long)):
mailing_list_ids.add(item)
elif len(item) == 3:
mailing_list_ids |= set(item[2])
if mailing_list_ids:
value['mailing_domain'] = "[('list_id', 'in', %s), ('opt_out', '=', False)]" % list(mailing_list_ids)
else:
value['mailing_domain'] = "[('list_id', '=', False)]"
else:
value['mailing_domain'] = []
return {'value': value}
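    # Illustrative note (added comment, not part of the original module): picking the
    # mailing-list model with lists whose ids are e.g. 3 and 7 makes this onchange return
    # {'value': {'mailing_domain': "[('list_id', 'in', [3, 7]), ('opt_out', '=', False)]"}},
    # while any other recipient model resets the domain to an empty list.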
def action_duplicate(self, cr, uid, ids, context=None):
copy_id = None
for mid in ids:
copy_id = self.copy(cr, uid, mid, context=context)
if copy_id:
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.mass_mailing',
'res_id': copy_id,
'context': context,
}
return False
def action_test_mailing(self, cr, uid, ids, context=None):
ctx = dict(context, default_mass_mailing_id=ids[0])
return {
'name': _('Test Mailing'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.mass_mailing.test',
'target': 'new',
'context': ctx,
}
def action_edit_html(self, cr, uid, ids, context=None):
if not len(ids) == 1:
raise ValueError('One and only one ID allowed for this action')
mail = self.browse(cr, uid, ids[0], context=context)
url = '/website_mail/email_designer?model=mail.mass_mailing&res_id=%d&template_model=%s&return_action=%d&enable_editor=1' % (ids[0], mail.mailing_model, context['params']['action'])
return {
'name': _('Open with Visual Editor'),
'type': 'ir.actions.act_url',
'url': url,
'target': 'self',
}
#------------------------------------------------------
# Email Sending
#------------------------------------------------------
def get_recipients(self, cr, uid, mailing, context=None):
if mailing.mailing_domain:
domain = eval(mailing.mailing_domain)
res_ids = self.pool[mailing.mailing_model].search(cr, uid, domain, context=context)
else:
res_ids = []
domain = [('id', 'in', res_ids)]
# randomly choose a fragment
if mailing.contact_ab_pc < 100:
contact_nbr = self.pool[mailing.mailing_model].search(cr, uid, domain, count=True, context=context)
topick = int(contact_nbr / 100.0 * mailing.contact_ab_pc)
if mailing.mass_mailing_campaign_id and mailing.mass_mailing_campaign_id.unique_ab_testing:
already_mailed = self.pool['mail.mass_mailing.campaign'].get_recipients(cr, uid, [mailing.mass_mailing_campaign_id.id], context=context)[mailing.mass_mailing_campaign_id.id]
else:
already_mailed = set([])
remaining = set(res_ids).difference(already_mailed)
if topick > len(remaining):
topick = len(remaining)
res_ids = random.sample(remaining, topick)
return res_ids
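    # Illustrative note (added comment, not part of the original module): with
    # contact_ab_pc = 25 and 200 records matching the domain, topick becomes 50 and
    # res_ids ends up as a random sample of 50 ids, excluding ids already mailed by the
    # campaign when unique_ab_testing is set; with contact_ab_pc = 100 the plain search
    # result is returned unchanged.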
def send_mail(self, cr, uid, ids, context=None):
author_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id
for mailing in self.browse(cr, uid, ids, context=context):
# instantiate an email composer + send emails
res_ids = self.get_recipients(cr, uid, mailing, context=context)
if not res_ids:
raise Warning('Please select recipients.')
comp_ctx = dict(context, active_ids=res_ids)
composer_values = {
'author_id': author_id,
'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
'body': mailing.body_html,
'subject': mailing.name,
'model': mailing.mailing_model,
'email_from': mailing.email_from,
'record_name': False,
'composition_mode': 'mass_mail',
'mass_mailing_id': mailing.id,
'mailing_list_ids': [(4, l.id) for l in mailing.contact_list_ids],
'no_auto_thread': mailing.reply_to_mode != 'thread',
}
if mailing.reply_to_mode == 'email':
composer_values['reply_to'] = mailing.reply_to
composer_id = self.pool['mail.compose.message'].create(cr, uid, composer_values, context=comp_ctx)
self.pool['mail.compose.message'].send_mail(cr, uid, [composer_id], context=comp_ctx)
self.write(cr, uid, [mailing.id], {'sent_date': fields.datetime.now(), 'state': 'done'}, context=context)
return True
|
SCSSG/Odoo-SCS | refs/heads/master | addons/account/report/account_invoice_report.py | 224 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
user_currency_id = user.company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(
cr, uid, [
('rate', '=', 1),
'|',
('currency_id.company_id', '=', user.company_id.id),
('currency_id.company_id', '=', False)
], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'commercial_partner_id': fields.many2one('res.partner', 'Partner Company', help="Commercial Entity"),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr': fields.integer('# of Invoices', readonly=True), # TDE FIXME master: rename into nbr_lines
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'country_id': fields.many2one('res.country', 'Country of the Partner Company'),
}
_order = 'date desc'
_depends = {
'account.invoice': [
'account_id', 'amount_total', 'commercial_partner_id', 'company_id',
'currency_id', 'date_due', 'date_invoice', 'fiscal_position',
'journal_id', 'partner_bank_id', 'partner_id', 'payment_term',
'period_id', 'residual', 'state', 'type', 'user_id',
],
'account.invoice.line': [
'account_id', 'invoice_id', 'price_subtotal', 'product_id',
'quantity', 'uos_id',
],
'product.product': ['product_tmpl_id'],
'product.template': ['categ_id'],
'product.uom': ['category_id', 'factor', 'name', 'uom_type'],
'res.currency.rate': ['currency_id', 'name'],
'res.partner': ['country_id'],
}
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual, sub.commercial_partner_id as commercial_partner_id
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id,
u2.name AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor * u2.factor
ELSE ail.quantity / u.factor * u2.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor * u2.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor * u2.factor)
ELSE SUM(ail.quantity / u.factor * u2.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / (SELECT count(*) FROM account_invoice_line l where invoice_id = ai.id) *
count(*) AS residual,
ai.commercial_partner_id as commercial_partner_id,
partner.country_id
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
JOIN res_partner partner ON ai.commercial_partner_id = partner.id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
LEFT JOIN product_uom u2 ON u2.id = pt.uom_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ai.date_invoice, ai.id,
ai.partner_id, ai.payment_term, ai.period_id, u2.name, u2.id, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, ai.commercial_partner_id, partner.country_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
%s
FROM (
%s %s %s
) AS sub
JOIN currency_rate cr ON
(cr.currency_id = sub.currency_id AND
cr.date_start <= COALESCE(sub.date, NOW()) AND
(cr.date_end IS NULL OR cr.date_end > COALESCE(sub.date, NOW())))
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
newsteinking/docker | refs/heads/master | tests/data/packages/LocalExtras/setup.py | 46 | import os
from setuptools import setup, find_packages
def path_to_url(path):
"""
Convert a path to URI. The path will be made absolute and
will not have quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join(filepath)
if drive:
return 'file:///' + drive + url
    return 'file://' + url
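# Illustrative note (added comment, not part of the original file): on a POSIX system
# path_to_url('pkg') resolves the path and yields something like 'file:///home/user/pkg',
# while on Windows the drive letter is preserved, e.g. 'file:///C:/pkg'; DEP_URL below is
# built this way so the dependency_links entry works from any checkout location.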
HERE = os.path.dirname(__file__)
DEP_PATH = os.path.join(HERE, '..', '..', 'indexes', 'simple', 'simple')
DEP_URL = path_to_url(DEP_PATH)
setup(
name='LocalExtras',
version='0.0.1',
packages=find_packages(),
extras_require={ 'bar': ['simple'] },
dependency_links=[DEP_URL]
)
|
rfkrocktk/powerpy | refs/heads/master | src/powerpy/util/__init__.py | 1 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import re
import string
class base36(object):
matcher = re.compile(r'^([a-z0-9]+)$', re.I)
alphabet = string.digits + string.lowercase
@classmethod
def encode(cls, value):
"""
Converts a number into a base36 string.
Arguments:
value: A number.
"""
if not isinstance(value, (int, long)):
            raise TypeError("Value for encoding must be an integer or a long.")
result = ''
sign = ''
if value < 0:
sign = '-'
value = abs(value)
if 0 <= value < len(cls.alphabet):
return sign + cls.alphabet[value]
while value != 0:
value, i = divmod(value, len(cls.alphabet))
result = cls.alphabet[i] + result
return result
@classmethod
def decode(cls, value):
"""
Converts a base-36 string into a number.
Arguments:
value: A base36 string.
"""
if not base36.matcher.match(value):
raise ValueError("Input value '{value}' is not a base36 string.".format(value=value))
return int(value, 36)
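# Usage sketch (added for illustration, not part of the original module): the two
# classmethods are inverses for non-negative integers, e.g.
# base36.encode(12345) == '9ix' and base36.decode('9ix') == 12345, while decode()
# raises ValueError for any input containing characters outside [a-z0-9].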
|
humblec/glusterblock-provisioner | refs/heads/master | vendor/k8s.io/kubernetes/hack/update_owners.py | 40 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import re
import json
import os
import random
import subprocess
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
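# Illustrative note (added comment, not part of the original script): normalize() strips
# bracketed tags and collapses whitespace, so a hypothetical entry such as
# '[k8s.io] Pods {Serial} should be restarted' becomes 'Pods should be restarted',
# which keeps the owners CSV keys stable when tags change.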
def get_test_history(days_ago):
url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
time.gmtime(time.time() - days_ago * 24 * 60 * 60))
resp = urllib2.urlopen(url)
content = resp.read()
if resp.headers.get('content-encoding') == 'gzip':
content = zlib.decompress(content, 15 | 16)
return json.loads(content)
def get_test_names_from_test_history():
test_names = set()
for days_ago in range(4):
test_history = get_test_history(days_ago)
test_names.update(normalize(name) for name in test_history['test_names'])
return test_names
def get_test_names_from_local_files():
tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
tests = json.loads(tests_json)
return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
for t in tests}
def load_owners(fname):
owners = {}
with open(fname) as f:
for n, (name, owner, random_assignment) in enumerate(csv.reader(f)):
if n == 0:
continue # header
owners[normalize(name)] = (owner, int(random_assignment))
return owners
def write_owners(fname, owners):
with open(fname, 'w') as f:
out = csv.writer(f, lineterminator='\n')
out.writerow(['name', 'owner', 'auto-assigned'])
sort_key = lambda (k, v): (k != 'DEFAULT', k) # put 'DEFAULT' first.
items = sorted(owners.items(), key=sort_key)
for name, (owner, random_assignment) in items:
out.writerow([name, owner, int(random_assignment)])
def get_maintainers():
# Github doesn't seem to support team membership listing without a key with
# org admin privileges. Instead, we do it manually:
# Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
# Run this in the js console:
# [].slice.call(document.querySelectorAll('.team-member-username a')).map(
# e => e.textContent.trim())
ret = {"alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
"bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
"david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
"eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
"gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
"jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
"justinsb", "kargakis", "Kashomon", "kevin-wangzefeng", "krousey",
"lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
"mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
"nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
"Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
"soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
"timothysc", "timstclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
"yujuhong", "zmerlynn"}
return sorted(ret - SKIP_MAINTAINERS)
def detect_github_username():
origin_url = subprocess.check_output(['git', 'config', 'remote.origin.url'])
m = re.search(r'github.com[:/](.*)/', origin_url)
if m and m.group(1) != 'kubernetes':
return m.group(1)
raise ValueError('unable to determine GitHub user from '
'`git config remote.origin.url` output, run with --user instead')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
options = parser.parse_args()
if options.history:
test_names = get_test_names_from_test_history()
else:
test_names = get_test_names_from_local_files()
test_names.add('DEFAULT')
test_names = sorted(test_names)
owners = load_owners(OWNERS_PATH)
outdated_tests = sorted(set(owners) - set(test_names))
new_tests = sorted(set(test_names) - set(owners))
maintainers = get_maintainers()
print '# OUTDATED TESTS (%d):' % len(outdated_tests)
print '\n'.join('%s -- %s%s' %
(t, owners[t][0], ['', ' (random)'][owners[t][1]])
for t in outdated_tests)
print '# NEW TESTS (%d):' % len(new_tests)
print '\n'.join(new_tests)
if options.check:
if new_tests or outdated_tests:
print
print 'ERROR: the test list has changed'
sys.exit(1)
sys.exit(0)
if not options.user:
options.user = detect_github_username()
for name in outdated_tests:
owners.pop(name)
if not options.addonly:
print '# UNEXPECTED MAINTAINERS ',
print '(randomly assigned, but not in kubernetes-maintainers)'
for name, (owner, random_assignment) in sorted(owners.iteritems()):
if random_assignment and owner not in maintainers:
print '%-16s %s' % (owner, name)
owners.pop(name)
print
owner_counts = collections.Counter(
owner for name, (owner, random) in owners.iteritems()
if owner in maintainers)
for test_name in set(test_names) - set(owners):
random_assignment = True
if options.user.lower() == 'random':
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
else:
new_owner = options.user
random_assignment = False
owner_counts[new_owner] += 1
owners[test_name] = (new_owner, random_assignment)
if options.user.lower() == 'random':
print '# Tests per maintainer:'
for owner, count in owner_counts.most_common():
print '%-20s %3d' % (owner, count)
write_owners(OWNERS_PATH, owners)
if __name__ == '__main__':
main()
|
zfil/ansible-modules-core | refs/heads/devel | files/replace.py | 103 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Evan Kaufman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import tempfile
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: true
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses multiline mode, which means C(^) and C($) match the beginning
and end respectively of I(each line) of the file.
replace:
required: false
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
"""
def write_changes(module,contents,dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd,'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc,err))
if valid:
module.atomic_move(tmpfile, dest)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(required=True, aliases=['name', 'destfile']),
regexp=dict(required=True),
replace=dict(default='', type='str'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
dest = os.path.expanduser(params['dest'])
if os.path.isdir(dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
if not os.path.exists(dest):
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
else:
f = open(dest, 'rb')
contents = f.read()
f.close()
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], contents, 0)
if result[1] > 0 and contents != result[0]:
msg = '%s replacements made' % result[1]
changed = True
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(dest):
module.backup_local(dest)
if params['follow'] and os.path.islink(dest):
dest = os.path.realpath(dest)
write_changes(module, result[0], dest)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
dhenrygithub/QGIS | refs/heads/master | python/ext-libs/future/past/types/olddict.py | 62 | """
A dict subclass for Python 3 that behaves like Python 2's dict
Example use:
>>> from past.builtins import dict
>>> d1 = dict() # instead of {} for an empty dict
>>> d2 = dict(key1='value1', key2='value2')
The keys, values and items methods now return lists on Python 3.x and there are
methods for iterkeys, itervalues, iteritems, and viewkeys etc.
>>> for d in (d1, d2):
... assert isinstance(d.keys(), list)
... assert isinstance(d.values(), list)
... assert isinstance(d.items(), list)
"""
import sys
from past.utils import with_metaclass
_builtin_dict = dict
ver = sys.version_info[:2]
class BaseOldDict(type):
def __instancecheck__(cls, instance):
return isinstance(instance, _builtin_dict)
class olddict(with_metaclass(BaseOldDict, _builtin_dict)):
"""
A backport of the Python 3 dict object to Py2
"""
iterkeys = _builtin_dict.keys
viewkeys = _builtin_dict.keys
def keys(self):
return list(super(olddict, self).keys())
itervalues = _builtin_dict.values
viewvalues = _builtin_dict.values
def values(self):
return list(super(olddict, self).values())
iteritems = _builtin_dict.items
viewitems = _builtin_dict.items
def items(self):
return list(super(olddict, self).items())
def has_key(self, k):
"""
D.has_key(k) -> True if D has a key k, else False
"""
return k in self
# def __new__(cls, *args, **kwargs):
# """
# dict() -> new empty dictionary
# dict(mapping) -> new dictionary initialized from a mapping object's
# (key, value) pairs
# dict(iterable) -> new dictionary initialized as if via:
# d = {}
# for k, v in iterable:
# d[k] = v
# dict(**kwargs) -> new dictionary initialized with the name=value pairs
# in the keyword argument list. For example: dict(one=1, two=2)
# """
#
# if len(args) == 0:
# return super(olddict, cls).__new__(cls)
# # Was: elif isinstance(args[0], newbytes):
# # We use type() instead of the above because we're redefining
# # this to be True for all unicode string subclasses. Warning:
# # This may render newstr un-subclassable.
# elif type(args[0]) == olddict:
# return args[0]
# # elif isinstance(args[0], _builtin_dict):
# # value = args[0]
# else:
# value = args[0]
# return super(olddict, cls).__new__(cls, value)
def __native__(self):
"""
Hook for the past.utils.native() function
"""
        return super(olddict, self)
__all__ = ['olddict']
|
NLeSC/eEcology-script-wrapper | refs/heads/master | extras/nagios/check_redis.py | 1 | #!/usr/local/bin/python2.7
import socket
import sys
from optparse import OptionParser
EXIT_OK = 0
EXIT_WARN = 1
EXIT_CRITICAL = 2
def get_info(host, port, timeout):
socket.setdefaulttimeout(timeout or None)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send("*1\r\n$4\r\ninfo\r\n")
buf = ""
while '\r\n\r\n' not in buf:
buf += s.recv(1024)
s.close()
return dict(x.split(':', 1) for x in buf.split('\r\n') if ':' in x)
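# Note added for clarity (the wire-format description is an assumption, not original code):
# the bytes sent above are the RESP encoding of a one-element command array ("info");
# the reply is a bulk string of colon-separated "key:value" lines, so get_info() returns
# something like {'used_memory': '1048576', 'used_memory_rss': '1310720', ...} and
# main() below only relies on the used_memory / used_memory_rss entries.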
def build_parser():
parser = OptionParser()
parser.add_option("-s", "--server", dest="server", help="Redis server to connect to.", default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", help="Redis port to connect to.", type="int", default=6379)
parser.add_option("-w", "--warn", dest="warn_memory", help="Memory utilization (in MB) that triggers a warning status.", type="int")
parser.add_option("-c", "--critical", dest="crit_memory", help="Memory utilization (in MB) that triggers a critical status.", type="int")
parser.add_option("-t", "--timeout", dest="timeout", help="Number of milliesconds to wait before timing out and considering redis down", type="int", default=2000)
return parser
def main():
parser = build_parser()
options, _args = parser.parse_args()
if not options.warn_memory:
parser.error("Warning level required")
if not options.crit_memory:
parser.error("Critical level required")
try:
info = get_info(options.server, int(options.port), timeout=options.timeout / 1000.0)
except socket.error, exc:
print "CRITICAL: Error connecting or getting INFO from redis %s:%s: %s" % (options.server, options.port, exc)
sys.exit(EXIT_CRITICAL)
memory = int(info.get("used_memory_rss") or info["used_memory"]) / (1024*1024)
if memory > options.crit_memory:
print "CRITICAL: Redis memory usage is %dMB (threshold %dMB)" % (memory, options.crit_memory)
sys.exit(EXIT_CRITICAL)
elif memory > options.warn_memory:
print "WARN: Redis memory usage is %dMB (threshold %dMB)" % (memory, options.warn_memory)
sys.exit(EXIT_WARN)
print "OK: Redis memory usage is %dMB" % memory
sys.exit(EXIT_OK)
if __name__ == "__main__":
main()
|
cpcloud/numba | refs/heads/master | numba/typeconv/rules.py | 2 | from __future__ import print_function, absolute_import
import itertools
from .typeconv import TypeManager, TypeCastingRules
from numba import types
default_type_manager = TypeManager()
def dump_number_rules():
tm = default_type_manager
for a, b in itertools.product(types.number_domain, types.number_domain):
print(a, '->', b, tm.check_compatible(a, b))
def _init_casting_rules(tm):
tcr = TypeCastingRules(tm)
tcr.safe_unsafe(types.boolean, types.int8)
tcr.safe_unsafe(types.boolean, types.uint8)
tcr.promote_unsafe(types.int8, types.int16)
tcr.promote_unsafe(types.uint8, types.uint16)
tcr.promote_unsafe(types.int16, types.int32)
tcr.promote_unsafe(types.uint16, types.uint32)
tcr.promote_unsafe(types.int32, types.int64)
tcr.promote_unsafe(types.uint32, types.uint64)
tcr.safe_unsafe(types.uint8, types.int16)
tcr.safe_unsafe(types.uint16, types.int32)
tcr.safe_unsafe(types.uint32, types.int64)
tcr.safe_unsafe(types.int16, types.float32)
tcr.safe_unsafe(types.int32, types.float64)
tcr.unsafe_unsafe(types.int32, types.float32)
# XXX this is inconsistent with the above; but we want to prefer
# float64 over int64 when typing a heterogeneous operation,
# e.g. `float64 + int64`. Perhaps we need more granularity in the
# conversion kinds.
tcr.safe_unsafe(types.int64, types.float64)
tcr.safe_unsafe(types.uint64, types.float64)
tcr.promote_unsafe(types.float32, types.float64)
tcr.safe(types.float32, types.complex64)
tcr.safe(types.float64, types.complex128)
tcr.promote_unsafe(types.complex64, types.complex128)
    # Allow integers to cast to void*
tcr.unsafe_unsafe(types.uintp, types.voidptr)
return tcr
default_casting_rules = _init_casting_rules(default_type_manager)
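# Illustrative note (added comment, not part of the original module): once these rules
# are registered, dump_number_rules() above prints the conversion kind that
# default_type_manager.check_compatible() reports for every ordered pair of number
# types; e.g. int32 -> float64 is registered as a safe cast while the reverse direction
# is only allowed as an unsafe one.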
|
aioue/ansible | refs/heads/devel | lib/ansible/cli/galaxy.py | 15 | ########################################################################
#
# (C) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_text
from ansible.playbook.role.requirement import RoleRequirement
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
    '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False,
help='Initialize the skeleton role with default contents for a Container Enabled role.')
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
                                   ' file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
super(GalaxyCLI, self).parse()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.options.ignore_errors:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = self.options.init_path
force = self.options.force
role_skeleton = self.options.role_skeleton
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='1.2',
container_enabled=self.options.container_enabled
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(os.path.join(rel_root, d)) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.options.roles_path
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
uses the args list of roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = self.options.role_file
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
display.deprecated("going forward only the yaml format will be supported", version="2.6")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.options.roles_path
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify user's identity via Github and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
github_user = to_text(self.args.pop(), errors='surrogate_or_strict')
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
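    # Example invocation (illustrative only, not part of the original source):
    #   ansible-galaxy import github_user github_repo
    # submits an import request for the given GitHub repository and, unless
    # --no-wait was passed, streams the import log until the task finishes.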
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
        if len(self.args) < 4:
            raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
provaleks/o8 | refs/heads/8.0 | addons/auth_signup/__openerp__.py | 313 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Signup',
'description': """
Allow users to sign up and reset their password
===============================================
""",
'author': 'OpenERP SA',
'version': '1.0',
'category': 'Authentication',
'website': 'https://www.odoo.com',
'installable': True,
'auto_install': True,
'depends': [
'base_setup',
'email_template',
'web',
],
'data': [
'auth_signup_data.xml',
'res_config.xml',
'res_users_view.xml',
'views/auth_signup_login.xml',
],
'bootstrap': True,
}
|
aselle/tensorflow | refs/heads/master | tensorflow/python/debug/lib/grpc_debug_server.py | 57 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""gRPC debug server in Python."""
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import threading
import time
from concurrent import futures
import grpc
from six.moves import queue
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_service_pb2_grpc
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
DebugWatch = collections.namedtuple("DebugWatch",
["node_name", "output_slot", "debug_op"])
def _state_change(new_state, node_name, output_slot, debug_op):
state_change = debug_service_pb2.EventReply.DebugOpStateChange()
state_change.state = new_state
state_change.node_name = node_name
state_change.output_slot = output_slot
state_change.debug_op = debug_op
return state_change
class EventListenerBaseStreamHandler(object):
"""Per-stream handler of EventListener gRPC streams."""
def __init__(self):
"""Constructor of EventListenerBaseStreamHandler."""
def on_core_metadata_event(self, event):
"""Callback for core metadata.
Args:
event: The Event proto that carries a JSON string in its
`log_message.message` field.
Returns:
`None` or an `EventReply` proto to be sent back to the client. If `None`,
      an `EventReply` proto constructed with the default no-arg constructor will
be sent back to the client.
"""
raise NotImplementedError(
"on_core_metadata_event() is not implemented in the base servicer "
"class")
def on_graph_def(self, graph_def, device_name, wall_time):
"""Callback for Event proto received through the gRPC stream.
This Event proto carries a GraphDef, encoded as bytes, in its graph_def
field.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
Returns:
`None` or an `EventReply` proto to be sent back to the client. If `None`,
      an `EventReply` proto constructed with the default no-arg constructor will
be sent back to the client.
"""
raise NotImplementedError(
"on_graph_def() is not implemented in the base servicer class")
def on_value_event(self, event):
"""Callback for Event proto received through the gRPC stream.
This Event proto carries a Tensor in its summary.value[0] field.
Args:
event: The Event proto from the stream to be processed.
"""
raise NotImplementedError(
"on_value_event() is not implemented in the base servicer class")
class EventListenerBaseServicer(debug_service_pb2_grpc.EventListenerServicer):
"""Base Python class for gRPC debug server."""
def __init__(self, server_port, stream_handler_class):
"""Constructor.
Args:
server_port: (int) Port number to bind to.
stream_handler_class: A class of the base class
        `EventListenerBaseStreamHandler` that will be used to construct
stream handler objects during `SendEvents` calls.
"""
self._server_port = server_port
self._stream_handler_class = stream_handler_class
self._server_lock = threading.Lock()
self._server_started = False
self._stop_requested = False
self._debug_ops_state_change_queue = queue.Queue()
self._gated_grpc_debug_watches = set()
self._breakpoints = set()
def SendEvents(self, request_iterator, context):
"""Implementation of the SendEvents service method.
This method receives streams of Event protos from the client, and processes
them in ways specified in the on_event() callback. The stream is
bi-directional, but currently only the client-to-server stream (i.e., the
stream from the debug ops to the server) is used.
Args:
request_iterator: The incoming stream of Event protos.
context: Server context.
Raises:
      ValueError: If more than one core metadata event is received.
Yields:
An empty stream of responses.
"""
core_metadata_count = 0
# A map from GraphDef hash to a list of received chunks.
graph_def_chunks = {}
tensor_chunks = {}
stream_handler = None
for event in request_iterator:
if not stream_handler:
stream_handler = self._stream_handler_class()
if event.summary and event.summary.value:
# An Event proto carrying a tensor value.
maybe_tensor_event = self._process_tensor_event_in_chunks(
event, tensor_chunks)
if maybe_tensor_event:
event_reply = stream_handler.on_value_event(maybe_tensor_event)
if event_reply is not None:
yield self._process_debug_op_state_changes(event_reply)
else:
# Non-tensor-value Event.
if event.graph_def:
# GraphDef-carrying Event.
maybe_graph_def, maybe_device_name, maybe_wall_time = (
self._process_encoded_graph_def_in_chunks(
event, graph_def_chunks))
if maybe_graph_def:
reply = stream_handler.on_graph_def(
maybe_graph_def, maybe_device_name, maybe_wall_time)
yield self._process_debug_op_state_changes(reply)
elif event.log_message.message:
# Core metadata-carrying Event.
core_metadata_count += 1
if core_metadata_count > 1:
raise ValueError(
"Expected one core metadata event; received multiple")
reply = stream_handler.on_core_metadata_event(event)
yield self._process_debug_op_state_changes(reply)
def _process_debug_op_state_changes(self, event_reply=None):
"""Dequeue and process all the queued debug-op state change protos.
Include all the debug-op state change protos in a `EventReply` proto.
Args:
event_reply: An `EventReply` to add the `DebugOpStateChange` protos to,
or `None`.
Returns:
An `EventReply` proto with the dequeued `DebugOpStateChange` protos (if
any) added.
"""
if event_reply is None:
event_reply = debug_service_pb2.EventReply()
while not self._debug_ops_state_change_queue.empty():
state_change = self._debug_ops_state_change_queue.get()
debug_node_key = (state_change.node_name, state_change.output_slot,
state_change.debug_op)
if (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE):
logging.info("Adding breakpoint %s:%d:%s", state_change.node_name,
state_change.output_slot, state_change.debug_op)
self._breakpoints.add(debug_node_key)
elif (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY):
logging.info("Adding watchpoint %s:%d:%s", state_change.node_name,
state_change.output_slot, state_change.debug_op)
if debug_node_key in self._breakpoints:
self._breakpoints.discard(debug_node_key)
elif (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED):
logging.info("Removing watchpoint or breakpoint: %s:%d:%s",
state_change.node_name, state_change.output_slot,
state_change.debug_op)
if debug_node_key in self._breakpoints:
self._breakpoints.discard(debug_node_key)
else:
logging.warn(
"Attempting to remove a non-existent debug node key: %s",
debug_node_key)
new_state_change = event_reply.debug_op_state_changes.add()
new_state_change.CopyFrom(state_change)
return event_reply
def _process_tensor_event_in_chunks(self, event, tensor_chunks):
"""Possibly reassemble event chunks.
Due to gRPC's message size limit, a large tensor can be encapsulated in
multiple Event proto chunks to be sent through the debugger stream. This
    method keeps track of the chunks that have arrived, reassembles all chunks
    corresponding to a tensor when they have arrived, and returns the reassembled
Event proto.
Args:
event: The single Event proto that has arrived.
tensor_chunks: A dict used to keep track of the Event protos that have
arrived but haven't been reassembled.
Returns:
If all Event protos corresponding to a tensor have arrived, returns the
reassembled Event proto. Otherwise, return None.
"""
value = event.summary.value[0]
debugger_plugin_metadata = json.loads(
compat.as_text(value.metadata.plugin_data.content))
device_name = debugger_plugin_metadata["device"]
num_chunks = debugger_plugin_metadata["numChunks"]
chunk_index = debugger_plugin_metadata["chunkIndex"]
if num_chunks <= 1:
return event
debug_node_name = value.node_name
timestamp = int(event.wall_time)
tensor_key = "%s_%s_%d" % (device_name, debug_node_name, timestamp)
if tensor_key not in tensor_chunks:
tensor_chunks[tensor_key] = [None] * num_chunks
chunks = tensor_chunks[tensor_key]
if value.tensor.tensor_content:
chunks[chunk_index] = value.tensor
elif value.tensor.string_val:
chunks[chunk_index] = event
if None not in chunks:
if value.tensor.tensor_content:
event.summary.value[0].tensor.tensor_content = b"".join(
chunk.tensor_content for chunk in chunks)
del tensor_chunks[tensor_key]
return event
elif value.tensor.string_val:
merged_event = chunks[0]
for chunk in chunks[1:]:
merged_event.summary.value[0].tensor.string_val.extend(
list(chunk.summary.value[0].tensor.string_val))
return merged_event
def _process_encoded_graph_def_in_chunks(self,
event,
graph_def_chunks):
"""Process an Event proto containing a chunk of encoded GraphDef.
Args:
event: the Event proto containing the chunk of encoded GraphDef.
graph_def_chunks: A dict mapping keys for GraphDefs (i.e.,
"<graph_def_hash>,<device_name>,<wall_time>") to a list of chunks of
encoded GraphDefs.
Returns:
If all chunks of the GraphDef have arrived,
return decoded GraphDef proto, device name, wall_time.
Otherwise,
return None, None, None.
"""
graph_def = graph_pb2.GraphDef()
index_bar_0 = event.graph_def.find(b"|")
index_bar_1 = event.graph_def.find(b"|", index_bar_0 + 1)
index_bar_2 = event.graph_def.find(b"|", index_bar_1 + 1)
graph_def_hash_device_timestamp = event.graph_def[:index_bar_0]
chunk_index = int(event.graph_def[index_bar_0 + 1 : index_bar_1])
num_chunks = int(event.graph_def[index_bar_1 + 1 : index_bar_2])
if graph_def_hash_device_timestamp not in graph_def_chunks:
graph_def_chunks[graph_def_hash_device_timestamp] = [None] * num_chunks
graph_def_chunks[graph_def_hash_device_timestamp][
chunk_index] = event.graph_def[index_bar_2 + 1:]
if all(graph_def_chunks[graph_def_hash_device_timestamp]):
device_name = graph_def_hash_device_timestamp.split(b",")[1]
wall_time = int(graph_def_hash_device_timestamp.split(b",")[2])
graph_def.ParseFromString(
b"".join(graph_def_chunks[graph_def_hash_device_timestamp]))
del graph_def_chunks[graph_def_hash_device_timestamp]
self._process_graph_def(graph_def)
return graph_def, device_name, wall_time
else:
return None, None, None
def _process_graph_def(self, graph_def):
for node_def in graph_def.node:
if (debug_graphs.is_debug_node(node_def.name) and
node_def.attr["gated_grpc"].b):
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node_def.name))
self._gated_grpc_debug_watches.add(
DebugWatch(node_name, output_slot, debug_op))
def run_server(self, blocking=True):
"""Start running the server.
Args:
blocking: If `True`, block until `stop_server()` is invoked.
Raises:
ValueError: If server stop has already been requested, or if the server
has already started running.
"""
self._server_lock.acquire()
try:
if self._stop_requested:
raise ValueError("Server has already stopped")
if self._server_started:
raise ValueError("Server has already started running")
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
debug_service_pb2_grpc.add_EventListenerServicer_to_server(self,
self.server)
self.server.add_insecure_port("[::]:%d" % self._server_port)
self.server.start()
self._server_started = True
finally:
self._server_lock.release()
if blocking:
while not self._stop_requested:
time.sleep(1.0)
def stop_server(self, grace=1.0):
"""Request server stopping.
    Once stopped, the server cannot be stopped or started again. This method is
non-blocking. Call `wait()` on the returned event to block until the server
has completely stopped.
Args:
grace: Grace period in seconds to be used when calling `server.stop()`.
Raises:
ValueError: If server stop has already been requested, or if the server
has not started running yet.
Returns:
A threading.Event that will be set when the server has completely stopped.
"""
self._server_lock.acquire()
try:
if not self._server_started:
raise ValueError("Server has not started running")
if self._stop_requested:
raise ValueError("Server has already stopped")
self._stop_requested = True
return self.server.stop(grace=grace)
finally:
self._server_lock.release()
def request_watch(self, node_name, output_slot, debug_op, breakpoint=False):
"""Request enabling a debug tensor watchpoint or breakpoint.
    This will let the server send an EventReply to the client side
(i.e., the debugged TensorFlow runtime process) to request adding a watch
key (i.e., <node_name>:<output_slot>:<debug_op>) to the list of enabled
watch keys. The list applies only to debug ops with the attribute
gated_grpc=True.
To disable the watch, use `request_unwatch()`.
Args:
node_name: (`str`) name of the node that the to-be-watched tensor belongs
to, e.g., "hidden/Weights".
output_slot: (`int`) output slot index of the tensor to watch.
debug_op: (`str`) name of the debug op to enable. This should not include
any attribute substrings.
breakpoint: (`bool`) Iff `True`, the debug op will block and wait until it
receives an `EventReply` response from the server. The `EventReply`
proto may carry a TensorProto that modifies the value of the debug op's
output tensor.
"""
self._debug_ops_state_change_queue.put(
_state_change(
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE
if breakpoint
else debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY,
node_name, output_slot, debug_op))
def request_unwatch(self, node_name, output_slot, debug_op):
"""Request disabling a debug tensor watchpoint or breakpoint.
This is the opposite of `request_watch()`.
Args:
node_name: (`str`) name of the node that the to-be-watched tensor belongs
to, e.g., "hidden/Weights".
output_slot: (`int`) output slot index of the tensor to watch.
debug_op: (`str`) name of the debug op to enable. This should not include
any attribute substrings.
"""
self._debug_ops_state_change_queue.put(
_state_change(
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name,
output_slot, debug_op))
@property
def breakpoints(self):
"""Get a set of the currently-activated breakpoints.
Returns:
A `set` of 3-tuples: (node_name, output_slot, debug_op), e.g.,
{("MatMul", 0, "DebugIdentity")}.
"""
return self._breakpoints
def gated_grpc_debug_watches(self):
"""Get the list of debug watches with attribute gated_grpc=True.
Since the server receives `GraphDef` from the debugged runtime, it can only
return such debug watches that it has received so far.
Returns:
A `list` of `DebugWatch` `namedtuples` representing the debug watches with
gated_grpc=True. Each `namedtuple` element has the attributes:
`node_name` as a `str`,
`output_slot` as an `int`,
`debug_op` as a `str`.
"""
return list(self._gated_grpc_debug_watches)
def SendTracebacks(self, request, context):
"""Base implementation of the handling of SendTracebacks calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `CallTraceback` proto, containing information about the
type (e.g., graph vs. eager execution) and source-code traceback of the
call and (any) associated `tf.Graph`s.
context: Server context.
Returns:
      An `EventReply` proto.
"""
return debug_service_pb2.EventReply()
def SendSourceFiles(self, request, context):
"""Base implementation of the handling of SendSourceFiles calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `DebuggedSourceFiles` proto, containing the path, content, size
and last-modified timestamp of source files.
context: Server context.
Returns:
      An `EventReply` proto.
"""
return debug_service_pb2.EventReply()
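# A rough usage sketch (not part of the original module; the handler name and
# port below are illustrative). A concrete server subclasses the stream handler
# to react to the three callbacks and passes that class to the servicer:
#
#   class _LoggingHandler(EventListenerBaseStreamHandler):
#
#     def on_core_metadata_event(self, event):
#       logging.info("core metadata: %s", event.log_message.message)
#
#     def on_graph_def(self, graph_def, device_name, wall_time):
#       logging.info("graph from device %s at %d", device_name, wall_time)
#
#     def on_value_event(self, event):
#       logging.info("value for %s", event.summary.value[0].node_name)
#
#   server = EventListenerBaseServicer(6064, _LoggingHandler)
#   server.run_server(blocking=False)   # serve in the background
#   ...
#   server.stop_server().wait()         # block until fully stopped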
|
i4Ds/IRE | refs/heads/master | IREMedia/libraries/OpenCV/modules/ts/misc/report.py | 19 | #!/usr/bin/env python
import testlog_parser, sys, os, xml, re, glob
from table_formatter import *
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), mks, ns or ticks)", metavar="UNITS", default="ms")
parser.add_option("-c", "--columns", dest="columns", help="comma-separated list of columns to show", metavar="COLS", default="")
parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
(options, args) = parser.parse_args()
if len(args) < 1:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
exit(0)
options.generateHtml = detectHtmlOutputType(options.format)
# expand wildcards and filter duplicates
files = []
files1 = []
for arg in args:
if ("*" in arg) or ("?" in arg):
files1.extend([os.path.abspath(f) for f in glob.glob(arg)])
else:
files.append(os.path.abspath(arg))
seen = set()
files = [ x for x in files if x not in seen and not seen.add(x)]
files.extend((set(files1) - set(files)))
args = files
# load test data
tests = []
files = []
for arg in set(args):
try:
cases = testlog_parser.parseLogFile(arg)
if cases:
files.append(os.path.basename(arg))
tests.extend(cases)
except:
pass
if options.filter:
expr = re.compile(options.filter)
tests = [t for t in tests if expr.search(str(t))]
tbl = table(", ".join(files))
if options.columns:
metrics = [s.strip() for s in options.columns.split(",")]
metrics = [m for m in metrics if m and not m.endswith("%") and m in metrix_table]
else:
metrics = None
if not metrics:
metrics = ["name", "samples", "outliers", "min", "median", "gmean", "mean", "stddev"]
if "name" not in metrics:
metrics.insert(0, "name")
for m in metrics:
if m == "name":
tbl.newColumn(m, metrix_table[m][0])
else:
tbl.newColumn(m, metrix_table[m][0], align = "center")
needNewRow = True
for case in sorted(tests):
if needNewRow:
tbl.newRow()
if not options.showall:
needNewRow = False
status = case.get("status")
if status != "run":
if status != "notrun":
needNewRow = True
for m in metrics:
if m == "name":
tbl.newCell(m, str(case))
else:
tbl.newCell(m, status, color = "red")
else:
needNewRow = True
for m in metrics:
val = metrix_table[m][1](case, None, options.units)
if isinstance(val, float):
tbl.newCell(m, "%.2f %s" % (val, options.units), val)
else:
tbl.newCell(m, val, val)
if not needNewRow:
tbl.trimLastRow()
# output table
if options.generateHtml:
if options.format == "moinwiki":
tbl.htmlPrintTable(sys.stdout, True)
else:
htmlPrintHeader(sys.stdout, "Report %s tests from %s" % (len(tests), ", ".join(files)))
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)
|
pulkitpahwa/smart-image-coordinates | refs/heads/master | smarter/base/apps.py | 15 | from __future__ import unicode_literals
from django.apps import AppConfig
class BaseConfig(AppConfig):
name = 'base'
|
watonyweng/nova | refs/heads/master | nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py | 79 | # Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from oslo_db.sqlalchemy import utils
from sqlalchemy import MetaData, schema, Table
FKEYS = [
('fixed_ips', 'instance_uuid', 'instances', 'uuid',
'fixed_ips_instance_uuid_fkey'),
('block_device_mapping', 'instance_uuid', 'instances', 'uuid',
'block_device_mapping_instance_uuid_fkey'),
('instance_info_caches', 'instance_uuid', 'instances', 'uuid',
'instance_info_caches_instance_uuid_fkey'),
('instance_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_metadata_instance_uuid_fkey'),
('instance_system_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_system_metadata_ibfk_1'),
('instance_type_projects', 'instance_type_id', 'instance_types', 'id',
'instance_type_projects_ibfk_1'),
('iscsi_targets', 'volume_id', 'volumes', 'id',
'iscsi_targets_volume_id_fkey'),
('reservations', 'usage_id', 'quota_usages', 'id',
'reservations_ibfk_1'),
('security_group_instance_association', 'instance_uuid',
'instances', 'uuid',
'security_group_instance_association_instance_uuid_fkey'),
('security_group_instance_association', 'security_group_id',
'security_groups', 'id',
'security_group_instance_association_ibfk_1'),
('virtual_interfaces', 'instance_uuid', 'instances', 'uuid',
'virtual_interfaces_instance_uuid_fkey'),
('compute_nodes', 'service_id', 'services', 'id',
'fk_compute_nodes_service_id'),
('instance_actions', 'instance_uuid', 'instances', 'uuid',
'fk_instance_actions_instance_uuid'),
('instance_faults', 'instance_uuid', 'instances', 'uuid',
'fk_instance_faults_instance_uuid'),
('migrations', 'instance_uuid', 'instances', 'uuid',
'fk_migrations_instance_uuid'),
]
UNIQUES = [
('compute_nodes', 'uniq_compute_nodes0host0hypervisor_hostname',
['host', 'hypervisor_hostname']),
('fixed_ips', 'uniq_fixed_ips0address0deleted',
['address', 'deleted']),
('instance_info_caches', 'uniq_instance_info_caches0instance_uuid',
['instance_uuid']),
('instance_type_projects',
'uniq_instance_type_projects0instance_type_id0project_id0deleted',
['instance_type_id', 'project_id', 'deleted']),
('pci_devices', 'uniq_pci_devices0compute_node_id0address0deleted',
['compute_node_id', 'address', 'deleted']),
('virtual_interfaces', 'uniq_virtual_interfaces0address0deleted',
['address', 'deleted']),
]
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == 'sqlite':
# SQLite is also missing this one index
if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'):
utils.add_index(migrate_engine, 'fixed_ips', 'address',
['address'])
for src_table, src_column, dst_table, dst_column, name in FKEYS:
src_table = Table(src_table, meta, autoload=True)
if name in set(fk.name for fk in src_table.foreign_keys):
continue
src_column = src_table.c[src_column]
dst_table = Table(dst_table, meta, autoload=True)
dst_column = dst_table.c[dst_column]
fkey = ForeignKeyConstraint(columns=[src_column],
refcolumns=[dst_column],
name=name)
fkey.create()
# SQLAlchemy versions < 1.0.0 don't reflect unique constraints
# for SQLite correctly causing sqlalchemy-migrate to recreate
# some tables with missing unique constraints. Re-add some
# potentially missing unique constraints as a workaround.
for table_name, name, column_names in UNIQUES:
table = Table(table_name, meta, autoload=True)
if name in set(c.name for c in table.constraints
                       if isinstance(c, schema.UniqueConstraint)):
continue
uc = UniqueConstraint(*column_names, table=table, name=name)
uc.create()
|
Vauxoo/hr | refs/heads/8.0 | hr_employee_age/models/hr_employee.py | 13 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2015 Salton Massally (<[email protected]>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from openerp import fields, models, api
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DFORMAT
class HrEmployee(models.Model):
_inherit = 'hr.employee'
age = fields.Integer(
'Age',
readonly=True,
compute='_compute_age'
)
@api.one
def _compute_age(self):
if self.birthday:
dBday = datetime.strptime(self.birthday, OE_DFORMAT).date()
dToday = datetime.now().date()
self.age = dToday.year - dBday.year - ((
dToday.month, dToday.day) < (dBday.month, dBday.day))
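# Worked example (illustrative, not part of the original module): the tuple
# comparison above contributes 1 when the birthday has not yet occurred this
# year, e.g. born 1990-06-15 with today being 2015-03-01 gives
#   2015 - 1990 - ((3, 1) < (6, 15)) == 25 - 1 == 24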
|
Workday/OpenFrame | refs/heads/master | tools/json_schema_compiler/features_compiler.py | 49 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generator for C++ features from json files.
Usage example:
features_compiler.py --destdir gen --root /home/Work/src _permissions.json
"""
import optparse
import os
from schema_loader import SchemaLoader
from features_cc_generator import CCGenerator
from features_h_generator import HGenerator
from model import CreateFeature
def _GenerateSchema(filename, root, destdir, namespace):
"""Generates C++ features files from the json file |filename|.
"""
# Load in the feature permissions from the JSON file.
schema = os.path.normpath(filename)
schema_loader = SchemaLoader(os.path.dirname(os.path.relpath(schema, root)),
os.path.dirname(schema),
[],
None)
schema_filename = os.path.splitext(schema)[0]
feature_defs = schema_loader.LoadSchema(schema)
# Generate a list of the features defined and a list of their models.
feature_list = []
for feature_def, feature in feature_defs.iteritems():
feature_list.append(CreateFeature(feature_def, feature))
source_file_dir, _ = os.path.split(schema)
relpath = os.path.relpath(os.path.normpath(source_file_dir), root)
full_path = os.path.join(relpath, schema)
generators = [
('%s.cc' % schema_filename, CCGenerator()),
('%s.h' % schema_filename, HGenerator())
]
# Generate and output the code for all features.
output_code = []
for filename, generator in generators:
code = generator.Generate(feature_list, full_path, namespace).Render()
if destdir:
with open(os.path.join(destdir, relpath, filename), 'w') as f:
f.write(code)
output_code += [filename, '', code, '']
return '\n'.join(output_code)
if __name__ == '__main__':
parser = optparse.OptionParser(
description='Generates a C++ features model from JSON schema',
usage='usage: %prog [option]... schema')
parser.add_option('-r', '--root', default='.',
help='logical include root directory. Path to schema files from '
'specified dir will be the include path.')
parser.add_option('-d', '--destdir',
help='root directory to output generated files.')
parser.add_option('-n', '--namespace', default='generated_features',
                    help='C++ namespace for generated files. e.g. extensions::api.')
(opts, filenames) = parser.parse_args()
# Only one file is currently specified.
if len(filenames) != 1:
raise ValueError('One (and only one) file is required (for now).')
result = _GenerateSchema(filenames[0], opts.root, opts.destdir,
opts.namespace)
if not opts.destdir:
print result
|
cfg2015/EPT-2015-2 | refs/heads/master | addons/website_event_sale/__init__.py | 1577 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
|
kenwang815/KodiPlugins | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/rtvnh.py | 9 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class RTVNHIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rtvnh.nl/video/131946',
'md5': '6e1d0ab079e2a00b6161442d3ceacfc1',
'info_dict': {
'id': '131946',
'ext': 'mp4',
'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
'thumbnail': 're:^https?:.*\.jpg$'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._parse_json(self._download_webpage(
'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
status = meta.get('status')
if status != 200:
raise ExtractorError(
'%s returned error code %d' % (self.IE_NAME, status), expected=True)
formats = self._extract_smil_formats(
'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id, fatal=False)
for item in meta['source']['fb']:
if item.get('type') == 'hls':
formats.extend(self._extract_m3u8_formats(
item['file'], video_id, ext='mp4', entry_protocol='m3u8_native'))
elif item.get('type') == '':
formats.append({'url': item['file']})
self._sort_formats(formats)
return {
'id': video_id,
'title': meta['title'].strip(),
'thumbnail': meta.get('image'),
'formats': formats
}
|
analurandis/Tur | refs/heads/master | backend/venv/Lib/site-packages/unidecode/x0c6.py | 253 | data = (
'yeoss', # 0x00
'yeong', # 0x01
'yeoj', # 0x02
'yeoc', # 0x03
'yeok', # 0x04
'yeot', # 0x05
'yeop', # 0x06
'yeoh', # 0x07
'ye', # 0x08
'yeg', # 0x09
'yegg', # 0x0a
'yegs', # 0x0b
'yen', # 0x0c
'yenj', # 0x0d
'yenh', # 0x0e
'yed', # 0x0f
'yel', # 0x10
'yelg', # 0x11
'yelm', # 0x12
'yelb', # 0x13
'yels', # 0x14
'yelt', # 0x15
'yelp', # 0x16
'yelh', # 0x17
'yem', # 0x18
'yeb', # 0x19
'yebs', # 0x1a
'yes', # 0x1b
'yess', # 0x1c
'yeng', # 0x1d
'yej', # 0x1e
'yec', # 0x1f
'yek', # 0x20
'yet', # 0x21
'yep', # 0x22
'yeh', # 0x23
'o', # 0x24
'og', # 0x25
'ogg', # 0x26
'ogs', # 0x27
'on', # 0x28
'onj', # 0x29
'onh', # 0x2a
'od', # 0x2b
'ol', # 0x2c
'olg', # 0x2d
'olm', # 0x2e
'olb', # 0x2f
'ols', # 0x30
'olt', # 0x31
'olp', # 0x32
'olh', # 0x33
'om', # 0x34
'ob', # 0x35
'obs', # 0x36
'os', # 0x37
'oss', # 0x38
'ong', # 0x39
'oj', # 0x3a
'oc', # 0x3b
'ok', # 0x3c
'ot', # 0x3d
'op', # 0x3e
'oh', # 0x3f
'wa', # 0x40
'wag', # 0x41
'wagg', # 0x42
'wags', # 0x43
'wan', # 0x44
'wanj', # 0x45
'wanh', # 0x46
'wad', # 0x47
'wal', # 0x48
'walg', # 0x49
'walm', # 0x4a
'walb', # 0x4b
'wals', # 0x4c
'walt', # 0x4d
'walp', # 0x4e
'walh', # 0x4f
'wam', # 0x50
'wab', # 0x51
'wabs', # 0x52
'was', # 0x53
'wass', # 0x54
'wang', # 0x55
'waj', # 0x56
'wac', # 0x57
'wak', # 0x58
'wat', # 0x59
'wap', # 0x5a
'wah', # 0x5b
'wae', # 0x5c
'waeg', # 0x5d
'waegg', # 0x5e
'waegs', # 0x5f
'waen', # 0x60
'waenj', # 0x61
'waenh', # 0x62
'waed', # 0x63
'wael', # 0x64
'waelg', # 0x65
'waelm', # 0x66
'waelb', # 0x67
'waels', # 0x68
'waelt', # 0x69
'waelp', # 0x6a
'waelh', # 0x6b
'waem', # 0x6c
'waeb', # 0x6d
'waebs', # 0x6e
'waes', # 0x6f
'waess', # 0x70
'waeng', # 0x71
'waej', # 0x72
'waec', # 0x73
'waek', # 0x74
'waet', # 0x75
'waep', # 0x76
'waeh', # 0x77
'oe', # 0x78
'oeg', # 0x79
'oegg', # 0x7a
'oegs', # 0x7b
'oen', # 0x7c
'oenj', # 0x7d
'oenh', # 0x7e
'oed', # 0x7f
'oel', # 0x80
'oelg', # 0x81
'oelm', # 0x82
'oelb', # 0x83
'oels', # 0x84
'oelt', # 0x85
'oelp', # 0x86
'oelh', # 0x87
'oem', # 0x88
'oeb', # 0x89
'oebs', # 0x8a
'oes', # 0x8b
'oess', # 0x8c
'oeng', # 0x8d
'oej', # 0x8e
'oec', # 0x8f
'oek', # 0x90
'oet', # 0x91
'oep', # 0x92
'oeh', # 0x93
'yo', # 0x94
'yog', # 0x95
'yogg', # 0x96
'yogs', # 0x97
'yon', # 0x98
'yonj', # 0x99
'yonh', # 0x9a
'yod', # 0x9b
'yol', # 0x9c
'yolg', # 0x9d
'yolm', # 0x9e
'yolb', # 0x9f
'yols', # 0xa0
'yolt', # 0xa1
'yolp', # 0xa2
'yolh', # 0xa3
'yom', # 0xa4
'yob', # 0xa5
'yobs', # 0xa6
'yos', # 0xa7
'yoss', # 0xa8
'yong', # 0xa9
'yoj', # 0xaa
'yoc', # 0xab
'yok', # 0xac
'yot', # 0xad
'yop', # 0xae
'yoh', # 0xaf
'u', # 0xb0
'ug', # 0xb1
'ugg', # 0xb2
'ugs', # 0xb3
'un', # 0xb4
'unj', # 0xb5
'unh', # 0xb6
'ud', # 0xb7
'ul', # 0xb8
'ulg', # 0xb9
'ulm', # 0xba
'ulb', # 0xbb
'uls', # 0xbc
'ult', # 0xbd
'ulp', # 0xbe
'ulh', # 0xbf
'um', # 0xc0
'ub', # 0xc1
'ubs', # 0xc2
'us', # 0xc3
'uss', # 0xc4
'ung', # 0xc5
'uj', # 0xc6
'uc', # 0xc7
'uk', # 0xc8
'ut', # 0xc9
'up', # 0xca
'uh', # 0xcb
'weo', # 0xcc
'weog', # 0xcd
'weogg', # 0xce
'weogs', # 0xcf
'weon', # 0xd0
'weonj', # 0xd1
'weonh', # 0xd2
'weod', # 0xd3
'weol', # 0xd4
'weolg', # 0xd5
'weolm', # 0xd6
'weolb', # 0xd7
'weols', # 0xd8
'weolt', # 0xd9
'weolp', # 0xda
'weolh', # 0xdb
'weom', # 0xdc
'weob', # 0xdd
'weobs', # 0xde
'weos', # 0xdf
'weoss', # 0xe0
'weong', # 0xe1
'weoj', # 0xe2
'weoc', # 0xe3
'weok', # 0xe4
'weot', # 0xe5
'weop', # 0xe6
'weoh', # 0xe7
'we', # 0xe8
'weg', # 0xe9
'wegg', # 0xea
'wegs', # 0xeb
'wen', # 0xec
'wenj', # 0xed
'wenh', # 0xee
'wed', # 0xef
'wel', # 0xf0
'welg', # 0xf1
'welm', # 0xf2
'welb', # 0xf3
'wels', # 0xf4
'welt', # 0xf5
'welp', # 0xf6
'welh', # 0xf7
'wem', # 0xf8
'web', # 0xf9
'webs', # 0xfa
'wes', # 0xfb
'wess', # 0xfc
'weng', # 0xfd
'wej', # 0xfe
'wec', # 0xff
)
|
scorphus/django | refs/heads/master | tests/defer/models.py | 282 | """
Tests for defer() and only().
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Secondary(models.Model):
first = models.CharField(max_length=50)
second = models.CharField(max_length=50)
@python_2_unicode_compatible
class Primary(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=50)
related = models.ForeignKey(Secondary, models.CASCADE)
def __str__(self):
return self.name
class Child(Primary):
pass
class BigChild(Primary):
other = models.CharField(max_length=50)
class ChildProxy(Child):
class Meta:
proxy = True
class RefreshPrimaryProxy(Primary):
class Meta:
proxy = True
def refresh_from_db(self, using=None, fields=None, **kwargs):
# Reloads all deferred fields if any of the fields is deferred.
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super(RefreshPrimaryProxy, self).refresh_from_db(using, fields, **kwargs)
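# Illustrative queries (an assumption, not part of the original test models):
# the defer()/only() tests exercise these models roughly like
#
#   Primary.objects.defer("value")               # load everything except ``value``
#   Primary.objects.only("name")                 # load only ``name`` (plus the pk)
#   Child.objects.defer("value").get(name="c1")  # deferred field loads lazily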
|
la3lma/lightblue-0.4 | refs/heads/master | src/linux/__init__.py | 180 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
"LightBlue - a simple bluetooth library."
# Docstrings for attributes in this module.
_docstrings = {
"finddevices":
"""
Performs a device discovery and returns the found devices as a list of
(address, name, class-of-device) tuples. Raises BluetoothError if an error
occurs.
Arguments:
- getnames=True: True if device names should be retrieved during
discovery. If false, None will be returned instead of the device
name.
- length=10: the number of seconds to spend discovering devices
(this argument has no effect on Python for Series 60)
Do not invoke a new discovery before a previous discovery has finished.
Also, to minimise interference with other wireless and bluetooth traffic,
and to conserve battery power on the local device, discoveries should not
be invoked too frequently (an interval of at least 20 seconds is
recommended).
""",
"findservices":
"""
Performs a service discovery and returns the found services as a list of
(device-address, service-port, service-name) tuples. Raises BluetoothError
if an error occurs.
Arguments:
- addr=None: a device address, to search only for services on a
specific device
- name=None: a service name string, to search only for a service with a
specific name
- servicetype=None: can be RFCOMM or OBEX to search only for RFCOMM or
OBEX-type services. (OBEX services are not returned from an RFCOMM
search)
If more than one criteria is specified, this returns services that match
all criteria.
Currently the Python for Series 60 implementation will only find RFCOMM and
OBEX services.
""",
"finddevicename":
"""
Returns the name of the device with the given bluetooth address.
finddevicename(gethostaddr()) returns the local device name.
Arguments:
- address: the address of the device to look up
- usecache=True: if True, the device name will be fetched from a local
cache if possible. If False, or if the device name is not in the
cache, the remote device will be contacted to request its name.
Raise BluetoothError if the name cannot be retrieved.
""",
"gethostaddr":
"""
Returns the address of the local bluetooth device.
Raise BluetoothError if the local device is not available.
""",
"gethostclass":
"""
Returns the class of device of the local bluetooth device.
These values indicate the device's major services and the type of the
device (e.g. mobile phone, laptop, etc.). If you google for
"assigned numbers bluetooth baseband" you might find some documents
that discuss how to extract this information from the class of device.
Raise BluetoothError if the local device is not available.
""",
"socket":
"""
socket(proto=RFCOMM) -> socket object
Returns a new socket object.
Arguments:
- proto=RFCOMM: the type of socket to be created - either L2CAP or
RFCOMM.
Note that L2CAP sockets are not available on Python For Series 60, and
only L2CAP client sockets are supported on Mac OS X and Linux (i.e. you can
connect() the socket but not bind(), accept(), etc.).
""",
"advertise":
"""
Starts advertising a service with the given name, using the given server
socket. Raises BluetoothError if the service cannot be advertised.
Arguments:
- name: name of the service to be advertised
- sock: the socket object that will serve this service. The socket must
be already bound to a channel. If a RFCOMM service is being
advertised, the socket should also be listening.
- servicetype: the type of service to advertise - either RFCOMM or
OBEX. (L2CAP services are not currently supported.)
(If the servicetype is RFCOMM, the service will be advertised with the
Serial Port Profile; if the servicetype is OBEX, the service will be
advertised with the OBEX Object Push Profile.)
""",
"stopadvertise":
"""
Stops advertising the service on the given socket. Raises BluetoothError if
no service is advertised on the socket.
This will error if the given socket is already closed.
""",
"selectdevice":
"""
Displays a GUI which allows the end user to select a device from a list of
discovered devices.
Returns the selected device as an (address, name, class-of-device) tuple.
Returns None if the selection was cancelled.
(On Python For Series 60, the device selection will fail if there are any
open bluetooth connections.)
""",
"selectservice":
"""
Displays a GUI which allows the end user to select a service from a list of
discovered devices and their services.
Returns the selected service as a (device-address, service-port, service-
name) tuple. Returns None if the selection was cancelled.
(On Python For Series 60, the device selection will fail if there are any
open bluetooth connections.)
Currently the Python for Series 60 implementation will only find RFCOMM and
OBEX services.
"""
}
# import implementation modules
from _lightblue import *
from _lightbluecommon import *
import obex # plus submodule
# set docstrings
import _lightblue
localattrs = locals()
for attr in _lightblue.__all__:
try:
localattrs[attr].__doc__ = _docstrings[attr]
except KeyError:
pass
del attr, localattrs
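# A hedged usage sketch (illustrative only, not part of the module; the device
# address and channel below are placeholders):
#
#   import lightblue
#   for addr, name, cod in lightblue.finddevices(getnames=True, length=10):
#       print addr, name
#   sock = lightblue.socket(lightblue.RFCOMM)
#   sock.connect(("00:11:22:33:44:55", 1))
#   sock.send("hello")
#   sock.close()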
|
shiblon/pytour | refs/heads/master | tutorials/regular_expressions.py | 1 | # vim:tw=50
"""Regular Expressions
Python, like most other languages these days, has
**regular expression** facilities, but not built
into the language. If you don't know what regular
expressions are, that's a topic all by itself, so
we'll only be covering the barest of the basics
here to show how to use them in Python. More info
can be found here:
http://docs.python.org/2/howto/regex.html
To use regular expressions, you import the |re| module.
You then have access to all of its functions, like
|search|, |match|, and |sub|. There are many others.
Note that |match| almost _never_ does what people think
it should, so ignore it: |search| always works fine.
You can also **compile** your regular expressions
and use them pre-built. This can be more
efficient, and it allows some of their parameters
to be specified outside of the expression, like
|IGNORECASE| instead of |(?i)|. It also makes it
easier to remember parameter order for functions
like |search| and |sub|.
Note that we introduced a new kind of string here,
called a **raw string**. This is a string
specified with |r| in front of it, e.g., |r"I'm
\\raw"|. Raw strings make the |\\| have no
special meaning, so you'll see them used all the
time with regular expressions, and you should
adopt this practice as well.
"""
import re
# When finding things using regular expressions, either
# None or a match object is returned. Since None
# evaluates to False in boolean contexts, you can do
# things like this:
if re.search(r"(?i)kittens", "Kittens on YouTube."):
print "Kittens found!"
# Match objects also contain information about the
# search, like which groups matched where, etc.
# Here is an alternative approach that first compiles
# the regex and then uses it to extract group
# information.
expr = re.compile(r"^kittens (.*)$", re.IGNORECASE)
match = expr.search("Kittens on YouTube.")
print match.groups()
# Note that we preface all pattern strings with the
# letter 'r' because raw strings are best for regular
# expression patterns, because they tend to be
# backslash-heavy.
print re.sub(r"(?i)(\s|.t)", "", "Kittens on YouTube")
# With date strings:
m = re.search(r"^(\d{4})-(\d{2})-(\d{2})$", "2012-10-31")
print m.groups()
# Just the year (groups are 1-based when accessed this
# way):
print m.group(1)
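# Compiled expressions can be reused for substitution, too. (This extra
# example is not part of the original tour; the pattern is only illustrative.)
date_expr = re.compile(r"(\d{4})-(\d{2})-(\d{2})")
print date_expr.sub(r"\3/\2/\1", "Halloween falls on 2012-10-31.")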
|
zasdfgbnm/tensorflow | refs/heads/master | tensorflow/python/profiler/profile_context_test.py | 32 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
def testBasics(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), "dump")
opts = builder(builder.time_and_memory()
).with_file_output(outfile).build()
x = lib.BuildFullModel()
profile_str = None
profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
total_steps = 101
for i in range(total_steps):
sess.run(x)
if i == 14 or i == 49:
self.assertTrue(gfile.Exists(outfile))
gfile.Remove(outfile)
if i == 99:
self.assertTrue(gfile.Exists(profile_step100))
with gfile.Open(outfile, "r") as f:
profile_str = f.read()
gfile.Remove(outfile)
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
with gfile.Open(outfile, "r") as f:
self.assertEqual(profile_str, f.read())
  def testAutoTracingInDebugMode(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
# Warm up, no tracing.
self.assertFalse("run_meta" in f)
sess.run(x)
self.assertTrue(
gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
# fetched already.
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
self.assertFalse("run_meta" in f)
def testDisabled(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(),
enabled=False) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertTrue(pctx.profiler is None)
self.assertTrue(
getattr(session.BaseSession, "profile_context", None) is None)
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertFalse(pctx.profiler is None)
self.assertFalse(
getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main()
|
bslatkin/8-bits | refs/heads/master | tools/cssutils/src/cssutils/_codec2.py | 9 | #!/usr/bin/env python
"""Python codec for CSS."""
__docformat__ = 'restructuredtext'
__author__ = 'Walter Doerwald'
__version__ = '$Id: util.py 1114 2008-03-05 13:22:59Z cthedot $'
import codecs
import marshal
# We're using bits to store all possible candidate encodings (or variants, i.e.
# we have two bits for the variants of UTF-16 and two for the
# variants of UTF-32).
#
# Prefixes for various CSS encodings
# UTF-8-SIG xEF xBB xBF
# UTF-16 (LE) xFF xFE ~x00|~x00
# UTF-16 (BE) xFE xFF
# UTF-16-LE @ x00 @ x00
# UTF-16-BE x00 @
# UTF-32 (LE) xFF xFE x00 x00
# UTF-32 (BE) x00 x00 xFE xFF
# UTF-32-LE @ x00 x00 x00
# UTF-32-BE x00 x00 x00 @
# CHARSET @ c h a ...
def detectencoding_str(input, final=False):
"""
Detect the encoding of the byte string ``input``, which contains the
beginning of a CSS file. This function returns the detected encoding (or
``None`` if it hasn't got enough data), and a flag that indicates whether
that encoding has been detected explicitely or implicitely. To detect the
encoding the first few bytes are used (or if ``input`` is ASCII compatible
and starts with a charset rule the encoding name from the rule). "Explicit"
detection means that the bytes start with a BOM or a charset rule.
If the encoding can't be detected yet, ``None`` is returned as the encoding.
``final`` specifies whether more data will be available in later calls or
not. If ``final`` is true, ``detectencoding_str()`` will never return
``None`` as the encoding.
"""
# A bit for every candidate
CANDIDATE_UTF_8_SIG = 1
CANDIDATE_UTF_16_AS_LE = 2
CANDIDATE_UTF_16_AS_BE = 4
CANDIDATE_UTF_16_LE = 8
CANDIDATE_UTF_16_BE = 16
CANDIDATE_UTF_32_AS_LE = 32
CANDIDATE_UTF_32_AS_BE = 64
CANDIDATE_UTF_32_LE = 128
CANDIDATE_UTF_32_BE = 256
CANDIDATE_CHARSET = 512
candidates = 1023 # all candidates
li = len(input)
if li>=1:
# Check first byte
c = input[0]
if c != "\xef":
candidates &= ~CANDIDATE_UTF_8_SIG
if c != "\xff":
candidates &= ~(CANDIDATE_UTF_32_AS_LE|CANDIDATE_UTF_16_AS_LE)
if c != "\xfe":
candidates &= ~CANDIDATE_UTF_16_AS_BE
if c != "@":
candidates &= ~(CANDIDATE_UTF_32_LE|CANDIDATE_UTF_16_LE|CANDIDATE_CHARSET)
if c != "\x00":
candidates &= ~(CANDIDATE_UTF_32_AS_BE|CANDIDATE_UTF_32_BE|CANDIDATE_UTF_16_BE)
if li>=2:
# Check second byte
c = input[1]
if c != "\xbb":
candidates &= ~CANDIDATE_UTF_8_SIG
if c != "\xfe":
candidates &= ~(CANDIDATE_UTF_16_AS_LE|CANDIDATE_UTF_32_AS_LE)
if c != "\xff":
candidates &= ~CANDIDATE_UTF_16_AS_BE
if c != "\x00":
candidates &= ~(CANDIDATE_UTF_16_LE|CANDIDATE_UTF_32_AS_BE|CANDIDATE_UTF_32_LE|CANDIDATE_UTF_32_BE)
if c != "@":
candidates &= ~CANDIDATE_UTF_16_BE
if c != "c":
candidates &= ~CANDIDATE_CHARSET
if li>=3:
# Check third byte
c = input[2]
if c != "\xbf":
candidates &= ~CANDIDATE_UTF_8_SIG
if c != "c":
candidates &= ~CANDIDATE_UTF_16_LE
if c != "\x00":
candidates &= ~(CANDIDATE_UTF_32_AS_LE|CANDIDATE_UTF_32_LE|CANDIDATE_UTF_32_BE)
if c != "\xfe":
candidates &= ~CANDIDATE_UTF_32_AS_BE
if c != "h":
candidates &= ~CANDIDATE_CHARSET
if li>=4:
# Check fourth byte
c = input[3]
if input[2:4] == "\x00\x00":
candidates &= ~CANDIDATE_UTF_16_AS_LE
if c != "\x00":
candidates &= ~(CANDIDATE_UTF_16_LE|CANDIDATE_UTF_32_AS_LE|CANDIDATE_UTF_32_LE)
if c != "\xff":
candidates &= ~CANDIDATE_UTF_32_AS_BE
if c != "@":
candidates &= ~CANDIDATE_UTF_32_BE
if c != "a":
candidates &= ~CANDIDATE_CHARSET
if candidates == 0:
return ("utf-8", False)
if not (candidates & (candidates-1)): # only one candidate remaining
if candidates == CANDIDATE_UTF_8_SIG and li >= 3:
return ("utf-8-sig", True)
elif candidates == CANDIDATE_UTF_16_AS_LE and li >= 2:
return ("utf-16", True)
elif candidates == CANDIDATE_UTF_16_AS_BE and li >= 2:
return ("utf-16", True)
elif candidates == CANDIDATE_UTF_16_LE and li >= 4:
return ("utf-16-le", False)
elif candidates == CANDIDATE_UTF_16_BE and li >= 2:
return ("utf-16-be", False)
elif candidates == CANDIDATE_UTF_32_AS_LE and li >= 4:
return ("utf-32", True)
elif candidates == CANDIDATE_UTF_32_AS_BE and li >= 4:
return ("utf-32", True)
elif candidates == CANDIDATE_UTF_32_LE and li >= 4:
return ("utf-32-le", False)
elif candidates == CANDIDATE_UTF_32_BE and li >= 4:
return ("utf-32-be", False)
elif candidates == CANDIDATE_CHARSET and li >= 4:
prefix = '@charset "'
if input[:len(prefix)] == prefix:
pos = input.find('"', len(prefix))
if pos >= 0:
return (input[len(prefix):pos], True)
# if this is the last call, and we haven't determined an encoding yet,
# we default to UTF-8
if final:
return ("utf-8", False)
    return (None, False) # don't know yet
def detectencoding_unicode(input, final=False):
"""
Detect the encoding of the unicode string ``input``, which contains the
beginning of a CSS file. The encoding is detected from the charset rule
at the beginning of ``input``. If there is no charset rule, ``"utf-8"``
will be returned.
If the encoding can't be detected yet, ``None`` is returned. ``final``
specifies whether more data will be available in later calls or not. If
``final`` is true, ``detectencoding_unicode()`` will never return ``None``.
"""
prefix = u'@charset "'
if input.startswith(prefix):
pos = input.find(u'"', len(prefix))
if pos >= 0:
return (input[len(prefix):pos], True)
elif final or not prefix.startswith(input):
# if this is the last call, and we haven't determined an encoding yet,
# (or the string definitely doesn't start with prefix) we default to UTF-8
return ("utf-8", False)
return (None, False) # don't know yet
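# Rough behaviour sketch for detectencoding_unicode() (illustrative only):
#
#   >>> detectencoding_unicode(u'@charset "iso-8859-1"; body {}')
#   (u'iso-8859-1', True)
#   >>> detectencoding_unicode(u'@char')            # might still become a rule
#   (None, False)
#   >>> detectencoding_unicode(u'body {}')          # definitely no rule
#   ('utf-8', False)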
def _fixencoding(input, encoding, final=False):
"""
Replace the name of the encoding in the charset rule at the beginning of
    ``input`` with ``encoding``. If ``input`` doesn't start with a charset
rule, ``input`` will be returned unmodified.
If the encoding can't be found yet, ``None`` is returned. ``final``
specifies whether more data will be available in later calls or not.
If ``final`` is true, ``_fixencoding()`` will never return ``None``.
"""
prefix = u'@charset "'
if len(input) > len(prefix):
if input.startswith(prefix):
pos = input.find(u'"', len(prefix))
if pos >= 0:
if encoding.replace("_", "-").lower() == "utf-8-sig":
encoding = u"utf-8"
return prefix + encoding + input[pos:]
# we haven't seen the end of the encoding name yet => fall through
else:
return input # doesn't start with prefix, so nothing to fix
elif not prefix.startswith(input) or final:
# can't turn out to be a @charset rule later (or there is no "later")
return input
if final:
return input
return None # don't know yet
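# A minimal sketch of _fixencoding() (illustrative only):
#
#   >>> _fixencoding(u'@charset "utf-8"; body {}', u'iso-8859-1')
#   u'@charset "iso-8859-1"; body {}'
#   >>> _fixencoding(u'body {}', u'iso-8859-1')       # nothing to fix
#   u'body {}'
#   >>> _fixencoding(u'@charset "ut', u'iso-8859-1')  # end of name not seen yet
#   # -> None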
def decode(input, errors="strict", encoding=None, force=True):
if encoding is None or not force:
(_encoding, explicit) = detectencoding_str(input, True)
if _encoding == "css":
raise ValueError("css not allowed as encoding name")
if (explicit and not force) or encoding is None: # Take the encoding from the input
encoding = _encoding
(input, consumed) = codecs.getdecoder(encoding)(input, errors)
return (_fixencoding(input, unicode(encoding), True), consumed)
def encode(input, errors="strict", encoding=None):
consumed = len(input)
if encoding is None:
encoding = detectencoding_unicode(input, True)[0]
if encoding.replace("_", "-").lower() == "utf-8-sig":
input = _fixencoding(input, u"utf-8", True)
else:
input = _fixencoding(input, unicode(encoding), True)
if encoding == "css":
raise ValueError("css not allowed as encoding name")
encoder = codecs.getencoder(encoding)
return (encoder(input, errors)[0], consumed)
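# Hedged usage sketch for the two module-level entry points above (the "css"
# codec name itself only works after search_function() is registered further
# down; the sample strings are made up):
#
#   >>> css = '@charset "ascii"; h1 { color: red }'
#   >>> text, consumed = decode(css)        # encoding taken from the rule
#   >>> text.startswith(u'@charset "ascii"'), consumed == len(css)
#   (True, True)
#   >>> encode(text, encoding="utf-8")[0][:16]   # rule rewritten on the way out
#   '@charset "utf-8"'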
def _bytes2int(bytes):
# Helper: convert an 8 bit string into an ``int``.
i = 0
for byte in bytes:
i = (i<<8) + ord(byte)
return i
def _int2bytes(i):
# Helper: convert an ``int`` into an 8-bit string.
v = []
while i:
v.insert(0, chr(i&0xff))
i >>= 8
return "".join(v)
if hasattr(codecs, "IncrementalDecoder"):
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors="strict", encoding=None, force=True):
self.decoder = None
self.encoding = encoding
self.force = force
codecs.IncrementalDecoder.__init__(self, errors)
# Store ``errors`` somewhere else,
# because we have to hide it in a property
self._errors = errors
self.buffer = u"".encode()
self.headerfixed = False
def iterdecode(self, input):
for part in input:
result = self.decode(part, False)
if result:
yield result
result = self.decode("", True)
if result:
yield result
def decode(self, input, final=False):
# We're doing basically the same as a ``BufferedIncrementalDecoder``,
# but since the buffer is only relevant until the encoding has been
# detected (in which case the buffer of the underlying codec might
# kick in), we're implementing buffering ourselves to avoid some
# overhead.
if self.decoder is None:
input = self.buffer + input
# Do we have to detect the encoding from the input?
if self.encoding is None or not self.force:
(encoding, explicit) = detectencoding_str(input, final)
if encoding is None: # no encoding determined yet
self.buffer = input # retry the complete input on the next call
return u"" # no encoding determined yet, so no output
elif encoding == "css":
raise ValueError("css not allowed as encoding name")
if (explicit and not self.force) or self.encoding is None: # Take the encoding from the input
self.encoding = encoding
self.buffer = "" # drop buffer, as the decoder might keep its own
decoder = codecs.getincrementaldecoder(self.encoding)
self.decoder = decoder(self._errors)
if self.headerfixed:
return self.decoder.decode(input, final)
# If we haven't fixed the header yet,
# the content of ``self.buffer`` is a ``unicode`` object
output = self.buffer + self.decoder.decode(input, final)
encoding = self.encoding
if encoding.replace("_", "-").lower() == "utf-8-sig":
encoding = "utf-8"
newoutput = _fixencoding(output, unicode(encoding), final)
if newoutput is None:
# retry fixing the @charset rule (but keep the decoded stuff)
self.buffer = output
return u""
self.headerfixed = True
return newoutput
def reset(self):
codecs.IncrementalDecoder.reset(self)
self.decoder = None
self.buffer = u"".encode()
self.headerfixed = False
def _geterrors(self):
return self._errors
def _seterrors(self, errors):
# Setting ``errors`` must be done on the real decoder too
if self.decoder is not None:
self.decoder.errors = errors
self._errors = errors
errors = property(_geterrors, _seterrors)
def getstate(self):
if self.decoder is not None:
state = (self.encoding, self.buffer, self.headerfixed, True, self.decoder.getstate())
else:
state = (self.encoding, self.buffer, self.headerfixed, False, None)
return ("", _bytes2int(marshal.dumps(state)))
def setstate(self, state):
            state = marshal.loads(_int2bytes(state[1])) # ignore buffered input
self.encoding = state[0]
self.buffer = state[1]
self.headerfixed = state[2]
            if state[3]:
self.decoder = codecs.getincrementaldecoder(self.encoding)(self._errors)
self.decoder.setstate(state[4])
else:
self.decoder = None
if hasattr(codecs, "IncrementalEncoder"):
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors="strict", encoding=None):
self.encoder = None
self.encoding = encoding
codecs.IncrementalEncoder.__init__(self, errors)
# Store ``errors`` somewhere else,
# because we have to hide it in a property
self._errors = errors
self.buffer = u""
def iterencode(self, input):
for part in input:
result = self.encode(part, False)
if result:
yield result
result = self.encode(u"", True)
if result:
yield result
def encode(self, input, final=False):
if self.encoder is None:
input = self.buffer + input
if self.encoding is not None:
# Replace encoding in the @charset rule with the specified one
encoding = self.encoding
if encoding.replace("_", "-").lower() == "utf-8-sig":
encoding = "utf-8"
newinput = _fixencoding(input, unicode(encoding), final)
if newinput is None: # @charset rule incomplete => Retry next time
self.buffer = input
return ""
input = newinput
else:
# Use encoding from the @charset declaration
self.encoding = detectencoding_unicode(input, final)[0]
if self.encoding is not None:
if self.encoding == "css":
raise ValueError("css not allowed as encoding name")
info = codecs.lookup(self.encoding)
encoding = self.encoding
if self.encoding.replace("_", "-").lower() == "utf-8-sig":
input = _fixencoding(input, u"utf-8", True)
self.encoder = info.incrementalencoder(self._errors)
self.buffer = u""
else:
self.buffer = input
return ""
return self.encoder.encode(input, final)
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
self.buffer = u""
def _geterrors(self):
return self._errors
def _seterrors(self, errors):
            # Setting ``errors`` must be done on the real encoder too
if self.encoder is not None:
self.encoder.errors = errors
self._errors = errors
errors = property(_geterrors, _seterrors)
def getstate(self):
if self.encoder is not None:
state = (self.encoding, self.buffer, True, self.encoder.getstate())
else:
state = (self.encoding, self.buffer, False, None)
return _bytes2int(marshal.dumps(state))
def setstate(self, state):
            state = marshal.loads(_int2bytes(state))
            self.encoding = state[0]
            self.buffer = state[1]
            if state[2]:
                self.encoder = codecs.getincrementalencoder(self.encoding)(self._errors)
                self.encoder.setstate(state[3])
else:
self.encoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors="strict", encoding=None, header=False):
codecs.StreamWriter.__init__(self, stream, errors)
self.streamwriter = None
self.encoding = encoding
self._errors = errors
self.buffer = u""
def encode(self, input, errors='strict'):
li = len(input)
if self.streamwriter is None:
input = self.buffer + input
li = len(input)
if self.encoding is not None:
# Replace encoding in the @charset rule with the specified one
encoding = self.encoding
if encoding.replace("_", "-").lower() == "utf-8-sig":
encoding = "utf-8"
newinput = _fixencoding(input, unicode(encoding), False)
if newinput is None: # @charset rule incomplete => Retry next time
self.buffer = input
return ("", 0)
input = newinput
else:
# Use encoding from the @charset declaration
self.encoding = detectencoding_unicode(input, False)[0]
if self.encoding is not None:
if self.encoding == "css":
raise ValueError("css not allowed as encoding name")
self.streamwriter = codecs.getwriter(self.encoding)(self.stream, self._errors)
encoding = self.encoding
if self.encoding.replace("_", "-").lower() == "utf-8-sig":
input = _fixencoding(input, u"utf-8", True)
self.buffer = u""
else:
self.buffer = input
return ("", 0)
return (self.streamwriter.encode(input, errors)[0], li)
def _geterrors(self):
return self._errors
def _seterrors(self, errors):
# Setting ``errors`` must be done on the streamwriter too
if self.streamwriter is not None:
self.streamwriter.errors = errors
self._errors = errors
errors = property(_geterrors, _seterrors)
class StreamReader(codecs.StreamReader):
def __init__(self, stream, errors="strict", encoding=None, force=True):
codecs.StreamReader.__init__(self, stream, errors)
self.streamreader = None
self.encoding = encoding
self.force = force
self._errors = errors
def decode(self, input, errors='strict'):
if self.streamreader is None:
if self.encoding is None or not self.force:
(encoding, explicit) = detectencoding_str(input, False)
if encoding is None: # no encoding determined yet
return (u"", 0) # no encoding determined yet, so no output
elif encoding == "css":
raise ValueError("css not allowed as encoding name")
if (explicit and not self.force) or self.encoding is None: # Take the encoding from the input
self.encoding = encoding
streamreader = codecs.getreader(self.encoding)
streamreader = streamreader(self.stream, self._errors)
(output, consumed) = streamreader.decode(input, errors)
encoding = self.encoding
if encoding.replace("_", "-").lower() == "utf-8-sig":
encoding = "utf-8"
newoutput = _fixencoding(output, unicode(encoding), False)
if newoutput is not None:
self.streamreader = streamreader
return (newoutput, consumed)
return (u"", 0) # we will create a new streamreader on the next call
return self.streamreader.decode(input, errors)
def _geterrors(self):
return self._errors
def _seterrors(self, errors):
# Setting ``errors`` must be done on the streamreader too
if self.streamreader is not None:
self.streamreader.errors = errors
self._errors = errors
errors = property(_geterrors, _seterrors)
if hasattr(codecs, "CodecInfo"):
# We're running on Python 2.5 or better
def search_function(name):
if name == "css":
return codecs.CodecInfo(
name="css",
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
else:
# If we're running on Python 2.4, define the utf-8-sig codec here
def utf8sig_encode(input, errors='strict'):
return (codecs.BOM_UTF8 + codecs.utf_8_encode(input, errors)[0], len(input))
def utf8sig_decode(input, errors='strict'):
prefix = 0
if input[:3] == codecs.BOM_UTF8:
input = input[3:]
prefix = 3
(output, consumed) = codecs.utf_8_decode(input, errors, True)
return (output, consumed+prefix)
class UTF8SigStreamWriter(codecs.StreamWriter):
def reset(self):
codecs.StreamWriter.reset(self)
try:
del self.encode
except AttributeError:
pass
def encode(self, input, errors='strict'):
self.encode = codecs.utf_8_encode
return utf8sig_encode(input, errors)
class UTF8SigStreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
if len(input) < 3 and codecs.BOM_UTF8.startswith(input):
# not enough data to decide if this is a BOM
# => try again on the next call
return (u"", 0)
self.decode = codecs.utf_8_decode
return utf8sig_decode(input, errors)
def search_function(name):
import encodings
name = encodings.normalize_encoding(name)
if name == "css":
return (encode, decode, StreamReader, StreamWriter)
elif name == "utf_8_sig":
return (utf8sig_encode, utf8sig_decode, UTF8SigStreamReader, UTF8SigStreamWriter)
codecs.register(search_function)
# Error handler for CSS escaping
def cssescape(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
return (u"".join(u"\\%06x" % ord(c) for c in exc.object[exc.start:exc.end]), exc.end)
codecs.register_error("cssescape", cssescape)
|
moio/spacewalk | refs/heads/master | backend/common/RPC_Base.py | 1 | #
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Base XML-RPC class handler
class RPC_Base:
def __init__(self):
self.functions = []
# retrieve a function handler
def get_function(self, function):
if function in self.functions:
return getattr(self, function)
return None
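# Hedged usage sketch (illustrative; ``Queue`` and its exported names are
# made up here and are not taken from the real Spacewalk handlers):
#
#   class Queue(RPC_Base):
#       def __init__(self):
#           RPC_Base.__init__(self)
#           self.functions = ['get', 'submit']
#       def get(self, systemid):
#           return []
#
#   handler = Queue()
#   func = handler.get_function('get')   # bound method, or None if unknown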
|
agwilt/python | refs/heads/master | onetime/OneTime_Main.py | 1 | #!/usr/bin/env python3
# OneTime Papa Edition Main Window
# With an overview of everything:
# key manager
# en/de-crypter
# KeyGen(r) :p
from tkinter import *
from tkinter import filedialog
import random, pickle, os, sys
def keygen():
save_file = filedialog.asksaveasfilename()
key = [ random.randint(0,255) for x in range(1024) ]
if save_file:
pickle.dump(key,open(save_file,'wb'))
def cipher():
sidewindow('OneTime_Cipher')
def manage():
sidewindow('OneTime_Manager')
def sidewindow(thing):
global rightbit
global righton
global right
exec("import " + thing)
if righton:
canvas.delete(rightbit)
right.destroy()
righton = 0
else:
right = Frame(canvas, relief=GROOVE, borderwidth=2)
rightbit = canvas.create_window(640,480,window=right,anchor=SE)
exec(thing + ".init(right,path)")
righton = 1
user = os.getlogin()
if sys.platform == 'darwin':
path = '/Users/%s/.dcryptpe/' % user
elif 'lin' in sys.platform:
path = '/home/%s/.dcryptpe/' % user
else:
print("Error: Can't sepcify platform")
path = str(filedialog.askdirectory() + '/.dcryptpe/')
if not os.path.isdir(path): # check for first run conditions
os.mkdir(path)
righton = 0
root = Tk()
root.wm_title('OneTime Papa Edition')
root.resizable(0,0)
canvas = Canvas(root, width=640, height=480)
background = PhotoImage(file="/home/andreas/Programming/python/papa/background.gif")
canvas.create_image(0,0,image=background,anchor=NW)
canvas.pack()
top = Frame(canvas, relief=GROOVE, borderwidth=2)
middle = Frame(canvas, relief=GROOVE, borderwidth=2)
bottom = Frame(canvas, relief=GROOVE, borderwidth=2)
Button(top, text='KeyGen', command=keygen).pack()
Button(middle, text='Manager', command=manage).pack()
Button(bottom, text='Cipher', command=cipher).pack()
canvas.create_window(100,100,window=top,anchor=CENTER)
canvas.create_window(100,200,window=middle,anchor=CENTER)
canvas.create_window(100,300,window=bottom,anchor=CENTER)
root.mainloop()
|
h3biomed/ansible-modules-core | refs/heads/devel | network/nxos/nxos_evpn_vni.py | 13 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_evpn_vni
version_added: "2.2"
short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
description:
- Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
Identifier (VNI) configurations of a Nexus device.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
  - default, where supported, restores the parameter's default value.
- RD override is not permitted. You should set it to the default values
first and then reconfigure it.
- C(route_target_both), C(route_target_import) and
    C(route_target_export) valid values are a list of extended communities
(i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
- The C(route_target_both) property is discouraged due to the inconsistent
behavior of the property across Nexus platforms and image versions.
For this reason it is recommended to use explicit C(route_target_export)
and C(route_target_import) properties instead of C(route_target_both).
- RD valid values are a string in one of the route-distinguisher formats,
the keyword 'auto', or the keyword 'default'.
options:
vni:
description:
- The EVPN VXLAN Network Identifier.
required: true
default: null
route_distinguisher:
description:
- The VPN Route Distinguisher (RD). The RD is combined with
the IPv4 or IPv6 prefix learned by the PE router to create a
globally unique address.
required: true
default: null
route_target_both:
description:
- Enables/Disables route-target settings for both import and
export target communities using a single property.
required: false
default: null
route_target_import:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
route_target_export:
description:
      - Sets the route-target 'export' extended communities.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_evpn_vni:
vni: 6000
route_distinguisher: "60:10"
route_target_import:
- "5000:10"
- "4100:100"
route_target_export: auto
route_target_both: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"route_target_import": ["5000:10", "4100:100",
"5001:10"],"vni": "6000"}
existing:
description: k/v pairs of existing EVPN VNI configuration
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10"], "vni": "6000"}
end_state:
description: k/v pairs of EVPN VNI configuration after module execution
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10", "5001:10"], "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
from ansible.module_utils.network import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
PARAM_TO_COMMAND_KEYMAP = {
'vni': 'vni',
'route_target_both': 'route-target both',
'route_target_import': 'route-target import',
'route_target_export': 'route-target export',
'route_distinguisher': 'rd'
}
WARNINGS = []
import time
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_route_target_value(arg, config, module):
splitted_config = config.splitlines()
value_list = []
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
for line in splitted_config:
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line.strip():
value = REGEX.search(line).group('value')
value_list.append(value)
return value_list
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg != 'vni':
if arg == 'route_distinguisher':
existing[arg] = get_value(arg, config, module)
else:
existing[arg] = get_route_target_value(arg, config, module)
existing_fix = dict((k, v) for k, v in existing.iteritems() if v)
if existing_fix:
existing['vni'] = module.params['vni']
else:
existing = existing_fix
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
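# Illustrative example of apply_key_map() (values are made up): keys that are
# not present in the key map ('state' here) are dropped, the rest are renamed
# to their CLI counterparts, e.g.
#
#   apply_key_map(PARAM_TO_COMMAND_KEYMAP,
#                 {'vni': '6000', 'route_distinguisher': '60:10',
#                  'state': 'present'})
#   # -> {'vni': '6000', 'rd': '60:10'}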
def state_present(module, existing, proposed):
commands = list()
parents = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if key.startswith('route-target'):
if value == ['default']:
existing_value = existing_commands.get(key)
if existing_value:
for target in existing_value:
commands.append('no {0} {1}'.format(key, target))
else:
if not isinstance(value, list):
value = [value]
for target in value:
if existing:
if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
commands.append('{0} {1}'.format(key, target))
else:
commands.append('{0} {1}'.format(key, target))
else:
if value == 'default':
existing_value = existing_commands.get(key)
if existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
return commands, parents
def state_absent(module, existing, proposed):
commands = ['no vni {0} l2'.format(module.params['vni'])]
parents = ['evpn']
return commands, parents
def execute_config(module, candidate):
result = {}
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
return result
def main():
argument_spec = dict(
vni=dict(required=True, type='str'),
route_distinguisher=dict(required=False, type='str'),
route_target_both=dict(required=False, type='list'),
route_target_import=dict(required=False, type='list'),
route_target_export=dict(required=False, type='list'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
state = module.params['state']
args = [
'vni',
'route_distinguisher',
'route_target_both',
'route_target_import',
'route_target_export'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'vni':
if value == 'true':
value = True
elif value == 'false':
value = False
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
commands, parents = invoke('state_%s' % state, module, existing,
proposed)
if commands:
if (existing.get('route_distinguisher') and
proposed.get('route_distinguisher')):
if (existing['route_distinguisher'] != proposed[
'route_distinguisher'] and
proposed['route_distinguisher'] != 'default'):
WARNINGS.append('EVPN RD {0} was automatically removed. '
'It is highly recommended to use a task '
'(with default as value) to explicitly '
'unconfigure it.'.format(
existing['route_distinguisher']))
remove_commands = ['no rd {0}'.format(
existing['route_distinguisher'])]
candidate.add(remove_commands, parents=parents)
result = execute_config(module, candidate)
time.sleep(30)
candidate = CustomNetworkConfig(indent=3)
candidate.add(commands, parents=parents)
result = execute_config(module, candidate)
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ewitz/PhotoHaus | refs/heads/master | venv/lib/python2.7/site-packages/wtforms/ext/django/fields.py | 175 | """
Useful form fields for use with the Django ORM.
"""
from __future__ import unicode_literals
import datetime
import operator
try:
from django.conf import settings
from django.utils import timezone
has_timezone = True
except ImportError:
has_timezone = False
from wtforms import fields, widgets
from wtforms.compat import string_types
from wtforms.validators import ValidationError
__all__ = (
'ModelSelectField', 'QuerySetSelectField', 'DateTimeField'
)
class QuerySetSelectField(fields.SelectFieldBase):
"""
Given a QuerySet either at initialization or inside a view, will display a
select drop-down field of choices. The `data` property actually will
store/keep an ORM model instance, not the ID. Submitting a choice which is
not in the queryset will result in a validation error.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for the blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, queryset=None, get_label=None, allow_blank=False, blank_text='', **kwargs):
super(QuerySetSelectField, self).__init__(label, validators, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if queryset is not None:
self.queryset = queryset.all() # Make sure the queryset is fresh
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
def _get_data(self):
if self._formdata is not None:
for obj in self.queryset:
if obj.pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.queryset:
yield (obj.pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = int(valuelist[0])
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.queryset:
if self.data == obj:
break
else:
raise ValidationError(self.gettext('Not a valid choice'))
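# Hedged usage sketch (illustrative; ``Group``, the form and the view below
# are assumptions, not part of wtforms itself):
#
#   from wtforms import Form
#
#   class UserForm(Form):
#       group = QuerySetSelectField(u'Group', get_label='name',
#                                   allow_blank=True, blank_text=u'(none)')
#
#   def edit_user(request):
#       form = UserForm(request.POST)
#       form.group.queryset = Group.objects.filter(active=True)
#       if form.validate():
#           ...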
class ModelSelectField(QuerySetSelectField):
"""
Like a QuerySetSelectField, except takes a model class instead of a
queryset and lists everything in it.
"""
def __init__(self, label=None, validators=None, model=None, **kwargs):
super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs)
class DateTimeField(fields.DateTimeField):
"""
Adds support for Django's timezone utilities.
Requires Django >= 1.5
"""
def __init__(self, *args, **kwargs):
if not has_timezone:
raise ImportError('DateTimeField requires Django >= 1.5')
super(DateTimeField, self).__init__(*args, **kwargs)
def process_formdata(self, valuelist):
super(DateTimeField, self).process_formdata(valuelist)
date = self.data
if settings.USE_TZ and date is not None and timezone.is_naive(date):
current_timezone = timezone.get_current_timezone()
self.data = timezone.make_aware(date, current_timezone)
def _value(self):
date = self.data
if settings.USE_TZ and isinstance(date, datetime.datetime) and timezone.is_aware(date):
self.data = timezone.localtime(date)
return super(DateTimeField, self)._value()
|
jaloren/robotframework | refs/heads/master | src/robot/writer/__init__.py | 8 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements writing of parsed, and possibly edited, test data back to files.
This functionality is used by :meth:`robot.parsing.model.TestCaseFile.save`
and indirectly by :mod:`robot.tidy`. External tools should not need to use
this package directly.
This package is considered stable, although the planned changes to
:mod:`robot.parsing` may affect also this package.
"""
from .datafilewriter import DataFileWriter
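# Hedged usage sketch (illustrative only; normally this writer is driven
# indirectly via ``TestCaseFile.save()`` or ``robot.tidy``, and the option
# names shown are assumptions):
#
#   from robot.parsing import TestCaseFile
#   from robot.writer import DataFileWriter
#
#   data = TestCaseFile(source='tests.robot').populate()
#   DataFileWriter(format='txt').write(data)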
|
dlozeve/reveal_CommunityDetection | refs/heads/master | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
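# ParseTarget() splits a fully-qualified gyp target label of the form
# "path/to/file.gyp:target#toolset"; e.g. (made-up target):
#
#   >>> ParseTarget('out/build.gyp:mylib#host')
#   ('out/build.gyp', 'mylib', 'host')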
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
file = open('dump.json')
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
jor-/scipy | refs/heads/master | scipy/sparse/linalg/isolve/iterative/test.py | 6 | from __future__ import division, print_function, absolute_import
from iterative import *
import numpy as np
def test_fun(alpha, x, beta, y, A, n):
# compute z = alpha*A*x + beta*y
xx = x[:n]
yy = y[:n]
w = np.dot(A,xx)
z = alpha*w+beta*yy
y[:n] = z
return
def test_fun_t(alpha, x, beta, y, A, n):
# compute z = alpha*A*x + beta*y
xx = x[:n]
yy = y[:n]
AA = np.conj(np.transpose(A))
w = np.dot(AA,xx)
z = alpha*w+beta*yy
y[:n] = z
return
def test_psolve(x,b,n):
x[:n] = b[:n]
return
def test_psolve_t(x,b,n):
x[:n] = b[:n]
return
def test_psolveq(x,b,which,n):
x[:n] = b[:n]
return
def test_psolveq_t(x,b,which,n):
x[:n] = b[:n]
return
n = 5
dA = 1.0*np.array([[2, -1, 0, 0, 0],
[-1, 2, -1, 0, 0],
[0, -1, 2, -1, 0],
[0, 0, -1, 2, -1],
[0, 0, 0, 1, 2]])
db = 1.0*np.array([0,1,1,0,0])
##zA = (1.0+0j)*np.array([[ 2, -1+0.1j, 0, 0, 0],
## [-1+0.1j, 2, -1-0.1j, 0, 0],
## [ 0, -1-0.1j, 2, -1+0.1j, 0],
## [ 0, 0, -1+0.1j, 2, -1-0.1j],
## [ 0, 0, 0, -1, 2-0.1j]])
zA = (1.0+0j)*np.array([[2, -1 + 1j, 0, 0, 0],
[-1+0.1j, 2, -1-0.1j, 0, 0],
[0, -1 - 1j, 2, -1+0.1j, 0],
[0, 0, -1+0.1j, 2, -1-0.1j],
[0, 0, 0, -1, 2-0.1j]])
zb = (1.0+0j)*np.array([0,1,1,0,0])
dx = 0*db.copy()
zx = 0*zb.copy()
diter = 1000
dresid = 1e-6
ziter = 1000
zresid = 1e-6
drestrt = n
zrestrt = n
############### BiCG #######################
dx,diter,dresid,dinfor = dbicg(db,dx,diter,dresid,test_fun,test_fun_t,test_psolve,test_psolve_t,(dA,n),(dA,n),(n,),(n,))
zx,ziter,zresid,zinfor = zbicg(zb,zx,ziter,zresid,test_fun,test_fun_t,test_psolve,test_psolve_t,(zA,n),(zA,n),(n,),(n,))
############### BiCGSTAB ###################
#dx,diter,dresid,dinfor = dbicgstab(db,dx,diter,dresid,test_fun,test_psolve,(dA,n),(n,))
#zx,ziter,zresid,zinfor = zbicgstab(zb,zx,ziter,zresid,test_fun,test_psolve,(zA,n),(n,))
############### CG #########################
##dA = 1.0*array([[ 2, -1, 0, 0, 0],
## [-1, 2, -1, 0, 0],
## [ 0, -1, 2, -1, 0],
## [ 0, 0, -1, 2, -1],
## [ 0, 0, 0, -1, 2]])
##dx = db.copy()
##zA = (1.0+0j)*array([[ 2, -1+0.1j, 0, 0, 0],
## [-1+0.1j, 2, -1-0.1j, 0, 0],
## [ 0, -1-0.1j, 2, -1+0.1j, 0],
## [ 0, 0, -1+0.1j, 2, -1-0.1j],
## [ 0, 0, 0, -1, 2-0.1j]])
##zx = zb.copy()
##dx,diter,dresid,dinfor = dcg(db,dx,diter,dresid,test_fun,test_psolve,(dA,n),(n,))
##zx,ziter,zresid,zinfor = zcg(zb,zx,ziter,zresid,test_fun,test_psolve,(zA,n),(n,))
############### CGS ########################
#dx,diter,dresid,dinfor = dcgs(db,dx,diter,dresid,test_fun,test_psolve,(dA,n),(n,))
#zx,ziter,zresid,zinfor = zcgs(zb,zx,ziter,zresid,test_fun,test_psolve,(zA,n),(n,))
############### GMRES ######################
#dx,diter,dresid,dinfor = dgmres(db,dx,drestrt,diter,dresid,test_fun,test_psolve,(dA,n),(n,))
#zx,ziter,zresid,zinfor = zgmres(zb,zx,zrestrt,ziter,zresid,test_fun,test_psolve,(zA,n),(n,))
############### QMR ########################
#dx,diter,dresid,dinfor = dqmr(db,dx,diter,dresid,test_fun,test_fun_t,test_psolveq,test_psolveq_t,(dA,n),(dA,n),(n,),(n,))
#zx,ziter,zresid,zinfor = zqmr(zb,zx,ziter,zresid,test_fun,test_fun_t,test_psolveq,test_psolveq_t,(zA,n),(zA,n),(n,),(n,))
print()
print('**************** double *****************')
print('iter:',diter, 'resid:', dresid, 'info:',dinfor)
print('x=',dx)
print('*****************************************')
print()
print()
print('**************** complex ****************')
print('iter:',ziter, 'resid:',zresid, 'info:',zinfor)
print('x=',zx)
print('*****************************************')
print()
|