repo_name | path | copies | size | content | license
---|---|---|---|---|---
psiwczak/openstack
|
nova/virt/xenapi/volume_utils.py
|
1
|
15660
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories
"""
import re
from nova import db
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt import xenapi
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class StorageError(Exception):
"""To raise errors related to SR, VDI, PBD, and VBD commands"""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
class VolumeHelper(xenapi.HelperBase):
"""
The class that wraps the helper methods together.
"""
@classmethod
def create_sr(cls, session, label, params):
LOG.debug(_("creating sr within volume_utils"))
type = params['sr_type']
del params['sr_type']
LOG.debug(_('type is = %s') % type)
if 'name_description' in params:
desc = params['name_description']
LOG.debug(_('name = %s') % desc)
del params['name_description']
else:
desc = ''
if 'id' in params:
del params['id']
LOG.debug(params)
try:
sr_ref = session.call_xenapi("SR.create",
session.get_xenapi_host(),
params,
'0', label, desc, type, '', False, {})
LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
@classmethod
def introduce_sr(cls, session, sr_uuid, label, params):
LOG.debug(_("introducing sr within volume_utils"))
type = params['sr_type']
del params['sr_type']
LOG.debug(_('type is = %s') % type)
if 'name_description' in params:
desc = params['name_description']
LOG.debug(_('name = %s') % desc)
del params['name_description']
else:
desc = ''
if 'id' in params:
del params['id']
LOG.debug(params)
try:
sr_ref = session.call_xenapi("SR.introduce",
sr_uuid,
label,
desc,
type,
'',
False,
params,)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
#Create pbd
LOG.debug(_('Creating pbd for SR'))
pbd_ref = cls.create_pbd(session, sr_ref, params)
LOG.debug(_('Plugging SR'))
#Plug pbd
session.call_xenapi("PBD.plug", pbd_ref)
session.call_xenapi("SR.scan", sr_ref)
return sr_ref
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce Storage Repository'))
@classmethod
def forget_sr(cls, session, sr_uuid):
"""
Forgets the storage repository without destroying the VDIs within
"""
try:
sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to get SR using uuid'))
LOG.debug(_('Forgetting SR %s...') % sr_ref)
try:
cls.unplug_pbds(session, sr_ref)
sr_ref = session.call_xenapi("SR.forget", sr_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to forget Storage Repository'))
@classmethod
def find_sr_by_uuid(cls, session, sr_uuid):
"""
Return the storage repository given a uuid.
"""
for sr_ref, sr_rec in cls.get_all_refs_and_recs(session, 'SR'):
if sr_rec['uuid'] == sr_uuid:
return sr_ref
return None
@classmethod
def create_iscsi_storage(cls, session, info, label, description):
"""
Create an iSCSI storage repository that will be used to mount
the volume for the specified instance
"""
sr_ref = session.call_xenapi("SR.get_by_name_label", label)
if len(sr_ref) == 0:
LOG.debug(_('Introducing %s...'), label)
record = {}
if 'chapuser' in info and 'chappassword' in info:
record = {'target': info['targetHost'],
'port': info['targetPort'],
'targetIQN': info['targetIQN'],
'chapuser': info['chapuser'],
'chappassword': info['chappassword']}
else:
record = {'target': info['targetHost'],
'port': info['targetPort'],
'targetIQN': info['targetIQN']}
            try:
                # Create the SR on the host (mirrors create_sr above).
                sr_ref = session.call_xenapi("SR.create",
                                             session.get_xenapi_host(),
                                             record,
                                             '0', label, description,
                                             'iscsi', '', False, {})
                LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
                return sr_ref
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
else:
return sr_ref[0]
@classmethod
def find_sr_from_vbd(cls, session, vbd_ref):
"""Find the SR reference from the VBD reference"""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@classmethod
def create_pbd(cls, session, sr_ref, params):
pbd_rec = {}
pbd_rec['host'] = session.get_xenapi_host()
pbd_rec['SR'] = sr_ref
pbd_rec['device_config'] = params
pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
return pbd_ref
@classmethod
def unplug_pbds(cls, session, sr_ref):
pbds = []
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref, vdi_uuid=None, target_lun=None):
"""Introduce VDI in the host"""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
LOG.debug("vdi_uuid: %s" % vdi_uuid)
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
elif target_lun:
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
for curr_ref in vdi_refs:
curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
if ('sm_config' in curr_rec and
'LUNid' in curr_rec['sm_config'] and
curr_rec['sm_config']['LUNid'] == str(target_lun)):
vdi_ref = curr_ref
break
else:
vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
LOG.debug(vdi_rec)
LOG.debug(type(vdi_rec))
except session.XenAPI.Failure, exc:
LOG.exception(exc)
            raise StorageError(_('Unable to get record'
                                 ' of VDI %s') % vdi_ref)
if vdi_rec['managed']:
# We do not need to introduce the vdi
return vdi_ref
try:
return session.call_xenapi("VDI.introduce",
vdi_rec['uuid'],
vdi_rec['name_label'],
vdi_rec['name_description'],
vdi_rec['SR'],
vdi_rec['type'],
vdi_rec['sharable'],
vdi_rec['read_only'],
vdi_rec['other_config'],
vdi_rec['location'],
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
@classmethod
def purge_sr(cls, session, sr_ref):
try:
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
        except session.XenAPI.Failure, ex:
LOG.exception(ex)
raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
for vdi_ref in vdi_refs:
try:
vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
            except session.XenAPI.Failure, ex:
LOG.exception(ex)
raise StorageError(_('Unable to find vbd for vdi %s') %
vdi_ref)
if len(vbd_refs) > 0:
return
cls.forget_sr(session, sr_rec['uuid'])
@classmethod
def parse_volume_info(cls, connection_info, mountpoint):
"""
Parse device_path and mountpoint as they can be used by XenAPI.
In particular, the mountpoint (e.g. /dev/sdc) must be translated
into a numeric literal.
FIXME(armando):
As for device_path, currently cannot be used as it is,
because it does not contain target information. As for interim
solution, target details are passed either via Flags or obtained
by iscsiadm. Long-term solution is to add a few more fields to the
db in the iscsi_target table with the necessary info and modify
the iscsi driver to set them.
"""
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
data = connection_info['data']
volume_id = data['volume_id']
target_portal = data['target_portal']
target_host = _get_target_host(target_portal)
target_port = _get_target_port(target_portal)
target_iqn = data['target_iqn']
        LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s,%s)',
                  volume_id, device_number, target_host, target_port,
                  target_iqn)
if (device_number < 0 or
volume_id is None or
target_host is None or
target_iqn is None):
raise StorageError(_('Unable to obtain target information'
' %(data)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['id'] = volume_id
volume_info['target'] = target_host
volume_info['port'] = target_port
volume_info['targetIQN'] = target_iqn
if ('auth_method' in connection_info and
connection_info['auth_method'] == 'CHAP'):
volume_info['chapuser'] = connection_info['auth_username']
volume_info['chappassword'] = connection_info['auth_password']
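        # Shape of the result, with hypothetical values (the CHAP keys are
        # present only when auth_method is 'CHAP'):
        #   {'id': 1, 'target': '10.0.0.1', 'port': '3260',
        #    'targetIQN': 'iqn.2010-10.org.openstack:1'}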
return volume_info
@classmethod
def mountpoint_to_number(cls, mountpoint):
"""Translate a mountpoint like /dev/sdc into a numeric"""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
return (ord(mountpoint[2:3]) - ord('a'))
elif re.match('^x?vd[a-p]$', mountpoint):
return (ord(mountpoint[-1]) - ord('a'))
elif re.match('^[0-9]+$', mountpoint):
            return int(mountpoint, 10)
else:
LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
def _get_volume_id(path_or_id):
"""Retrieve the volume id from device_path"""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
# n must contain at least the volume_id
# :volume- is for remote volumes
# -volume- is for local volumes
# see compute/manager->setup_compute_volume
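    # Worked example with a hypothetical remote device path:
    #   _get_volume_id('ip-10.0.0.1:volume-00000001-lun-0')
    #   -> slice after ':' gives 'volume-00000001-lun-0'
    #   -> strip the 'volume-' prefix, keep up to the next '-' -> '00000001'
    #   -> int('00000001') == 1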
volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
if volume_id == path_or_id:
volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
volume_id = volume_id.replace('volume--', '')
else:
volume_id = volume_id.replace('volume-', '')
volume_id = volume_id[0:volume_id.find('-')]
return int(volume_id)
def _get_target_host(iscsi_string):
    """Retrieve target host"""
    if iscsi_string:
        return iscsi_string[0:iscsi_string.find(':')]
    return FLAGS.target_host
def _get_target_port(iscsi_string):
    """Retrieve target port"""
    if iscsi_string:
        return iscsi_string[iscsi_string.find(':') + 1:]
    return FLAGS.target_port
def _get_iqn(iscsi_string, id):
"""Retrieve target IQN"""
if iscsi_string:
return iscsi_string
elif iscsi_string is None or FLAGS.iqn_prefix:
volume_id = _get_volume_id(id)
return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
def _get_target(volume_id):
"""
Gets iscsi name and portal from volume name and host.
For this method to work the following are needed:
    1) volume_ref['host'] must resolve to something other than loopback
"""
volume_ref = db.volume_get(context.get_admin_context(),
volume_id)
result = (None, None)
try:
(r, _e) = utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'sendtargets',
'-p', volume_ref['host'], run_as_root=True)
except exception.ProcessExecutionError, exc:
LOG.exception(exc)
else:
volume_name = "volume-%08x" % volume_id
        for target in r.splitlines():
            if FLAGS.iscsi_ip_prefix in target and volume_name in target:
                (location, _sep, iscsi_name) = target.partition(" ")
                iscsi_portal = location.split(",")[0]
                result = (iscsi_name, iscsi_portal)
                break
return result
|
apache-2.0
|
yangming85/lettuce
|
tests/integration/lib/Django-1.2.5/django/contrib/auth/tests/basic.py
|
137
|
3575
|
from django.test import TestCase
from django.contrib.auth.models import User, AnonymousUser
from django.core.management import call_command
from StringIO import StringIO
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
        self.assertFalse(u2.has_usable_password())
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_createsuperuser_management_command(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
        self.assertEqual(u.email, '[email protected]')
self.assertTrue(u.check_password(''))
        # We can suppress output on the management command
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
        self.assertEqual(u.email, '[email protected]')
self.assertTrue(u.check_password(''))
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
stdout=new_io
)
u = User.objects.get(username="[email protected]")
        self.assertEqual(u.email, '[email protected]')
self.assertTrue(u.check_password(''))
|
gpl-3.0
|
davidwilson-85/easymap
|
graphic_output/Pillow-4.2.1/Tests/test_file_sgi.py
|
1
|
1819
|
from helper import unittest, PillowTestCase, hopper
from PIL import Image, SgiImagePlugin
class TestFileSgi(PillowTestCase):
def test_rgb(self):
# Created with ImageMagick then renamed:
# convert hopper.ppm -compress None sgi:hopper.rgb
test_file = "Tests/images/hopper.rgb"
im = Image.open(test_file)
self.assert_image_equal(im, hopper())
def test_l(self):
# Created with ImageMagick
# convert hopper.ppm -monochrome -compress None sgi:hopper.bw
test_file = "Tests/images/hopper.bw"
im = Image.open(test_file)
self.assert_image_similar(im, hopper('L'), 2)
def test_rgba(self):
# Created with ImageMagick:
# convert transparent.png -compress None transparent.sgi
test_file = "Tests/images/transparent.sgi"
im = Image.open(test_file)
target = Image.open('Tests/images/transparent.png')
self.assert_image_equal(im, target)
def test_rle(self):
# convert hopper.ppm hopper.sgi
        # We don't support RLE compression; this should raise a ValueError
test_file = "Tests/images/hopper.sgi"
with self.assertRaises(ValueError):
Image.open(test_file)
def test_invalid_file(self):
invalid_file = "Tests/images/flower.jpg"
self.assertRaises(ValueError,
lambda:
SgiImagePlugin.SgiImageFile(invalid_file))
def test_write(self):
def roundtrip(img):
out = self.tempfile('temp.sgi')
img.save(out, format='sgi')
reloaded = Image.open(out)
self.assert_image_equal(img, reloaded)
for mode in ('L', 'RGB', 'RGBA'):
roundtrip(hopper(mode))
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
ruler501/acrTestingScripts
|
autorun.py
|
1
|
2366
|
import sys
import threading
import psutil
if sys.platform.startswith('win32'):
from winpexpect import EOF, TIMEOUT
from winpexpect import winspawn as spawn
else:
from pexpect import EOF, TIMEOUT
from pexpect import spawn
class PlatformError(Exception):
def __init__(self, platform):
self.platform = platform
def __str__(self):
return self.platform+" is not currently supported"
def ACRcommand(client=True):
if sys.platform.startswith('linux'):
if client:
return "bin_unix/native_client --home=data --mod=acr --init"
else:
return "bin_unix/native_server"
elif sys.platform.startswith('win32'):
if client:
return "bin_win32/ac_client.exe --home=data --mod=acr --init"
else:
return "bin_win32/ac_server.exe"
else:
raise PlatformError(sys.platform)
def checkRAM():
if childPID < 1:
return False
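    # memory_info()[0] is the resident set size (RSS);
    # the threshold 1073741824 bytes == 1 GiB.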
return psutil.Process(childPID).memory_info()[0] > 1073741824
shouldExit = False
childPID = -1
def main(argv=None):
global shouldExit
global childPID
    if argv is None:
argv = sys.argv
for i in xrange(len(argv)):
if argv[i] == "--log":
log = open(argv[i+1],'w')
break
else:
log = open('debug.log','w')
print "Starting child"
child = spawn("gdb -quiet -fullname -args "+ACRcommand(), logfile=log)
child.expect_exact('(gdb)')
print "Loading Scripts"
child.sendline('source client.py')
child.expect_exact('(gdb)')
print "Running child"
child.sendline('r')
child.expect(r"New Thread \d+\.")
try:
childPID = int(child.after.split('.')[0].split()[-1])
except ValueError:
print "Couldn't find the child's PID"
if "--ucontrol" in argv:
child.interact()
else:
try:
while child.isalive():
i = child.expect_exact(['(gdb)', 'exited with code', TIMEOUT], timeout=1)
if i == 0:
log.write("ERROR ABOVE\n")
print "continuing"
child.sendline('c')
elif i == 1:
log.write("Exited\n")
log.close()
return 0
elif i == 2:
if checkRAM():
log.write("Memory Overflow")
child.kill(5)
#child.terminate()
if shouldExit:
print "Exitting"
child.terminate()
except EOF:
pass
log.close()
return 0
class debugRun(threading.Thread):
def stop(self):
global shouldExit
shouldExit = True
if __name__=="__main__":
main()
|
lgpl-3.0
|
tradel/AppDynamicsREST
|
docs/conf.py
|
1
|
8506
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# AppDynamicsREST documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import appd
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AppDynamics REST API Library'
copyright = u'2013-2015 AppDynamics'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = appd.__version__
# The full version, including alpha/beta/rc tags.
release = appd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'appd_doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'AppDynamicsREST.tex',
u'AppDynamics REST API Library Documentation',
u'Todd Radel', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'AppDynamicsREST',
u'AppDynamics REST API Library Documentation',
[u'Todd Radel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AppDynamicsREST',
u'AppDynamics REST API Library Documentation',
u'Todd Radel',
'AppDynamicsREST',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
|
kxliugang/edx-platform
|
lms/djangoapps/discussion_api/tests/test_api.py
|
6
|
122916
|
"""
Tests for Discussion API internal interface
"""
from datetime import datetime, timedelta
import itertools
from urlparse import parse_qs, urlparse, urlunparse
from urllib import urlencode
import ddt
import httpretty
import mock
from pytz import UTC
from django.core.exceptions import ValidationError
from django.http import Http404
from django.test.client import RequestFactory
from rest_framework.exceptions import PermissionDenied
from opaque_keys.edx.locator import CourseLocator
from common.test.utils import MockSignalHandlerMixin, disable_signal
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from discussion_api import api
from discussion_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread_list,
update_comment,
update_thread,
get_thread,
)
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the module.
"""
    course.tabs = [tab for tab in course.tabs if tab.type != 'discussion']
modulestore().update_item(course, user_id)
def _discussion_disabled_course_for(user):
"""
Create and return a course with discussions disabled.
The user passed in will be enrolled in the course.
"""
course_with_disabled_forums = CourseFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id)
_remove_discussion_tab(course_with_disabled_forums, user.id)
return course_with_disabled_forums
@ddt.ddt
class GetCourseTest(UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_course"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(GetCourseTest, cls).setUpClass()
cls.course = CourseFactory.create(org="x", course="y", run="z")
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
with self.assertRaises(Http404):
get_course(self.request, _discussion_disabled_course_for(self.user).id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"following_thread_list_url": (
"http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
),
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
def test_blackout(self):
# A variety of formats is accepted
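        # (ISO 8601 string, 'M-D-YY' string, epoch milliseconds, datetime)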
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
class GetCourseTopicsTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_module(self, topic_id, category, subcategory, **kwargs):
"""Build a discussion module in self.course"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
def get_course_topics(self):
"""
        Get course topics for self.course as self.request.user, generating
        absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_module("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "A", "1")
self.make_discussion_module("courseware-2", "A", "2")
self.make_discussion_module("courseware-3", "B", "1")
self.make_discussion_module("courseware-4", "B", "2")
self.make_discussion_module("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "First", "A", sort_key="D")
self.make_discussion_module("courseware-2", "First", "B", sort_key="B")
self.make_discussion_module("courseware-3", "First", "C", sort_key="E")
self.make_discussion_module("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_module("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_module("courseware-6", "Second", "C")
self.make_discussion_module("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.make_discussion_module("courseware-1", "First", "Everybody")
self.make_discussion_module(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_module(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_module("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_module(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
@ddt.ddt
class GetThreadListTest(CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_thread_list"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(GetThreadListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_thread_list([])
def test_discussions_disabled(self):
with self.assertRaises(Http404):
self.get_thread_list([], course=_discussion_disabled_course_for(self.user))
def test_empty(self):
self.assertEqual(
self.get_thread_list([]),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["1"],
"recursive": ["False"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["6"],
"per_page": ["14"],
"recursive": ["False"],
})
def test_thread_content(self):
source_threads = [
{
"type": "thread",
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"group_id": None,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"thread_type": "discussion",
"title": "Test Title",
"body": "Test body",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
"endorsed": True,
"read": True,
},
{
"type": "thread",
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"pinned": False,
"closed": True,
"abuse_flaggers": [],
"votes": {"up_count": 9},
"comments_count": 18,
"unread_comments_count": 0,
"endorsed": False,
"read": False,
},
]
expected_threads = [
{
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"topic_id": "topic_x",
"group_id": None,
"group_name": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"comment_count": 5,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "voted"],
"has_endorsed": True,
"read": True,
},
{
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"pinned": False,
"closed": True,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 9,
"comment_count": 18,
"unread_comment_count": 0,
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["abuse_flagged", "following", "voted"],
"has_endorsed": False,
"read": False,
},
]
self.assertEqual(
self.get_thread_list(source_threads),
{
"results": expected_threads,
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=2",
"previous": None,
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=3",
"previous": "http://testserver/test_path?page=1",
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3),
{
"results": [],
"next": None,
"previous": "http://testserver/test_path?page=2",
"text_search_rewrite": None,
}
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(Http404):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
self.register_get_threads_search_response([], text_search_rewrite)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": text_search_rewrite,
}
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["10"],
"recursive": ["False"],
"text": ["test search string"],
})
def test_following(self):
self.register_subscribed_threads_response(self.user, [], page=1, num_pages=1)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
following=True,
)
self.assertEqual(
result,
{"results": [], "next": None, "previous": None, "text_search_rewrite": None}
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/users/{}/subscribed_threads".format(self.user.id)
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
})
@ddt.data("unanswered", "unread")
def test_view_query(self, query):
self.register_get_threads_response([], page=1, num_pages=1)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
view=query,
)
self.assertEqual(
result,
{"results": [], "next": None, "previous": None, "text_search_rewrite": None}
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
query: ["true"],
})
@ddt.data(
("last_activity_at", "date"),
("comment_count", "comments"),
("vote_count", "votes")
)
@ddt.unpack
def test_order_by_query(self, http_query, cc_query):
"""
Tests the order_by parameter
Arguments:
http_query (str): Query string sent in the http request
cc_query (str): Query string used for the comments client service
"""
self.register_get_threads_response([], page=1, num_pages=1)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_by=http_query,
)
self.assertEqual(
result,
{"results": [], "next": None, "previous": None, "text_search_rewrite": None}
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": [cc_query],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
})
@ddt.data("asc", "desc")
def test_order_direction_query(self, http_query):
self.register_get_threads_response([], page=1, num_pages=1)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_direction=http_query,
)
self.assertEqual(
result,
{"results": [], "next": None, "previous": None, "text_search_rewrite": None}
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": [http_query],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
})
@ddt.ddt
class GetCommentListTest(CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Test for get_comment_list"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(GetCommentListTest, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(Http404):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
with self.assertRaises(Http404):
self.get_comment_list(
self.make_minimal_cs_thread(
overrides={"course_id": unicode(disabled_course.id)}
)
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread),
{"results": [], "next": None, "previous": None}
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False),
{"results": [], "next": None, "previous": None}
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True),
{"results": [], "next": None, "previous": None}
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment()],
"resp_total": 71
}),
page=6,
page_size=14
)
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"recursive": ["True"],
"user_id": [str(self.user.id)],
"mark_as_read": ["False"],
"resp_skip": ["70"],
"resp_limit": ["14"],
}
)
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"children": [],
"editable_fields": ["abuse_flagged", "voted"],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"children": [],
"editable_fields": ["abuse_flagged", "voted"],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
)["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment"})],
"non_endorsed_responses": [make_minimal_cs_comment({"id": "non_endorsed_comment"})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True)
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False)
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"}
})
]
})
actual_comments = self.get_comment_list(thread)["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment()],
response_total_field: 5,
})
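# With resp_total = 5, expect a single page at page_size=5 and three
# pages at page_size=2; "next"/"previous" links should reuse the
# request path with only the page number changed.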
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5)
self.assertIsNone(actual["next"])
self.assertIsNone(actual["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2)
self.assertIsNone(actual["next"])
self.assertEqual(actual["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [
make_minimal_cs_comment({"id": "comment_{}".format(i)}) for i in range(10)
]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size)
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
@ddt.ddt
@disable_signal(api, 'thread_created')
@disable_signal(api, 'thread_voted')
class CreateThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_thread"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(CreateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
self.register_post_thread_response({
"id": "test_id",
"username": self.user.username,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
})
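# Only the fields the comments service itself generates are stubbed
# here; the API is expected to merge them with the submitted data to
# produce the serialized thread below.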
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_thread(self.request, self.minimal_data)
expected = {
"id": "test_id",
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "raw_body", "title", "topic_id", "type", "voted"],
"read": False,
"has_endorsed": False
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group_set", "group_is_none", "group_is_set"],
)
)
@ddt.unpack
def test_group_id(self, role_name, course_is_cohorted, topic_is_cohorted, data_group_state):
"""
Tests whether the user has permission to create a thread with certain
group_id values.
Explicitly passing a group_id (whether None or a value) should fail
unless the course is cohorted and the user has a privileged role;
omitting group_id always succeeds, with the user's own cohort applied
automatically when the topic is cohorted.
"""
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
if course_is_cohorted:
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_post_thread_response({})
data = self.minimal_data.copy()
data["course_id"] = unicode(cohort_course.id)
if data_group_state == "group_is_none":
data["group_id"] = None
elif data_group_state == "group_is_set":
if course_is_cohorted:
data["group_id"] = cohort.id + 1
else:
data["group_id"] = 1 # Set to any value since there is no cohort
expected_error = (
data_group_state in ["group_is_none", "group_is_set"] and
(not course_is_cohorted or role_name == FORUM_ROLE_STUDENT)
)
try:
create_thread(self.request, data)
self.assertFalse(expected_error)
actual_post_data = httpretty.last_request().parsed_body
if data_group_state == "group_is_set":
self.assertEqual(actual_post_data["group_id"], [str(data["group_id"])])
elif data_group_state == "no_group_set" and course_is_cohorted and topic_is_cohorted:
self.assertEqual(actual_post_data["group_id"], [str(cohort.id)])
else:
self.assertNotIn("group_id", actual_post_data)
except ValidationError:
self.assertTrue(expected_error)
def test_following(self):
self.register_post_thread_response({"id": "test_id"})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'thread_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_flag_response("test_id")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "non/existent/course"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.minimal_data["course_id"] = unicode(disabled_course.id)
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
@disable_signal(api, 'comment_created')
@disable_signal(api, 'comment_voted')
class CreateCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_comment"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(CreateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
with self.assert_signal_sent(api, 'comment_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"]
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
)
)
@ddt.unpack
def test_endorsed(self, role_name, is_thread_author, thread_type):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"thread_type": thread_type,
"user_id": str(self.user.id) if is_thread_author else str(self.user.id + 1),
})
)
self.register_post_comment_response({}, "test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(not is_thread_author or thread_type == "discussion")
)
try:
create_comment(self.request, data)
self.assertEqual(httpretty.last_request().parsed_body["endorsed"], ["True"])
self.assertFalse(expected_error)
except ValidationError:
self.assertTrue(expected_error)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'comment_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_flag_response("test_comment")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(disabled_course.id),
"commentable_id": "test_topic",
})
)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"thread_id": ["Invalid value."]}
)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
@disable_signal(api, 'thread_edited')
@disable_signal(api, 'thread_voted')
class UpdateThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_thread"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(UpdateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
expected = {
"id": "test_thread",
"course_id": unicode(self.course.id),
"topic_id": "original_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"type": "discussion",
"title": "Original Title",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "raw_body", "title", "topic_id", "type", "voted"],
"read": False,
"has_endorsed": False,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
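# The comments-service client may attach a request_id parameter to its
# requests (hence the pop below); ignore it when comparing.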
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, current_vote_status, new_vote_status):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the thread should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_vote_status}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests that vote_count increases and decreases correctly for the same user
"""
# Setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
starting_vote_count = 1
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": starting_vote_count}})
# First vote
data = {"voted": first_vote}
result = update_thread(self.request, "test_thread", data)
self.register_thread(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
# Second vote
data = {"voted": second_vote}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests that vote_count increases and decreases correctly across different users
"""
# Setup
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_thread"])
vote_count += 1
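# Apply each user's vote in turn; the running expectation changes by
# one only when that user's vote actually flips state.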
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_thread(request, "test_thread", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the thread should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_thread_flag_response("test_thread")
self.register_thread({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/threads/test_thread/abuse_flag"
unflag_url = "/api/v1/threads/test_thread/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field is required."]}
)
@ddt.ddt
@disable_signal(api, 'comment_edited')
@disable_signal(api, 'comment_voted')
class UpdateCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_comment"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(UpdateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None, course=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
if course is None:
course = self.course
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
with self.assert_signal_sent(api, 'comment_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"]
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
self.register_comment(course=_discussion_disabled_course_for(self.user))
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
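# Mirrors the create-time rule: a student may toggle "endorsed" only
# on their own question thread; privileged roles always may.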
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, current_vote_status, new_vote_status):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the comment should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": new_vote_status}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if new_vote_status else 0)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests that vote_count increases and decreases correctly for the same user
"""
# Setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
starting_vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": starting_vote_count}})
# First vote
data = {"voted": first_vote}
result = update_comment(self.request, "test_comment", data)
self.register_comment(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
# Second vote
data = {"voted": second_vote}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests that vote_count increases and decreases correctly across different users
"""
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_comment"])
vote_count += 1
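# As in the thread test above, apply each user's vote in sequence and
# track the expected count as votes flip state.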
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_comment(request, "test_comment", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the comment should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_comment_flag_response("test_comment")
self.register_comment({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/comments/test_comment/abuse_flag"
unflag_url = "/api/v1/comments/test_comment/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
@ddt.ddt
@disable_signal(api, 'thread_deleted')
class DeleteThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_thread"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(DeleteThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(Http404):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread.
All privileged roles are able to delete any thread. A student can
delete their own thread only when it has no group or belongs to the
student's own cohort; a thread in a different cohort's group is
treated as not found (Http404).
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.ddt
@disable_signal(api, 'comment_deleted')
class DeleteCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_comment"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(DeleteCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
with self.assert_signal_sent(api, 'comment_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(Http404):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_comment_and_thread(
thread_overrides={"course_id": unicode(disabled_course.id)},
overrides={"course_id": unicode(disabled_course.id)}
)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment
        All privileged roles are able to delete a comment. A student can only
        delete a comment if the student is the author and the comment is
        either not in a cohort or is in the author's cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.ddt
class RetrieveThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase
):
"""Tests for get_thread"""
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super(RetrieveThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(RetrieveThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.thread_author = UserFactory.create()
self.register_get_user_response(self.thread_author)
self.request = RequestFactory().get("/test_path")
self.request.user = self.thread_author
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.thread_author, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for GET on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
"username": self.thread_author.username,
"user_id": str(self.thread_author.id),
"title": "Test Title",
"body": "Test body",
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z"
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
def test_basic(self):
expected_response_data = {
"author": self.thread_author.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"editable_fields": ["abuse_flagged", "following", "raw_body", "title", "topic_id", "type", "voted"],
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"title": "Test Title",
"pinned": False,
"closed": False,
"following": False,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"read": False,
"has_endorsed": False,
"id": "test_thread",
"type": "discussion"
}
self.register_thread()
self.assertEqual(get_thread(self.request, self.thread_id), expected_response_data)
self.assertEqual(httpretty.last_request().method, "GET")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(Http404):
get_thread(self.request, "missing_thread")
def test_nonauthor_enrolled_in_course(self):
expected_response_data = {
"author": self.thread_author.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"editable_fields": ["abuse_flagged", "following", "voted"],
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"title": "Test Title",
"pinned": False,
"closed": False,
"following": False,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"read": False,
"has_endorsed": False,
"id": "test_thread",
"type": "discussion"
}
non_author_user = UserFactory.create() # pylint: disable=attribute-defined-outside-init
self.register_get_user_response(non_author_user)
CourseEnrollmentFactory.create(user=non_author_user, course_id=self.course.id)
self.register_thread()
self.request.user = non_author_user
self.assertEqual(get_thread(self.request, self.thread_id), expected_response_data)
self.assertEqual(httpretty.last_request().method, "GET")
def test_not_enrolled_in_course(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
get_thread(self.request, self.thread_id)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for retrieving a thread
        All privileged roles are able to retrieve a thread. A student can only
        retrieve a thread if the student is the author and the thread is
        either not in a cohort or is in the author's cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.thread_author, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.thread_author])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.thread_author]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
get_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
|
agpl-3.0
|
hehongliang/tensorflow
|
tensorflow/python/debug/cli/evaluator.py
|
80
|
5731
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for arbitrary expression evaluation based on a debugger data dump."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np # pylint: disable=unused-import
from tensorflow.python.debug.lib import debug_data
_DUMP_TENSOR_PATTERN = re.compile(r"`.*?`")
_DEVICE_NAME_PREFIX_PATTERN = re.compile(
r"/job:(\w)+/replica:(\d)+/task:(\d)+/(\w)+:(\d)+:")
_EXEC_INDEX_SUFFIX_PATTERN = re.compile(r"\[(\d)*\]$")
_DEFAULT_DEBUG_OP = "DebugIdentity"
def _parse_debug_tensor_name(debug_tensor_name):
# pylint: disable=line-too-long
"""Parse a debug tensor name in a to-be-evaluated expression.
Args:
debug_tensor_name: name of the debug tensor, with or without
device name as a prefix, with or without debug op, with or
without '[<exec_index>]' as a suffix.
E.g., without device name prefix, without debug op suffix:
"hidden_0/MatMul:0"
E.g., with device name prefix:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0"
E.g., with debug op suffix:
"hidden_0/MatMul:0:DebugNumericSummary"
E.g., with device name prefix and debug op suffix:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary"
E.g., with device name prefix, debug op and an exec index:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary[1]"
Returns:
device_name: If device name prefix exists, the device name; otherwise,
`None`.
node_name: Name of the node.
output_slot: Output slot index as an `int`.
    debug_op: If the debug op suffix exists, the debug op name; otherwise,
`None`.
exec_index: Execution index (applicable to cases in which a debug tensor
is computed multiple times in a `tf.Session.run` call, e.g., due to
`tf.while_loop`). If the exec_index suffix does not exist, this value
defaults to `0`.
Raises:
ValueError: If the input `debug_tensor_name` is malformed.
"""
# pylint: enable=line-too-long
device_prefix_match = re.match(_DEVICE_NAME_PREFIX_PATTERN, debug_tensor_name)
if device_prefix_match:
device_name = debug_tensor_name[
device_prefix_match.start() : device_prefix_match.end() - 1]
debug_tensor_name = debug_tensor_name[device_prefix_match.end():]
else:
device_name = None
split_items = debug_tensor_name.split(":")
if len(split_items) not in (2, 3):
raise ValueError(
"The debug tensor name in the to-be-evaluated expression is malformed: "
"'%s'" % debug_tensor_name)
# TODO(cais): Provide examples of good debug tensor names in the error
# message.
exec_index_match = re.search(_EXEC_INDEX_SUFFIX_PATTERN, split_items[-1])
if exec_index_match:
exec_index = int(split_items[-1][
exec_index_match.start() + 1 : exec_index_match.end() - 1])
split_items[-1] = split_items[-1][:exec_index_match.start()]
else:
exec_index = 0
if len(split_items) == 2:
node_name = split_items[0]
output_slot = int(split_items[1])
debug_op = _DEFAULT_DEBUG_OP
  else:
    # Note: do not re-split debug_tensor_name here; the '[<exec_index>]'
    # suffix has already been stripped from split_items[-1] above, and
    # re-splitting would reattach it to the debug op name.
    node_name = split_items[0]
    output_slot = int(split_items[1])
    debug_op = split_items[2]
return device_name, node_name, output_slot, debug_op, exec_index
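# Illustrative parses, following the name formats described in the docstring
# above (the tensor names themselves are made up for illustration):
#
#   _parse_debug_tensor_name("hidden_0/MatMul:0")
#   # -> (None, "hidden_0/MatMul", 0, "DebugIdentity", 0)
#
#   _parse_debug_tensor_name(
#       "/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary[1]")
#   # -> ("/job:worker/replica:0/task:1/gpu:0", "hidden_0/MatMul", 0,
#   #     "DebugNumericSummary", 1)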
class ExpressionEvaluator(object):
"""Evaluates Python expressions using debug tensor values from a dump."""
def __init__(self, dump):
"""Constructor of ExpressionEvaluator.
Args:
dump: an instance of `DebugDumpDir`.
"""
self._dump = dump
self._cached_tensor_values = dict()
def evaluate(self, expression):
"""Parse an expression.
Args:
expression: the expression to be parsed.
Returns:
The result of the evaluation.
Raises:
ValueError: If the value of one or more of the debug tensors in the
expression are not available.
"""
dump_tensors_iter = re.finditer(_DUMP_TENSOR_PATTERN, expression)
rewritten_expression = expression
for match in reversed(list(dump_tensors_iter)):
tensor_name = match.group(0)[1:-1].strip()
device_name, node_name, output_slot, debug_op, exec_index = (
_parse_debug_tensor_name(tensor_name))
if tensor_name not in self._cached_tensor_values:
try:
value = self._dump.get_tensors(
node_name, output_slot, debug_op,
device_name=device_name)[exec_index]
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
raise ValueError(
"Eval failed due to the value of %s:%d:DebugIdentity being "
"unavailable" % (node_name, output_slot))
self._cached_tensor_values[tensor_name] = value
rewritten_expression = (
rewritten_expression[:match.start(0)] +
"self._cached_tensor_values['" + tensor_name + "']" +
rewritten_expression[match.end(0):])
return eval(rewritten_expression) # pylint: disable=eval-used
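# A minimal usage sketch, assuming a debug dump was written to the
# (illustrative) directory below during a tf.Session.run call:
#
#   dump = debug_data.DebugDumpDir("/tmp/tfdbg_dump_root/run-0")
#   evaluator = ExpressionEvaluator(dump)
#   mean = evaluator.evaluate("np.mean(`hidden_0/MatMul:0`)")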
|
apache-2.0
|
JulyKikuAkita/PythonPrac
|
cs15211/BuddyStrings.py
|
1
|
3894
|
__source__ = 'https://leetcode.com/problems/buddy-strings/'
# Time: O(N), where N is the length of A and B
# Space: O(1)
#
# Description: Leetcode # 859. Buddy Strings
#
# Given two strings A and B of lowercase letters,
# return true if and only if we can swap two letters in A so that the result equals B.
#
# Example 1:
#
# Input: A = "ab", B = "ba"
# Output: true
# Example 2:
#
# Input: A = "ab", B = "ab"
# Output: false
# Example 3:
#
# Input: A = "aa", B = "aa"
# Output: true
# Example 4:
#
# Input: A = "aaaaaaabc", B = "aaaaaaacb"
# Output: true
# Example 5:
#
# Input: A = "", B = "aa"
# Output: false
#
#
# Note:
#
# 0 <= A.length <= 20000
# 0 <= B.length <= 20000
# A and B consist only of lowercase letters.
#
import unittest
import itertools
# 88.96% 24ms
class Solution(object):
def buddyStrings(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
        if len(A) != len(B): return False
if A == B: # True when A can swap 2 identical chars
seen = set()
for a in A:
if a in seen:
return True
seen.add(a)
return False
else:
pairs = []
for a, b in itertools.izip(A, B):
if a != b:
pairs.append((a, b))
if len(pairs) >= 3:
return False
return len(pairs) == 2 and pairs[0] == pairs[1][::-1]
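# Illustrative calls, mirroring the examples in the problem description above:
#
#   Solution().buddyStrings("ab", "ba")                # True: swap 'a' and 'b'
#   Solution().buddyStrings("ab", "ab")                # False: no repeated char
#   Solution().buddyStrings("aa", "aa")                # True: swap the two 'a's
#   Solution().buddyStrings("aaaaaaabc", "aaaaaaacb")  # True
#   Solution().buddyStrings("", "aa")                  # False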
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/buddy-strings/solution/
#
# If swapping A[i] and A[j] would demonstrate that A and B are buddy strings,
# then A[i] == B[j] and A[j] == B[i]. That means among the four free variables A[i], A[j],
# B[i], B[j], there are only two cases: either A[i] == A[j] or not.
#
# 2ms 99.73%
class Solution {
public boolean buddyStrings(String A, String B) {
if (A.length() != B.length()) return false;
if (A.equals(B)) {
int[] count = new int[26];
for (char c : A.toCharArray()) {
count[c-'a']++;
}
for (int cnt : count) {
if (cnt > 1) return true;
}
return false;
} else {
int first = -1, second = -1;
for (int i = 0; i < A.length(); i++) {
if (A.charAt(i) != B.charAt(i)) {
if (first == -1) first = i;
else if (second == -1) second = i;
else return false;
}
}
return (second != -1 && A.charAt(first) == B.charAt(second) && A.charAt(second) == B.charAt(first));
}
}
}
# use char array
# 2ms 98.73%
class Solution {
public boolean buddyStrings(String A, String B) {
if (A == null || B == null) return false;
if (A.length() != B.length()) return false;
if (A.equals(B)) {
int[] alphabets = new int[26];
for (int i = 0; i < A.length(); i++) {
            if (++alphabets[A.charAt(i) - 'a'] > 1) return true;
}
return false;
}
int diffCnt = 0;
char[] arrA = new char[2];
char[] arrB = new char[2];
for (int i = 0; i < A.length(); i++) {
if (A.charAt(i) != B.charAt(i)) {
if (diffCnt == 0) {
arrA[0] = A.charAt(i);
arrB[0] = B.charAt(i);
} else {
arrA[1] = A.charAt(i);
arrB[1] = B.charAt(i);
}
diffCnt++;
}
if (diffCnt > 2) return false;
}
if (arrA[0] == arrB[1] && arrA[1] == arrB[0]) return true;
return false;
}
}
'''
|
apache-2.0
|
kikusu/chainer
|
chainer/training/trainer.py
|
1
|
12389
|
import collections
import os
import six
from chainer import reporter as reporter_module
from chainer.training import extension as extension_module
from chainer.training import trigger as trigger_module
class _ExtensionEntry(object):
def __init__(self, extension, priority, trigger, invoke_before_training):
self.extension = extension
self.trigger = trigger
self.invoke_before_training = invoke_before_training
self.priority = priority
class Trainer(object):
"""The standard training loop in Chainer.
Trainer is an implementation of a training loop. Users can invoke the
training by calling the :meth:`run` method.
Each iteration of the training loop proceeds as follows.
- Update of the parameters. It includes the mini-batch loading, forward
and backward computations, and an execution of the update formula.
These are all done by the update object held by the trainer.
- Invocation of trainer extensions in the descending order of their
priorities. A trigger object is attached to each extension, and it
decides at each iteration whether the extension should be executed.
Trigger objects are callable objects that take the trainer object as the
argument and return a boolean value indicating whether the extension
should be called or not.
Extensions are callable objects that take the trainer object as the
argument. There are two ways to define custom extensions: inheriting the
:class:`Extension` class, and decorating functions by
:func:`make_extension`. See :class:`Extension` for more details on custom
extensions.
Users can register extensions to the trainer by calling the :meth:`extend`
method, where some configurations can be added.
- Trigger object, which is also explained above. In most cases,
:class:`IntervalTrigger` is used, in which case users can simply specify
a tuple of the interval length and its unit, like
``(1000, 'iteration')`` or ``(1, 'epoch')``.
- The order of execution of extensions is determined by their priorities.
Extensions of higher priorities are invoked earlier. There are three
standard values for the priorities:
- ``PRIORITY_WRITER``. This is the priority for extensions that write
some records to the :attr:`observation` dictionary. It includes cases
that the extension directly adds values to the observation dictionary,
or the extension uses the :func:`chainer.report` function to report
values to the observation dictionary.
- ``PRIORITY_EDITOR``. This is the priority for extensions that edit the
:attr:`observation` dictionary based on already reported values.
- ``PRIORITY_READER``. This is the priority for extensions that only read
records from the :attr:`observation` dictionary. This is also suitable
for extensions that do not use the :attr:`observation` dictionary at
all.
- Extensions with ``invoke_before_training`` flag on are also invoked at
the beginning of the training loop. Extensions that update the training
status (e.g., changing learning rates) should have this flag to be
``True`` to ensure that resume of the training loop correctly recovers
the training status.
The current state of the trainer object and objects handled by the trainer
can be serialized through the standard serialization protocol of Chainer.
It enables us to easily suspend and resume the training loop.
.. note::
The serialization does not recover everything of the training loop. It
only recovers the states which change over the training (e.g.
parameters, optimizer states, the batch iterator state, extension
states, etc.). You must initialize the objects correctly before
deserializing the states.
On the other hand, it means that users can change the settings on
deserialization. For example, the exit condition can be changed on the
deserialization, so users can train the model for some iterations,
    suspend it, and then resume it with a larger number of total iterations.
During the training, it also creates a :class:`~chainer.Reporter` object to
store observed values on each update. For each iteration, it creates a
fresh observation dictionary and stores it in the :attr:`observation`
attribute.
Links of the target model of each optimizer are registered to the reporter
object as observers, where the name of each observer is constructed as the
format ``<optimizer name><link name>``. The link name is given by the
:meth:`chainer.Link.namedlink` method, which represents the path to each
link in the hierarchy. Other observers can be registered by accessing the
reporter object via the :attr:`reporter` attribute.
The default trainer is `plain`, i.e., it does not contain any extensions.
Args:
updater (~chainer.training.Updater): Updater object. It defines how to
update the models.
stop_trigger: Trigger that determines when to stop the training loop.
If it is not callable, it is passed to :class:`IntervalTrigger`.
Attributes:
updater: The updater object for this trainer.
stop_trigger: Trigger that determines when to stop the training loop.
The training loop stops at the iteration on which this trigger
returns ``True``.
observation: Observation of values made at the last update. See the
:class:`Reporter` class for details.
out: Output directory.
reporter: Reporter object to report observed values.
"""
def __init__(self, updater, stop_trigger=None, out='result'):
self.updater = updater
self.stop_trigger = trigger_module.get_trigger(stop_trigger)
self.observation = {}
self.out = out
reporter = reporter_module.Reporter()
for name, optimizer in six.iteritems(updater.get_all_optimizers()):
reporter.add_observer(name, optimizer.target)
reporter.add_observers(
name, optimizer.target.namedlinks(skipself=True))
self.reporter = reporter
self._done = False
self._extensions = collections.OrderedDict()
updater.connect_trainer(self)
def extend(self, extension, name=None, trigger=None, priority=None,
invoke_before_training=None):
"""Registers an extension to the trainer.
:class:`Extension` is a callable object which is called after each
update unless the corresponding trigger object decides to skip the
iteration. The order of execution is determined by priorities:
extensions with higher priorities are called earlier in each iteration.
Extensions with the same priority are invoked in the order of
registrations.
        If two or more extensions with the same name are registered, suffixes
        are added to the names of the second and subsequent extensions. The
        suffix is ``_N`` where N is the ordinal of the extension.
See :class:`Extension` for the interface of extensions.
Args:
extension: Extension to register.
name (str): Name of the extension. If it is omitted, the
``default_name`` attribute of the extension is used instead.
Note that the name would be suffixed by an ordinal in case of
duplicated names as explained above.
trigger (tuple or Trigger): Trigger object that determines when to
invoke the extension. If it is ``None``, ``extension.trigger``
is used instead. If the trigger is not callable, it is passed
to :class:`IntervalTrigger` to build an interval trigger.
priority (int): Invocation priority of the extension. Extensions
are invoked in the descending order of priorities in each
iteration. If this is ``None``, ``extension.priority`` is used
instead.
invoke_before_training (bool): If ``True``, the extension is also
invoked just before entering the training loop. If this
``None``, ``extension.invoke_before_training`` is used instead.
This option is mainly used for extensions that alter the
training configuration (e.g., learning rates); in such a case,
                resuming from snapshots requires calling the extension to
recover the configuration before any updates.
"""
if name is None:
name = getattr(extension, 'name', None)
if name is None:
name = getattr(extension, 'default_name', None)
if name is None:
raise TypeError('name is not given for the extension')
if name == 'training':
raise ValueError(
'the name "training" is prohibited as an extension name')
if trigger is None:
trigger = getattr(extension, 'trigger', None)
trigger = trigger_module.get_trigger(trigger)
if priority is None:
priority = getattr(
extension, 'priority', extension_module.PRIORITY_READER)
if invoke_before_training is None:
invoke_before_training = getattr(
extension, 'invoke_before_training', False)
modified_name = name
ordinal = 0
while modified_name in self._extensions:
ordinal += 1
modified_name = '%s_%d' % (name, ordinal)
extension.name = modified_name
self._extensions[modified_name] = _ExtensionEntry(
extension, priority, trigger, invoke_before_training)
def get_extension(self, name):
"""Returns the extension of a given name.
Args:
name (str): Name of the extension.
Returns:
Extension.
"""
extensions = self._extensions
if name in extensions:
return extensions[name].extension
else:
raise ValueError('extension %s not found' % name)
def run(self):
"""Executes the training loop.
This method is the core of ``Trainer``. It executes the whole loop of
training the models.
Note that this method cannot run multiple times for one trainer object.
"""
if self._done:
raise RuntimeError('cannot run training loop multiple times')
try:
os.makedirs(self.out)
except OSError:
pass
# sort extensions by priorities
extension_order = sorted(
self._extensions.keys(),
key=lambda name: self._extensions[name].priority, reverse=True)
extensions = [(name, self._extensions[name])
for name in extension_order]
# invoke extensions before the loop
for _, entry in extensions:
if entry.invoke_before_training:
entry.extension(self)
update = self.updater.update
reporter = self.reporter
stop_trigger = self.stop_trigger
# main training loop
try:
while not stop_trigger(self):
self.observation = {}
with reporter.scope(self.observation):
update()
for name, entry in extensions:
if entry.trigger(self):
entry.extension(self)
finally:
for _, entry in extensions:
finalize = entry.extension.finalize
if finalize:
finalize()
self.updater.finalize()
self._done = True
def serialize(self, serializer):
self.updater.serialize(serializer['updater'])
if hasattr(self.stop_trigger, 'serialize'):
self.stop_trigger.serialize(serializer['stop_trigger'])
s = serializer['extensions']
t = serializer['extension_triggers']
for name, entry in six.iteritems(self._extensions):
if hasattr(entry.extension, 'serialize'):
entry.extension.serialize(s[name])
if hasattr(entry.trigger, 'serialize'):
entry.trigger.serialize(t[name])
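# A minimal usage sketch, assuming `updater` is an already-built
# ~chainer.training.Updater and `my_extension` is a callable extension
# (both are placeholders, not defined in this module). The name is passed
# explicitly since a bare callable has no ``default_name`` attribute:
#
#   trainer = Trainer(updater, stop_trigger=(20, 'epoch'), out='result')
#   trainer.extend(my_extension, name='my_ext', trigger=(1, 'epoch'))
#   trainer.run()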
|
mit
|
jakesyl/androguard
|
demos/dalvikvm_format_6.py
|
38
|
2935
|
#!/usr/bin/env python
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.analysis import analysis
from androguard.core import androconf
import hashlib
def hexdump(src, length=8, off=0):
result = []
digits = 4 if isinstance(src, unicode) else 2
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = b' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in s])
result.append( b"%04X %-*s %s" % (i+off, length*(digits + 1), hexa, text) )
return b'\n'.join(result)
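# An illustrative call (offsets are printed with %04X; non-printable bytes
# render as '.'); each output line covers up to `length` input bytes:
#
#   print hexdump("ABCDEFGHIJ")   # two lines: bytes 0x0000-0x0007, then 0x0008-0x0009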
class MDalvikVMFormat:
def __init__(self, vm, vmx) :
self.vm = vm
self.vmx = vmx
def modify_instruction(self, class_name, method_name, descriptor, offset, instructions) :
pass
def test_save(self) :
original_buff = self.vm.get_buff()
b1 = original_buff
method = self.vm.get_method_descriptor(
"Lfr/t0t0/android/TestModifActivity;", "onCreate",
"(Landroid/os/Bundle;)V" )
# method.show()
# print hex(method.code_off)
# instructions = [i for i in method.get_instructions()]
# ins = instructions[3]
# print ins
# ins.BBBB = 12
# instructions.insert(3, ins)
# method.set_instructions( instructions )
b2 = self.vm.save()
self.check(b1, b2, 40)
return b2
def check(self, b1, b2, off) :
if hashlib.md5( b1 ).hexdigest() != hashlib.md5( b2 ).hexdigest() :
j = 0
end = max(len(b1), len(b2))
while j < end :
if j < off :
j += 1
continue
if j >= len(b1) :
print "OUT OF B1 @ OFFSET 0x%x(%d)" % (j,j)
raise("ooo")
if j >= len(b2) :
print "OUT OF B2 @ OFFSET 0x%x(%d)" % (j,j)
raise("ooo")
if b1[j] != b2[j] :
print "BEGIN @ OFFSET 0x%x" % j
print "ORIG : "
print hexdump(b1[j - 8: j + 8], off=j-8) + "\n"
print "NEW : "
print hexdump(b2[j - 8: j + 8], off=j-8) + "\n"
j += 1
print "OK"
#TEST = "examples/android/TestsAndroguard/bin/TestsAndroguard.apk"
TEST = "apks/malwares/smszombie/40F3F16742CD8AC8598BF859A23AC290.apk"
FILENAME = "./toto.apk"
androconf.set_debug()
a = apk.APK( TEST )
j = dvm.DalvikVMFormat( a.get_dex() )
x = analysis.VMAnalysis( j )
m = MDalvikVMFormat(j, x)
print j, x, m
new_dex = m.test_save()
a.new_zip( filename=FILENAME,
deleted_files="(META-INF/.)", new_files = {
"classes.dex" : new_dex } )
apk.sign_apk( FILENAME, "./keystore/keystore1", "tototo" )
|
apache-2.0
|
stijnvanhoey/defence
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
|
2736
|
6387
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If None, defaults
          to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
    Returns:
      The specification for the configuration, as a list consumable by
      easy_xml.
    """
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
|
mit
|
GiladE/birde
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py
|
250
|
4062
|
from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %s") % name)
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %s") % token["name"])
if type == "StartTag":
open_elements.append(name)
                for attr_name, attr_value in token["data"]:
                    # Distinct loop variable names keep the tag `name`, used
                    # below for the content model checks, from being clobbered.
                    if not isinstance(attr_name, str):
                        raise LintError(_("Attribute name is not a string: %r") % attr_name)
                    if not attr_name:
                        raise LintError(_("Empty attribute name"))
                    if not isinstance(attr_value, str):
                        raise LintError(_("Attribute value is not a string: %r") % attr_value)
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %s") % name)
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%s) does not match StartTag (%s)") % (name, start_name))
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %r") % data)
if not data:
raise LintError(_("%s token with empty data") % type)
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: ") % data)
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %s") % type)
yield token
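# A minimal usage sketch, assuming `token_stream` is an html5lib token
# stream (e.g. produced by a treewalker); the filter re-yields every token
# and raises LintError at the first inconsistency it finds:
#
#   for token in Filter(token_stream):
#       pass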
|
mit
|
nkuhlen/replication-study-financial-macro
|
.mywaflib/waflib/extras/run_r_script.py
|
2
|
2830
|
#!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012-15
"""
Run an R script in the directory specified by **ctx.bldnode**.
Strings supplied to the **prepend** and **append** keywords will be
added to the command line.
Usage::
ctx(
features='run_r_script',
source='some_script.r',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
append='',
prepend=''
)
"""
from waflib import Task, TaskGen, Logs
R_COMMANDS = ['RScript', 'Rscript']
def configure(ctx):
ctx.find_program(
R_COMMANDS,
var='RCMD',
errmsg="""\n
No R executable found!\n\n
If R is needed:\n
1) Check the settings of your system path.
2) Note we are looking for R executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_r_script' tool in the main wscript.\n\n"""
% R_COMMANDS
)
ctx.env.RFLAGS = ''
@Task.update_outputs
class run_r_script(Task.Task):
"""Run a R script."""
run_str = '${PREPEND} "${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" ${APPEND}'
shell = True
def exec_command(self, cmd, **kw):
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
if not self.buffer_output:
kw["stdout"] = kw["stderr"] = None
return bld.exec_command(cmd, **kw)
def keyword(self):
"""
Override the 'Compiling' default.
"""
return 'Running'
def __str__(self):
"""
More useful output.
"""
return "{prepend} [R] {rflags} {fn} {append}".format(
prepend=self.env.PREPEND,
rflags=self.env.RFLAGS,
fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
append=self.env.APPEND
)
@TaskGen.feature('run_r_script')
@TaskGen.before_method('process_source')
def apply_run_r_script(tg):
"""Task generator customising the options etc. to call R in batch
    mode for running an R script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes)
tsk.env.APPEND = getattr(tg, 'append', '')
tsk.env.PREPEND = getattr(tg, 'prepend', '')
tsk.buffer_output = getattr(tg, 'buffer_output', True)
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal(
'Could not find dependency %r for running %r'
% (x, src_node.relpath())
)
else:
tsk.dep_nodes.append(node)
Logs.debug(
'deps: found dependencies %r for running %r' % (
tsk.dep_nodes, src_node.relpath())
)
# Bypass the execution of process_source by setting the source to an empty
# list
tg.source = []
|
gpl-3.0
|
marcelovilaca/DIRAC
|
Workflow/Utilities/Utils.py
|
13
|
3645
|
""" Collection of utilities function
"""
import os, time
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO, getCSExtensions
from DIRAC.Core.Workflow.Module import ModuleDefinition
from DIRAC.Core.Workflow.Parameter import Parameter
from DIRAC.Core.Workflow.Step import StepDefinition
#############################################################################
def getStepDefinition( stepName, modulesNameList = [], importLine = """""", parametersList = [] ):
""" Given a name, a list of modules name, and a list of parameters, returns a step definition.
Remember that Step definition = Parameters + Module Instances
"""
# In case the importLine is not set, this is looking for a DIRAC extension, if any.
# The extension is supposed to be called ExtDIRAC.
if not importLine:
importLine = "DIRAC.Workflow.Modules"
for ext in getCSExtensions():
if ext.lower() == getVO():
importLine = ext + "DIRAC.Workflow.Modules"
break
stepDef = StepDefinition( stepName )
for moduleName in modulesNameList:
# create the module definition
moduleDef = ModuleDefinition( moduleName )
try:
      # Look in the importLine given, or fall back to DIRAC if the given location can't be imported
moduleDef.setDescription( getattr( __import__( "%s.%s" % ( importLine, moduleName ),
globals(), locals(), ['__doc__'] ),
"__doc__" ) )
moduleDef.setBody( """\nfrom %s.%s import %s\n""" % ( importLine, moduleName, moduleName ) )
except ImportError:
alternativeImportLine = "DIRAC.Workflow.Modules"
moduleDef.setDescription( getattr( __import__( "%s.%s" % ( alternativeImportLine, moduleName ),
globals(), locals(), ['__doc__'] ),
"__doc__" ) )
moduleDef.setBody( """\nfrom %s.%s import %s\n""" % ( alternativeImportLine, moduleName, moduleName ) )
# add the module to the step, and instance it
stepDef.addModule( moduleDef )
stepDef.createModuleInstance( module_type = moduleName, name = moduleName )
# add parameters to the module definition
for pName, pType, pValue, pDesc in parametersList:
p = Parameter( pName, pValue, pType, "", "", True, False, pDesc )
stepDef.addParameter( Parameter( parameter = p ) )
return stepDef
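# A minimal usage sketch; the step, module, and parameter names below are
# illustrative (parametersList entries are (name, type, value, description)
# tuples, as unpacked above):
#
#   step = getStepDefinition( 'MyStep', modulesNameList = [ 'Script' ],
#                             parametersList = [ ( 'applicationName', 'string',
#                                                  '', 'Name of the application' ) ] )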
#############################################################################
def addStepToWorkflow( workflow, stepDefinition, name ):
""" Add a stepDefinition to a workflow, instantiating it, and giving it a name
"""
workflow.addStep( stepDefinition )
return workflow.createStepInstance( stepDefinition.getType(), name )
#############################################################################
def getStepCPUTimes( step_commons ):
""" CPU times of a step
"""
exectime = 0
if step_commons.has_key( 'StartTime' ):
exectime = time.time() - step_commons['StartTime']
cputime = 0
if step_commons.has_key( 'StartStats' ):
# 5-tuple: utime, stime, cutime, cstime, elapsed_time
stats = os.times()
cputimeNow = stats[ 0 ] + stats[ 1 ] + stats[ 2 ] + stats[ 3 ]
cputimeBefore = step_commons[ 'StartStats' ][ 0 ] + step_commons[ 'StartStats' ][ 1 ] \
+ step_commons[ 'StartStats' ][ 2 ] + step_commons[ 'StartStats' ][ 3 ]
cputime = cputimeNow - cputimeBefore
return exectime, cputime
#############################################################################
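# A minimal usage sketch for getStepCPUTimes, assuming step_commons was
# populated when the step started (the two keys read above):
#
#   step_commons = { 'StartTime' : time.time(), 'StartStats' : os.times() }
#   # ... run the step ...
#   exectime, cputime = getStepCPUTimes( step_commons )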
|
gpl-3.0
|
snarfed/beautifulsoup
|
bs4/element.py
|
3
|
62762
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
    @alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
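# Used further down in this module, e.g. ``nextSibling = _alias("next_sibling")``
# keeps the BS3 camelCase spelling working as a property over the new name.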
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
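# An illustrative substitution: encoding the stand-in rewrites only the
# charset declaration inside the original attribute value:
#
#   value = ContentMetaAttributeValue("text/html; charset=utf8")
#   value.encode("utf-16")  # -> "text/html; charset=utf-16"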
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
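# A minimal usage sketch of the search machinery above (assumes the public
# ``bs4.BeautifulSoup`` entry point, which lives outside this file). Every
# find_* method funnels into _find_all with a SoupStrainer built from its
# arguments:
#
#     >>> from bs4 import BeautifulSoup
#     >>> soup = BeautifulSoup('<p>one</p><p>two</p><b>x</b>')
#     >>> [tag.name for tag in soup.find_all('p')]   # fast path: name-only search
#     [u'p', u'p']
#     >>> soup.find('b').get_text()   # find() is find_all() with limit=1
#     u'x'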
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
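# Illustrative use of the navigation generators above (a minimal sketch,
# assuming the public ``bs4.BeautifulSoup`` entry point, defined outside
# this file):
#
#     >>> from bs4 import BeautifulSoup
#     >>> first_p = BeautifulSoup('<div><p>a</p><p>b</p></div>').p
#     >>> [t.name for t in first_p.next_siblings]
#     [u'p']
#     >>> [t.name for t in first_p.parents]   # exact chain is parser-dependent
#     [u'div', u'[document]']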
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$')
# /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---------------------------/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
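# Sketch of how these checkers surface through select() (assumes the public
# bs4 API):
#
#     >>> from bs4 import BeautifulSoup
#     >>> soup = BeautifulSoup('<a href="http://x.com">x</a><a href="/y">y</a>')
#     >>> [a['href'] for a in soup.select('a[href^="http"]')]
#     [u'http://x.com']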
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
    value += ' PUBLIC "%s"' % pub_id
    if system_id is not None:
        value += ' "%s"' % system_id
elif system_id is not None:
    value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
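# Example of the factory above -- the output shape follows directly from
# for_name_and_ids:
#
#     >>> Doctype.for_name_and_ids(
#     ...     'html', '-//W3C//DTD HTML 4.01//EN',
#     ...     'http://www.w3.org/TR/html4/strict.dtd')
#     u'html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"'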
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
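# Behavior sketch for the .string convenience property (assumes the public
# bs4 API):
#
#     >>> from bs4 import BeautifulSoup
#     >>> BeautifulSoup('<b>one</b>').b.string
#     u'one'
#     >>> print BeautifulSoup('<b>one<i>two</i></b>').b.string   # two children
#     None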
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
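# get_text() concatenation sketch (assumes the public bs4 API):
#
#     >>> from bs4 import BeautifulSoup
#     >>> BeautifulSoup('<p>one<br/>two</p>').p.get_text(separator=u' ')
#     u'one two'
#     >>> BeautifulSoup('<p> one </p>').p.get_text(strip=True)
#     u'one'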
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
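# prettify() sketch -- decode(True, ...) switches on pretty-printing, so the
# markup comes back indented one space per level (assumes the public bs4
# API):
#
#     >>> from bs4 import BeautifulSoup
#     >>> print BeautifulSoup('<b>x</b>').b.prettify()
#     <b>
#      x
#     </b>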
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
# Remove whitespace directly after the grouping operator ','
# then split into tokens.
tokens = re.sub(',[\s]*',',', selector).split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token_group in enumerate(tokens):
new_context = []
new_context_ids = set([])
# Grouping selectors, ie: p,a
grouped_tokens = token_group.split(',')
if '' in grouped_tokens:
raise ValueError('Invalid group selection syntax: %s' % token_group)
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
for token in grouped_tokens:
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except ValueError:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
    check = tag_name
else:
    check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
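# End-to-end select() sketch covering several of the token types handled
# above (assumes the public bs4 API):
#
#     >>> from bs4 import BeautifulSoup
#     >>> soup = BeautifulSoup('<div id="main"><p class="x">a</p><p>b</p></div>')
#     >>> [t.get_text() for t in soup.select('div#main > p.x')]
#     [u'a']
#     >>> [t.get_text() for t in soup.select('p:nth-of-type(2)')]
#     [u'b']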
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
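# SoupStrainer matching sketch -- the same object can be handed to the
# find_* methods, or to the BeautifulSoup constructor to parse selectively
# (assumes the public bs4 API; ``re`` is already imported by this module):
#
#     >>> from bs4 import BeautifulSoup
#     >>> strainer = SoupStrainer('a', href=re.compile('^http'))
#     >>> soup = BeautifulSoup('<a href="http://x">x</a><a href="/y">y</a>')
#     >>> [a['href'] for a in soup.find_all(strainer)]
#     [u'http://x']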
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
|
mit
|
unseenlaser/python-for-android
|
python3-alpha/python3-src/Lib/unittest/test/test_assertions.py
|
51
|
11863
|
import datetime
import warnings
import unittest
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
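# assertMessages drives each assertion four ways -- longMessage=False
# without and with msg, then longMessage=True without and with msg -- and
# checks the failure text against the four regexes each test supplies.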
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
[r"\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
|
apache-2.0
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/virtual_network_gateway_connection_list_entity.py
|
1
|
7733
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkConnectionGatewayReference
:param connection_type: Required. Gateway connection type. Possible values
 include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2017_08_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2017_08_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2017_08_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGatewayConnectionListEntity, self).__init__(**kwargs)
self.authorization_key = kwargs.get('authorization_key', None)
self.virtual_network_gateway1 = kwargs.get('virtual_network_gateway1', None)
self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None)
self.local_network_gateway2 = kwargs.get('local_network_gateway2', None)
self.connection_type = kwargs.get('connection_type', None)
self.routing_weight = kwargs.get('routing_weight', None)
self.shared_key = kwargs.get('shared_key', None)
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = kwargs.get('peer', None)
self.enable_bgp = kwargs.get('enable_bgp', None)
self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None)
self.ipsec_policies = kwargs.get('ipsec_policies', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
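# Minimal construction sketch (hedged: the values are illustrative and
# ``gateway_ref`` stands in for a VirtualNetworkConnectionGatewayReference
# built from the sibling generated models in this package):
#
#     entity = VirtualNetworkGatewayConnectionListEntity(
#         location='westus',
#         virtual_network_gateway1=gateway_ref,
#         connection_type='IPsec',
#         routing_weight=10,
#         shared_key='s3cret',
#     )
#     # Read-only fields such as connection_status remain None until the
#     # service populates them in a response.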
|
mit
|
JuliaPackageMirrors/Jumos.jl
|
doc/_ext_/juliadoc/jlhelp.py
|
7
|
3856
|
import codecs
from os import path
from docutils import nodes
from sphinx.builders.text import TextBuilder
from sphinx.writers.text import TextTranslator
from sphinx.writers.text import TextWriter
from sphinx.util.osutil import ensuredir
from sphinx.util.console import bold, purple, darkgreen, term_width_line
def jl_escape(text):
# XXX: crude & fragile
return text.replace('\\',r'\\').replace('$',"\\$").replace('"',"\\\"")
class JuliaHelpTranslator(TextTranslator):
def __init__(self, document, builder):
TextTranslator.__init__(self, document, builder)
self.in_desc = False
def add_text(self, text, escape=True):
if self.in_desc:
etext = jl_escape(text) if escape else text
TextTranslator.add_text(self, etext)
def visit_title(self, node):
raise nodes.SkipNode
def visit_desc(self, node):
self.in_desc = True
self.first_sig = True
def visit_desc_signature(self, node):
self._current_module = node.attributes.get('module', None)
self._current_class = node.attributes.get('class', None)
TextTranslator.visit_desc_signature(self, node)
def visit_desc_name(self, node):
self._desc_name = node.astext()
TextTranslator.visit_desc_name(self, node)
def depart_desc_signature(self, node):
if self._current_module is not None:
module = self._current_module
else:
module = ''
name = self._desc_name
if self._current_class:
name = self._current_class
if self.first_sig:
self.first_sig = False
first = '("%s","%s","' % ( \
jl_escape(module), \
jl_escape(name))
else:
first = None
self.end_state(first=first, wrap=False, end=None)
def depart_desc_content(self, node):
TextTranslator.depart_desc_content(self, node)
self.new_state(0)
self.add_text('"),\n', escape=False)
self.end_state()
def depart_desc(self, node):
self.in_desc = False
class JuliaHelpWriter(TextWriter):
def translate(self):
visitor = JuliaHelpTranslator(self.document, self.builder)
self.document.walkabout(visitor)
self.output = visitor.body
class JuliaHelpBuilder(TextBuilder):
name = "jlhelp"
out_suffix = ".jl"
def write(self, *ignored):
# build_all
docnames = set([doc for doc in self.env.found_docs if doc.startswith("stdlib")])
self.info(bold('preparing documents... '), nonl=True)
self.prepare_writing(docnames)
self.info('done')
# write target files
warnings = []
self.env.set_warnfunc(lambda *args: warnings.append(args))
outfilename = path.join(self.outdir, self.name + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
f = codecs.open(outfilename, 'w', 'utf-8')
try:
f.write('# automatically generated from files in doc/stdlib/ -- do not edit here\n\n' +
'Any[\n\n')
for docname in self.status_iterator(
sorted(docnames), 'processing... ', darkgreen, len(docnames)):
doctree = self.env.get_and_resolve_doctree(docname, self)
self.writer.write(doctree, f)
f.write("\n")
f.write('\n]\n')
finally:
f.close()
except (IOError, OSError) as err:
self.warn("error writing file %s: %s" % (outfilename, err))
for warning in warnings:
self.warn(*warning)
self.env.set_warnfunc(self.warn)
def prepare_writing(self, docnames):
self.writer = JuliaHelpWriter(self)
def setup(app):
app.add_builder(JuliaHelpBuilder)
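# Usage sketch (paths and project layout are illustrative): register the
# extension in the Sphinx project's conf.py, then run the builder by name.
#
#     # conf.py
#     extensions = ['juliadoc.jlhelp']
#
#     # shell
#     # sphinx-build -b jlhelp doc doc/_build
#
# The result is a single jlhelp.jl file containing Any[ ("module", "name",
# "docstring"), ... ] entries, as assembled by JuliaHelpBuilder.write above.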
|
mpl-2.0
|
wilvk/ansible
|
lib/ansible/modules/cloud/vmware/vmware_dvswitch.py
|
21
|
7755
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
switch_version:
description:
- The version of the switch to create. Can be 6.5.0, 6.0.0, 5.5.0, 5.1.0 or 5.0.0 with a vCenter running vSphere 6.5.
- Required when the vCenter version is newer than the version of the ESXi hosts that will join the DVS. Defaults to the vCenter version if not specified.
required: False
version_added: 2.5
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplinks per ESXi host added to the switch
required: True
discovery_proto:
description:
- Link discovery protocol to use, either Cisco Discovery Protocol (cdp) or Link Layer Discovery Protocol (lldp)
choices:
- 'cdp'
- 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
switch_version: 6.0.0
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI,
connect_to_api,
find_datacenter_by_name,
find_dvs_by_name,
vmware_argument_spec,
wait_for_task
)
class VMwareDVSwitch(object):
def __init__(self, module):
self.module = module
self.dvs = None
self.switch_name = self.module.params['switch_name']
self.switch_version = self.module.params['switch_version']
self.datacenter_name = self.module.params['datacenter_name']
self.mtu = self.module.params['mtu']
self.uplink_quantity = self.module.params['uplink_quantity']
self.discovery_proto = self.module.params['discovery_proto']
self.discovery_operation = self.module.params['discovery_operation']
self.state = self.module.params['state']
self.content = connect_to_api(module)
def process_state(self):
try:
dvs_states = {
'absent': {
'present': self.state_destroy_dvs,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvs,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvs,
}
}
dvs_states[self.state][self.check_dvs_configuration()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
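# Dispatch sketch: with state='present' and no existing switch,
# dvs_states['present']['absent'] resolves to state_create_dvs, so the
# switch is created; every handler terminates the module via exit_json.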
def create_dvswitch(self, network_folder):
result = None
changed = False
spec = vim.DistributedVirtualSwitch.CreateSpec()
spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
spec.configSpec.name = self.switch_name
spec.configSpec.maxMtu = self.mtu
spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
spec.productInfo = vim.dvs.ProductSpec()
spec.productInfo.name = "DVS"
spec.productInfo.vendor = "VMware"
spec.productInfo.version = self.switch_version
for count in range(1, self.uplink_quantity + 1):
spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
task = network_folder.CreateDVS_Task(spec)
changed, result = wait_for_task(task)
return changed, result
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_destroy_dvs(self):
task = self.dvs.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=str(result))
def state_update_dvs(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_create_dvs(self):
changed = True
result = None
if not self.module.check_mode:
dc = find_datacenter_by_name(self.content, self.datacenter_name)
changed, result = self.create_dvswitch(dc.networkFolder)
self.module.exit_json(changed=changed, result=str(result))
def check_dvs_configuration(self):
self.dvs = find_dvs_by_name(self.content, self.switch_name)
if self.dvs is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
mtu=dict(required=True, type='int'),
switch_version=dict(type='str'),
uplink_quantity=dict(required=True, type='int'),
discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_dvswitch = VMwareDVSwitch(module)
vmware_dvswitch.process_state()
if __name__ == '__main__':
main()
|
gpl-3.0
|
GoogleCloudPlatform/training-data-analyst
|
courses/developingapps/python/kubernetesengine/start/frontend/quiz/webapp/questions.py
|
18
|
1280
|
# Copyright 2017, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quiz.gcp import storage, datastore
"""
uploads file into google cloud storage
- upload file
- return public_url
"""
def upload_file(image_file, public):
if not image_file:
return None
public_url = storage.upload_file(
image_file,
public
)
return public_url
"""
uploads file into google cloud storage
- call method to upload file (public=true)
- call datastore helper method to save question
"""
def save_question(data, image_file):
if image_file:
data['imageUrl'] = unicode(upload_file(image_file, True))
else:
data['imageUrl'] = u''
data['correctAnswer'] = int(data['correctAnswer'])
datastore.save_question(data)
return
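"""
Example (hypothetical caller, for illustration only): a webapp handler would
collect the form fields and the uploaded file, then delegate to save_question.
The field names below are assumptions; the real field set lives in the
webapp layer.
"""
def example_handler(form, files):
    data = {key: form[key] for key in
            ('quiz', 'title', 'answer1', 'answer2',
             'answer3', 'answer4', 'correctAnswer')}
    save_question(data, files.get('image'))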
|
apache-2.0
|
Drooids/odoo
|
openerp/report/int_to_text.py
|
442
|
2641
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
unites = {
0: '', 1:'un', 2:'deux', 3:'trois', 4:'quatre', 5:'cinq', 6:'six', 7:'sept', 8:'huit', 9:'neuf',
10:'dix', 11:'onze', 12:'douze', 13:'treize', 14:'quatorze', 15:'quinze', 16:'seize',
21:'vingt et un', 31:'trente et un', 41:'quarante et un', 51:'cinquante et un', 61:'soixante et un',
71:'septante et un', 91:'nonante et un', 80:'quatre-vingts'
}
dizaine = {
1: 'dix', 2:'vingt', 3:'trente',4:'quarante', 5:'cinquante', 6:'soixante', 7:'septante', 8:'quatre-vingt', 9:'nonante'
}
centaine = {
0:'', 1: 'cent', 2:'deux cent', 3:'trois cent',4:'quatre cent', 5:'cinq cent', 6:'six cent', 7:'sept cent', 8:'huit cent', 9:'neuf cent'
}
mille = {
0:'', 1:'mille'
}
def _100_to_text(chiffre):
if chiffre in unites:
return unites[chiffre]
else:
if chiffre%10>0:
return dizaine[chiffre / 10]+'-'+unites[chiffre % 10]
else:
return dizaine[chiffre / 10]
def _1000_to_text(chiffre):
d = _100_to_text(chiffre % 100)
d2 = chiffre/100
if d2>0 and d:
return centaine[d2]+' '+d
elif d2>1 and not d:
return centaine[d2]+'s'
else:
return centaine[d2] or d
def _10000_to_text(chiffre):
if chiffre==0:
return 'zero'
part1 = _1000_to_text(chiffre % 1000)
part2 = mille.get(chiffre / 1000, _1000_to_text(chiffre / 1000)+' mille')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def int_to_text(i):
return _10000_to_text(i)
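# Worked examples (Belgian/Swiss French: 'septante'/'nonante'; values traced
# through the tables above):
#   int_to_text(71)   -> 'septante et un'
#   int_to_text(200)  -> 'deux cents'
#   int_to_text(2641) -> 'deux mille six cent quarante et un'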
if __name__=='__main__':
for i in range(1,999999,139):
print int_to_text(i)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
syaiful6/django
|
django/contrib/auth/backends.py
|
468
|
6114
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
class ModelBackend(object):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, username=None, password=None, **kwargs):
UserModel = get_user_model()
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.check_password(password):
return user
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a non-existing user (#20760).
UserModel().set_password(password)
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj})
def _get_permissions(self, user_obj, obj, from_name):
"""
Returns the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
perm_cache_name = '_%s_perm_cache' % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms))
return getattr(user_obj, perm_cache_name)
def get_user_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, 'user')
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from the
groups they belong.
"""
return self._get_permissions(user_obj, obj, 'group')
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = self.get_user_permissions(user_obj)
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
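# Example (standard Django configuration; ModelBackend is the default):
#     AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
# Permission strings assembled above take the form '<app_label>.<codename>',
# e.g. user.has_perm('auth.add_user').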
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
UserModel = get_user_model()
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(user)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
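# A minimal sketch (illustrative subclass, not part of Django): disable
# auto-creation of unknown users, as the class docstring describes, and
# normalize a hypothetical "DOMAIN\\username" value before lookup.
class StrictRemoteUserBackend(RemoteUserBackend):
    create_unknown_user = False

    def clean_username(self, username):
        # Keep only the part after a backslash, lowercased (assumed format).
        return username.rpartition('\\')[2].lower()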
|
bsd-3-clause
|
broferek/ansible
|
lib/ansible/modules/cloud/amazon/efs_info.py
|
4
|
13913
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: efs_info
short_description: Get information about Amazon EFS file systems
description:
- This module can be used to search Amazon EFS file systems.
- This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(efs_info) module no longer returns C(ansible_facts)!
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
aliases: [ creation_token ]
type: str
id:
description:
- ID of Amazon EFS.
type: str
tags:
description:
      - Tags of the Amazon EFS file system, defined as a dictionary.
type: dict
targets:
description:
- List of targets on which to filter the returned results.
- Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
type: list
elements: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Find all existing efs
efs_info:
register: result
- name: Find efs using id
efs_info:
id: fs-1234abcd
register: result
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
efs_info:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
register: result
- debug:
msg: "{{ result['efs'] }}"
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
  description: URL of the file system with a leading dot, kept from the time when AWS EFS required a network suffix to be prepended to the address
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
filesystem_address:
description: url of file system
returned: always
type: str
sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
throughput_mode:
description: mode of throughput for the file system
returned: when botocore >= 1.10.57
type: str
sample: "bursting"
provisioned_throughput_in_mibps:
description: throughput provisioned in Mibps
returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
type: float
sample: 15.0
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
from ansible.module_utils._text import to_native
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
self.module = module
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
self.region = region
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def list_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
paginator = self.connection.get_paginator('describe_file_systems')
return paginator.paginate(**kwargs).build_full_result()['FileSystems']
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_tags(self, file_system_id):
"""
Returns tag list for selected instance of EFS
"""
paginator = self.connection.get_paginator('describe_tags')
return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_mount_targets(self, file_system_id):
"""
Returns mount targets for selected instance of EFS
"""
paginator = self.connection.get_paginator('describe_mount_targets')
return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_security_groups(self, mount_target_id):
"""
Returns security groups for selected instance of EFS
"""
return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
def get_mount_targets_data(self, file_systems):
for item in file_systems:
if item['life_cycle_state'] == self.STATE_AVAILABLE:
try:
mount_targets = self.get_mount_targets(item['file_system_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
for mt in mount_targets:
item['mount_targets'].append(camel_dict_to_snake_dict(mt))
return file_systems
def get_security_groups_data(self, file_systems):
for item in file_systems:
if item['life_cycle_state'] == self.STATE_AVAILABLE:
for target in item['mount_targets']:
if target['life_cycle_state'] == self.STATE_AVAILABLE:
try:
target['security_groups'] = self.get_security_groups(target['mount_target_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
else:
target['security_groups'] = []
else:
item['tags'] = {}
item['mount_targets'] = []
return file_systems
def get_file_systems(self, file_system_id=None, creation_token=None):
kwargs = dict()
if file_system_id:
kwargs['FileSystemId'] = file_system_id
if creation_token:
kwargs['CreationToken'] = creation_token
try:
file_systems = self.list_file_systems(**kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
results = list()
for item in file_systems:
item['CreationTime'] = str(item['CreationTime'])
"""
In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
AWS documentation is available here:
U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
result = camel_dict_to_snake_dict(item)
result['tags'] = {}
result['mount_targets'] = []
# Set tags *after* doing camel to snake
if result['life_cycle_state'] == self.STATE_AVAILABLE:
try:
result['tags'] = self.get_tags(result['file_system_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
results.append(result)
return results
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'mount_target_id',
'subnet-': 'subnet_id',
'eni-': 'network_interface_id',
'sg-': 'security_groups'
}
return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
if str(attr_id).startswith(prefix)], 'ip_address')
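# For example:
#     prefix_to_attr('subnet-e265c895') -> 'subnet_id'
#     prefix_to_attr('sg-a30b22c6')     -> 'security_groups'
#     prefix_to_attr('172.31.17.173')   -> 'ip_address'  (no known prefix)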
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
Helper method to determine if tag requested already exists
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
Helper method to determine if mount target requested already exists
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
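# For example:
#     group_list_of_dict([{'a': 1}, {'a': [2, 3], 'b': 4}])
#     -> {'a': [1, 2, 3], 'b': [4]}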
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(),
name=dict(aliases=['creation_token']),
tags=dict(type="dict", default={}),
targets=dict(type="list", default=[])
))
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True)
is_old_facts = module._name == 'efs_facts'
if is_old_facts:
module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(fs_id, name)
if tags:
file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
file_systems_info = connection.get_mount_targets_data(file_systems_info)
file_systems_info = connection.get_security_groups_data(file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
if is_old_facts:
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
else:
module.exit_json(changed=False, efs=file_systems_info)
if __name__ == '__main__':
main()
|
gpl-3.0
|
svanschalkwyk/datafari
|
windows/python/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
|
442
|
9262
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
winterm = None
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\033\]((?:.|;)*?)(\x07)') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
on_emulated_windows = on_windows and 'TERM' in os.environ
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows and not on_emulated_windows
self.strip = strip
        # should we convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and not wrapped.closed and not on_emulated_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.wrapped.closed and is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
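if __name__ == '__main__':
    # Demonstration only (assumes nothing beyond this module's own API):
    # wrap stdout, then write through the proxy only when wrapping is needed.
    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    stream.write('\033[31mred text\033[0m\n')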
|
apache-2.0
|
electrumalt/electrum-ixc
|
gui/qt/seed_dialog.py
|
1
|
3876
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_ixc.i18n import _
from electrum_ixc import mnemonic
from qrcodewidget import QRCodeWidget, QRDialog
from util import close_button
from qrtextedit import ShowQRTextEdit, ScanQRTextEdit
class SeedDialog(QDialog):
def __init__(self, parent, seed, imported_keys):
QDialog.__init__(self, parent)
self.setModal(1)
self.setMinimumWidth(400)
self.setWindowTitle('Electrum-IXC' + ' - ' + _('Seed'))
vbox = show_seed_box(seed)
if imported_keys:
vbox.addWidget(QLabel("<b>"+_("WARNING")+":</b> " + _("Your wallet contains imported keys. These keys cannot be recovered from seed.") + "</b><p>"))
vbox.addLayout(close_button(self))
self.setLayout(vbox)
def icon_filename(sid):
if sid == 'cold':
return ":icons/cold_seed.png"
elif sid == 'hot':
return ":icons/hot_seed.png"
else:
return ":icons/seed.png"
def show_seed_box(seed, sid=None):
save_msg = _("Please save these %d words on paper (order is important).")%len(seed.split()) + " "
qr_msg = _("Your seed is also displayed as QR code, in case you want to transfer it to a mobile phone.") + "<p>"
warning_msg = "<b>"+_("WARNING")+":</b> " + _("Never disclose your seed. Never type it on a website.") + "</b><p>"
if sid is None:
msg = _("Your wallet generation seed is")
msg2 = save_msg + " " \
+ _("This seed will allow you to recover your wallet in case of computer failure.") + "<br/>" \
+ warning_msg
    elif sid == 'cold':
        msg = _("Your cold storage seed is")
        msg2 = save_msg + " " \
            + _("This seed will be permanently deleted from your wallet file. Make sure you have saved it before you press 'next'") + " " \
            + warning_msg
    elif sid == 'hot':
        msg = _("Your hot seed is")
        msg2 = save_msg + " " \
            + _("If you ever need to recover your wallet from seed, you will need both this seed and your cold seed.") + " " \
            + warning_msg
label1 = QLabel(msg+ ":")
seed_text = ShowQRTextEdit(text=seed)
seed_text.setMaximumHeight(130)
label2 = QLabel(msg2)
label2.setWordWrap(True)
logo = QLabel()
logo.setPixmap(QPixmap(icon_filename(sid)).scaledToWidth(56))
logo.setMaximumWidth(60)
grid = QGridLayout()
grid.addWidget(logo, 0, 0)
grid.addWidget(label1, 0, 1)
grid.addWidget(seed_text, 1, 0, 1, 2)
vbox = QVBoxLayout()
vbox.addLayout(grid)
vbox.addWidget(label2)
vbox.addStretch(1)
return vbox
def enter_seed_box(msg, window, sid=None):
vbox = QVBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_filename(sid)).scaledToWidth(56))
logo.setMaximumWidth(60)
label = QLabel(msg)
label.setWordWrap(True)
seed_e = ScanQRTextEdit(win=window)
seed_e.setMaximumHeight(100)
seed_e.setTabChangesFocus(True)
vbox.addWidget(label)
grid = QGridLayout()
grid.addWidget(logo, 0, 0)
grid.addWidget(seed_e, 0, 1)
vbox.addLayout(grid)
return vbox, seed_e
|
gpl-3.0
|
andela-ooladayo/django
|
tests/get_or_create/tests.py
|
282
|
15058
|
from __future__ import unicode_literals
import traceback
from datetime import date
from django.db import DatabaseError, IntegrityError
from django.test import TestCase, TransactionTestCase, ignore_warnings
from django.utils.encoding import DjangoUnicodeDecodeError
from .models import (
Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile,
Publisher, Tag, Thing,
)
class GetOrCreateTests(TestCase):
def setUp(self):
self.lennon = Person.objects.create(
first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
)
def test_get_or_create_method_with_get(self):
created = Person.objects.get_or_create(
first_name="John", last_name="Lennon", defaults={
"birthday": date(1940, 10, 9)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 1)
def test_get_or_create_method_with_create(self):
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertTrue(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_redundant_instance(self):
"""
If we execute the exact same statement twice, the second time,
it won't create a Person.
"""
Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_invalid_params(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
self.assertRaises(
IntegrityError,
Person.objects.get_or_create, first_name="Tom", last_name="Smith"
)
def test_get_or_create_on_related_manager(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'testing',
}
)
self.assertTrue(created)
self.assertEqual(obj.defaults, 'testing')
obj2, created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'testing',
}
)
self.assertFalse(created)
self.assertEqual(obj, obj2)
class GetOrCreateTestsWithManualPKs(TestCase):
def setUp(self):
self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")
def test_create_with_duplicate_primary_key(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
self.assertRaises(
IntegrityError,
ManualPrimaryKeyTest.objects.get_or_create, id=1, data="Different"
)
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_get_or_create_raises_IntegrityError_plus_traceback(self):
"""
get_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
We cannot use assertRaises here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn(str('obj.save'), formatted_traceback)
# MySQL emits a warning when broken data is saved
@ignore_warnings(module='django.db.backends.mysql.base')
def test_savepoint_rollback(self):
"""
Regression test for #20463: the database connection should still be
usable after a DataError or ProgrammingError in .get_or_create().
"""
try:
Person.objects.get_or_create(
birthday=date(1970, 1, 1),
defaults={'first_name': b"\xff", 'last_name': b"\xff"})
except (DatabaseError, DjangoUnicodeDecodeError):
Person.objects.create(
first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1))
else:
self.skipTest("This backend accepts broken utf-8.")
def test_get_or_create_empty(self):
"""
Regression test for #16137: get_or_create does not require kwargs.
"""
try:
DefaultPerson.objects.get_or_create()
except AssertionError:
self.fail("If all the attributes on a model have defaults, we "
"shouldn't need to pass any arguments.")
class GetOrCreateTransactionTests(TransactionTestCase):
available_apps = ['get_or_create']
def test_get_or_create_integrityerror(self):
"""
Regression test for #15117. Requires a TransactionTestCase on
databases that delay integrity checks until the end of transactions,
otherwise the exception is never raised.
"""
try:
Profile.objects.get_or_create(person=Person(id=1))
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
def test_get_get_or_create(self):
tag = Tag.objects.create(text='foo')
a_thing = Thing.objects.create(name='a')
a_thing.tags.add(tag)
obj, created = a_thing.tags.get_or_create(text='foo')
self.assertFalse(created)
self.assertEqual(obj.pk, tag.pk)
def test_create_get_or_create(self):
a_thing = Thing.objects.create(name='a')
obj, created = a_thing.tags.get_or_create(text='foo')
self.assertTrue(created)
self.assertEqual(obj.text, 'foo')
self.assertIn(obj, a_thing.tags.all())
def test_something(self):
Tag.objects.create(text='foo')
a_thing = Thing.objects.create(name='a')
self.assertRaises(IntegrityError, a_thing.tags.get_or_create, text='foo')
class UpdateOrCreateTests(TestCase):
def test_update(self):
Person.objects.create(
first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
)
p, created = Person.objects.update_or_create(
first_name='John', last_name='Lennon', defaults={
'birthday': date(1940, 10, 10)
}
)
self.assertFalse(created)
self.assertEqual(p.first_name, 'John')
self.assertEqual(p.last_name, 'Lennon')
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create(self):
p, created = Person.objects.update_or_create(
first_name='John', last_name='Lennon', defaults={
'birthday': date(1940, 10, 10)
}
)
self.assertTrue(created)
self.assertEqual(p.first_name, 'John')
self.assertEqual(p.last_name, 'Lennon')
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create_twice(self):
params = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': date(1940, 10, 10),
}
Person.objects.update_or_create(**params)
# If we execute the exact same statement, it won't create a Person.
p, created = Person.objects.update_or_create(**params)
self.assertFalse(created)
def test_integrity(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
self.assertRaises(IntegrityError,
Person.objects.update_or_create, first_name="Tom", last_name="Smith")
def test_manual_primary_key_test(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
ManualPrimaryKeyTest.objects.create(id=1, data="Original")
self.assertRaises(
IntegrityError,
ManualPrimaryKeyTest.objects.update_or_create, id=1, data="Different"
)
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_error_contains_full_traceback(self):
"""
update_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn('obj.save', formatted_traceback)
def test_create_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book, created = p.books.update_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
self.assertEqual(p.books.count(), 1)
def test_update_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
self.assertEqual(p.books.count(), 1)
name = "The Book of Django"
book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(p.books.count(), 1)
def test_create_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
self.assertTrue(created)
self.assertEqual(author.books.count(), 1)
def test_update_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
book.authors.add(author)
self.assertEqual(author.books.count(), 1)
name = "The Book of Django"
book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(author.books.count(), 1)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.update_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'testing',
}
)
self.assertTrue(created)
self.assertEqual(obj.defaults, 'testing')
obj, created = Person.objects.update_or_create(
first_name='George', last_name='Harrison', defaults__exact='testing', defaults={
'birthday': date(1943, 2, 25),
'defaults': 'another testing',
}
)
self.assertFalse(created)
self.assertEqual(obj.defaults, 'another testing')
|
bsd-3-clause
|
abaditsegay/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_cookielib.py
|
63
|
71359
|
# -*- coding: latin-1 -*-
"""Tests for cookielib.py."""
import re, os, time
from unittest import TestCase
from test import test_support
class DateTimeTests(TestCase):
def test_time2isoz(self):
from cookielib import time2isoz
base = 1019227000
day = 24*3600
self.assertEquals(time2isoz(base), "2002-04-19 14:36:40Z")
self.assertEquals(time2isoz(base+day), "2002-04-20 14:36:40Z")
self.assertEquals(time2isoz(base+2*day), "2002-04-21 14:36:40Z")
self.assertEquals(time2isoz(base+3*day), "2002-04-22 14:36:40Z")
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assert_(re.search(r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$", text),
"bad time2isoz format: %s %s" % (az, bz))
def test_http2time(self):
from cookielib import http2time
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEquals(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0))
# this test will break around year 2070
self.assertEquals(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0))
# this test will break around year 2048
self.assertEquals(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
from cookielib import http2time, time2isoz
# test http2time for supported dates. Test cases with 2 digit year
# will probably break in year 2044.
tests = [
'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format
'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format
'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format
'03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday)
'03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday)
'03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday)
'03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds)
'03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz)
'03-Feb-94', # old rfc850 HTTP format (no weekday, no time)
'03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time)
'03 Feb 1994', # proposed new HTTP format (no weekday, no time)
# A few tests with extra space at various places
' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
result = time2isoz(test_t)
expected = "1994-02-03 00:00:00Z"
self.assertEquals(result, expected,
"%s => '%s' (%s)" % (test_t, result, expected))
for s in tests:
t = http2time(s)
t2 = http2time(s.lower())
t3 = http2time(s.upper())
self.assert_(t == t2 == t3 == test_t,
"'%s' => %s, %s, %s (%s)" % (s, t, t2, t3, test_t))
def test_http2time_garbage(self):
from cookielib import http2time
for test in [
'',
'Garbage',
'Mandag 16. September 1996',
'01-00-1980',
'01-13-1980',
'00-01-1980',
'32-01-1980',
'01-01-1980 25:00:00',
'01-01-1980 00:61:00',
'01-01-1980 00:00:62',
]:
self.assert_(http2time(test) is None,
"http2time(%s) is not None\n"
"http2time(test) %s" % (test, http2time(test))
)
class HeaderTests(TestCase):
def test_parse_ns_headers(self):
from cookielib import parse_ns_headers
# quotes should be stripped
expected = [[('foo', 'bar'), ('expires', 2209069412L), ('version', '0')]]
for hdr in [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
]:
self.assertEquals(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from cookielib import parse_ns_headers
# Cookie with name 'expires'
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEquals(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
from cookielib import join_header_words
joined = join_header_words([[("foo", None), ("bar", "baz")]])
self.assertEquals(joined, "foo; bar=baz")
self.assertEquals(join_header_words([[]]), "")
def test_split_header_words(self):
from cookielib import split_header_words
tests = [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
(" foo= ", [[("foo", "")]]),
(" foo=", [[("foo", "")]]),
(" foo= ; ", [[("foo", "")]]),
(" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]),
("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
# doesn't really matter if this next fails, but it works ATM
("foo= bar=baz", [[("foo", "bar=baz")]]),
("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]),
("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]),
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
result = "(error -- traceback follows)\n\n%s" % f.getvalue()
self.assertEquals(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
def test_roundtrip(self):
from cookielib import split_header_words, join_header_words
tests = [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
("foo=", 'foo=""'),
("foo=bar bar=baz", "foo=bar; bar=baz"),
("foo=bar;bar=baz", "foo=bar; bar=baz"),
('foo bar baz', "foo; bar; baz"),
(r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'),
('foo,,,bar', 'foo, bar'),
('foo=bar,bar=baz', 'foo=bar, bar=baz'),
('text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'),
('foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'),
(r'Basic realm="\"foo\\\\bar\""',
r'Basic; realm="\"foo\\\\bar\""')
]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEquals(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
""" % (arg, expect, res, input))
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import mimetools, StringIO
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from urllib2 import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
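# For example (illustration; 'jar' is a hypothetical CookieJar): the first
# exchange returns an empty Cookie: header, the second returns whatever the
# jar stored from the first response:
#     interact_netscape(jar, "http://www.acme.com/", 'spam=eggs')  # -> ""
#     interact_netscape(jar, "http://www.acme.com/")               # -> "spam=eggs"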
class FileCookieJarTests(TestCase):
def test_lwp_valueless_cookie(self):
# cookies with no value should be saved and loaded consistently
from cookielib import LWPCookieJar
filename = test_support.TESTFN
c = LWPCookieJar()
interact_netscape(c, "http://www.acme.com/", 'boo')
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
def test_bad_magic(self):
from cookielib import LWPCookieJar, MozillaCookieJar, LoadError
# IOErrors (eg. file doesn't exist) are allowed to propagate
filename = test_support.TESTFN
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
try:
c.load(filename="for this test to work, a file with this "
"filename should not exist")
except IOError, exc:
# exactly IOError, not LoadError
self.assertEqual(exc.__class__, IOError)
else:
self.fail("expected IOError for invalid filename")
# Invalid contents of cookies file (eg. bad magic string)
# causes a LoadError.
try:
f = open(filename, "w")
f.write("oops\n")
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
self.assertRaises(LoadError, c.load, filename)
finally:
try: os.unlink(filename)
except OSError: pass
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third-party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
    ## This means it will allow commas etc inside values (and yes, both
    ## commas and equals commonly appear in the cookie value). This also
    ## means that if you fold multiple Set-Cookie header fields into one,
    ## comma-separated list, it'll be a headache to parse (at least my head
    ## starts hurting every time I think of that code).
    ## - Expires: You'll get all sorts of date formats in the expires,
    ## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import cookielib, urllib2
pol = cookielib.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_value(self):
from cookielib import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# name, and by cookielib as a missing value
filename = test_support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]["eggs"]
self.assert_(cookie.value is None)
self.assertEquals(cookie.name, "eggs")
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
self.assert_(cookie.value is None)
self.assertEquals(cookie.name, '"spam"')
self.assertEquals(lwp_cookie_str(cookie), (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0'))
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
self.assertEquals(
repr(c),
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
)
self.assertEquals(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
def test_rfc2109_handling(self):
# RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
# dependent on policy settings
from cookielib import CookieJar, DefaultCookiePolicy
for rfc2109_as_netscape, rfc2965, version in [
# default according to rfc2965 if not explicitly specified
(None, False, 0),
(None, True, 1),
# explicit rfc2109_as_netscape
(False, False, None), # version None here means no cookie stored
(False, True, 1),
(True, False, 0),
(True, True, 0),
]:
policy = DefaultCookiePolicy(
rfc2109_as_netscape=rfc2109_as_netscape,
rfc2965=rfc2965)
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
try:
cookie = c._cookies["www.example.com"]["/"]["ni"]
except KeyError:
self.assert_(version is None) # didn't expect a stored cookie
else:
self.assertEqual(cookie.version, version)
# 2965 cookies are unaffected
interact_2965(c, "http://www.example.com/",
"foo=bar; Version=1")
if rfc2965:
cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
from cookielib import CookieJar, DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
self.assertEquals(cookie.domain, ".acme.com")
self.assert_(cookie.domain_specified)
self.assertEquals(cookie.port, DEFAULT_HTTP_PORT)
self.assert_(not cookie.port_specified)
# case is preserved
self.assert_(cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
self.assertEquals(cookie.domain, "www.acme.com")
self.assert_(not cookie.domain_specified)
self.assertEquals(cookie.port, "80,8080")
self.assert_(cookie.port_specified)
cookie = c._cookies["www.acme.com"]["/"]["nini"]
self.assert_(cookie.port is None)
self.assert_(not cookie.port_specified)
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
        spam = c._cookies["www.acme.com"]["/"]["spam"]
self.assert_(foo.expires is None)
self.assert_(spam.expires is None)
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from cookielib import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_('expires' in cookies)
self.assert_('version' in cookies)
def test_expires(self):
from cookielib import time2netscape, CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
self.assertEquals(len(c), 1)
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEquals(len(c), 1)
self.assert_('spam="bar"' in h and "foo" not in h)
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
self.assertEquals(len(c), 3)
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEquals(len(c), 1)
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
self.assertEquals(len(c), 2)
c.clear_session_cookies()
self.assertEquals(len(c), 1)
self.assert_('spam="bar"' in h)
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from cookielib import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
self.assert_("/blah/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
self.assert_("/blah/rhubarb/" in c._cookies["www.acme.com"])
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
self.assert_("/blah" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
self.assert_("/blah/rhubarb" in c._cookies["www.acme.com"])
def test_escape_path(self):
from cookielib import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assertEquals(escape_path(arg), result)
def test_request_path(self):
from urllib2 import Request
from cookielib import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assertEquals(request_path(req), "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assertEquals(request_path(req), "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assertEquals(request_path(req), "/")
def test_request_port(self):
from urllib2 import Request
from cookielib import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
self.assertEquals(request_port(req), "1234")
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
self.assertEquals(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
from urllib2 import Request
from cookielib import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#self.assertEquals(request_host(req), "www.acme.com")
self.assertEquals(request_host(req), "1.1.1.1")
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
self.assertEquals(request_host(req), "www.acme.com")
        # not actually sure this is a valid Request object, so maybe we should
        # remove the test for no host in url from the request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
self.assertEquals(request_host(req), "www.acme.com")
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
self.assertEquals(request_host(req), "www.acme.com")
def test_is_HDN(self):
from cookielib import is_HDN
self.assert_(is_HDN("foo.bar.com"))
self.assert_(is_HDN("1foo2.3bar4.5com"))
self.assert_(not is_HDN("192.168.1.1"))
self.assert_(not is_HDN(""))
self.assert_(not is_HDN("."))
self.assert_(not is_HDN(".foo.bar.com"))
self.assert_(not is_HDN("..foo"))
self.assert_(not is_HDN("foo."))
def test_reach(self):
from cookielib import reach
self.assertEquals(reach("www.acme.com"), ".acme.com")
self.assertEquals(reach("acme.com"), "acme.com")
self.assertEquals(reach("acme.local"), ".local")
self.assertEquals(reach(".local"), ".local")
self.assertEquals(reach(".com"), ".com")
self.assertEquals(reach("."), ".")
self.assertEquals(reach(""), "")
self.assertEquals(reach("192.168.0.1"), "192.168.0.1")
def test_domain_match(self):
from cookielib import domain_match, user_domain_match
self.assert_(domain_match("192.168.1.1", "192.168.1.1"))
self.assert_(not domain_match("192.168.1.1", ".168.1.1"))
self.assert_(domain_match("x.y.com", "x.Y.com"))
self.assert_(domain_match("x.y.com", ".Y.com"))
self.assert_(not domain_match("x.y.com", "Y.com"))
self.assert_(domain_match("a.b.c.com", ".c.com"))
self.assert_(not domain_match(".c.com", "a.b.c.com"))
self.assert_(domain_match("example.local", ".local"))
self.assert_(not domain_match("blah.blah", ""))
self.assert_(not domain_match("", ".rhubarb.rhubarb"))
self.assert_(domain_match("", ""))
self.assert_(user_domain_match("acme.com", "acme.com"))
self.assert_(not user_domain_match("acme.com", ".acme.com"))
self.assert_(user_domain_match("rhubarb.acme.com", ".acme.com"))
self.assert_(user_domain_match("www.rhubarb.acme.com", ".acme.com"))
self.assert_(user_domain_match("x.y.com", "x.Y.com"))
self.assert_(user_domain_match("x.y.com", ".Y.com"))
self.assert_(not user_domain_match("x.y.com", "Y.com"))
self.assert_(user_domain_match("y.com", "Y.com"))
self.assert_(not user_domain_match(".y.com", "Y.com"))
self.assert_(user_domain_match(".y.com", ".Y.com"))
self.assert_(user_domain_match("x.y.com", ".com"))
self.assert_(not user_domain_match("x.y.com", "com"))
self.assert_(not user_domain_match("x.y.com", "m"))
self.assert_(not user_domain_match("x.y.com", ".m"))
self.assert_(not user_domain_match("x.y.com", ""))
self.assert_(not user_domain_match("x.y.com", "."))
self.assert_(user_domain_match("192.168.1.1", "192.168.1.1"))
# not both HDNs, so must string-compare equal to match
self.assert_(not user_domain_match("192.168.1.1", ".168.1.1"))
self.assert_(not user_domain_match("192.168.1.1", "."))
# empty string is a special case
self.assert_(not user_domain_match("192.168.1.1", ""))
def test_wrong_domain(self):
# Cookies whose effective request-host name does not domain-match the
# domain are rejected.
# XXX far from complete
from cookielib import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/",
'foo=bar; domain=friendly.org; Version="1"')
self.assertEquals(len(c), 0)
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from cookielib import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain
# should all get accepted, as should .acme.com, acme.com and no domain
# for 2-component domains like acme.com.
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
self.assertEquals(len(c), 1)
self.assertEquals(c._cookies["foo.net"]["/"]["ns"].value, "bar")
self.assertEquals(interact_netscape(c, "http://foo.net/"), "ns=bar")
# *will* be returned to any other domain (unlike RFC 2965)...
self.assertEquals(interact_netscape(c, "http://www.foo.net/"),
"ns=bar")
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
self.assertEquals(interact_netscape(c, "http://www.foo.net/"), "")
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
self.assertEquals(len(c), 3)
self.assertEquals(c._cookies[".foo.net"]["/foo"]["spam1"].value,
"eggs")
self.assertEquals(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value,
"eggs")
self.assertEquals(interact_netscape(c, "http://foo.net/foo/bar/"),
"spam2=eggs; spam1=eggs; ns=bar")
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
self.assertEquals(len(c), 3)
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## self.assertEquals(len(c), 2)
self.assertEquals(len(c), 4)
def test_two_component_domain_rfc2965(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
self.assertEquals(len(c), 1)
self.assertEquals(c._cookies["foo.net"]["/"]["foo"].value, "bar")
self.assertEquals(interact_2965(c, "http://foo.net/"),
"$Version=1; foo=bar")
# won't be returned to any other domain (because domain was implied)
self.assertEquals(interact_2965(c, "http://www.foo.net/"), "")
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
self.assertEquals(len(c), 1)
self.assertEquals(interact_2965(c, "http://foo.net/foo"),
"$Version=1; foo=bar")
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
self.assertEquals(c._cookies[".foo.net"]["/foo/"]["spam"].value,
"eggs")
self.assertEquals(len(c), 2)
self.assertEquals(interact_2965(c, "http://foo.net/foo/"),
"$Version=1; foo=bar")
self.assertEquals(interact_2965(c, "http://www.foo.net/foo/"),
'$Version=1; spam=eggs; $Domain="foo.net"')
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
self.assertEquals(len(c), 2)
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
self.assertEquals(len(c), 3)
def test_domain_allow(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 0)
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEquals(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assert_(not req.has_header("Cookie"))
def test_domain_block(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 0)
p = pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
self.assert_((req.has_header("Cookie") and
req.has_header("Cookie2")))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEquals(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assert_(not req.has_header("Cookie"))
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
self.assert_(
not c._cookies["www.acme.com"]["/"]["foo1"].secure,
"non-secure cookie registered secure")
self.assert_(
c._cookies["www.acme.com"]["/"]["foo2"].secure,
"secure cookie registered non-secure")
def test_quote_cookie_value(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
self.assertEquals(h, r'$Version=1; foo=\\b\"a\"r')
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
self.assertEquals(len(c), 1)
c.add_cookie_header(req)
self.assert_(req.has_header("Cookie"))
def test_domain_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Domain" not in h,
"absent domain returned with domain present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
self.assert_('$Domain=".bar.com"' in h, "domain not returned")
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
self.assert_('$Domain="bar.com"' in h, "domain not returned")
def test_path_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Path" not in h,
"absent path returned with path present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
self.assert_('$Path="/"' in h, "path not returned")
def test_port_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Port" not in h,
"absent port returned with port present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
self.assert_(re.search("\$Port([^=]|$)", h),
"port with no value not returned with no value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
self.assert_('$Port="80"' in h,
"port with single value not returned with single value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
self.assert_('$Port="80,8080"' in h,
"port with multiple values not returned with multiple "
"values")
def test_no_return_comment(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
self.assert_(
"Comment" not in h,
"Comment or CommentURL cookie-attributes returned to server")
def test_Cookie_iterator(self):
from cookielib import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
        # iterate a few times to check that the jar can be re-iterated
        for _ in range(4):
            i = 0
for c in cs:
self.assert_(isinstance(c, Cookie))
self.assertEquals(c.version, versions[i])
self.assertEquals(c.name, names[i])
self.assertEquals(c.domain, domains[i])
self.assertEquals(c.path, paths[i])
i = i + 1
def test_parse_ns_headers(self):
from cookielib import parse_ns_headers
# missing domain value (invalid cookie)
self.assertEquals(
parse_ns_headers(["foo=bar; path=/; domain"]),
[[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
)
# invalid expires value
self.assertEquals(
parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]),
[[("foo", "bar"), ("expires", None), ("version", "0")]]
)
# missing cookie value (valid cookie)
self.assertEquals(
parse_ns_headers(["foo"]),
[[("foo", None), ("version", "0")]]
)
# shouldn't add version if header is empty
self.assertEquals(parse_ns_headers([""]), [])
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from cookielib import CookieJar
from urllib2 import Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
self.assertEquals(len(c), 0)
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
self.assert_(cookie.expires is None)
class LWPCookieTests(TestCase):
# Tests taken from libwww-perl, with a few modifications and additions.
def test_netscape_example_1(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = time.localtime()[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"), "CUSTOMER=WILE_E_COYOTE")
self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h and
"SHIPPING=FEDEX" not in h)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_(("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h and
h.startswith("SHIPPING=FEDEX;")))
def test_netscape_example_2(self):
from cookielib import CookieJar
from urllib2 import Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
self.assert_(re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie")))
def test_ietf_example_1(self):
from cookielib import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assert_(not cookie)
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
self.assert_(re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie))
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
self.assert_(re.search(r'^\$Version="?1"?;', cookie))
self.assert_(re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
                               r'\s*\$Path="\/acme"', cookie))
self.assert_(re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
self.assert_(
re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
"WILE_E_COYOTE" in cookie)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from cookielib import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
self.assert_(
re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie))
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
self.assert_("Rocket_Launcher_0001" in cookie and
"Riding_Rocket_0023" not in cookie)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from cookielib import DefaultCookiePolicy, LWPCookieJar
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
self.assert_(not c)
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
self.assertEquals(len(c), 1)
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
self.assertEquals(len(c), 1)
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
self.assertEquals(len(c), 2)
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
self.assertEquals(len(c), 2)
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEquals(len(c), 2)
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEquals(len(c), 3)
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
self.assertEquals(len(c), 3)
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
self.assertEquals(len(c), 4)
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
self.assertEquals(len(c), 5)
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
self.assertEquals(len(c), 6)
# save and restore
filename = test_support.TESTFN
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEquals(old, repr(c))
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anewå/æøå",
            'bar=baz; path="/foo/"; version=1')
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
self.assert_("foo=bar" in cookie and version_re.search(cookie))
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anewå/æøå")
self.assert_(not cookie)
# unicode URL doesn't raise exception
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
def test_mozilla(self):
# Save / load Mozilla/Netscape cookie file format.
from cookielib import MozillaCookieJar, DefaultCookiePolicy
year_plus_one = time.localtime()[0] + 1
filename = test_support.TESTFN
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(c, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(c, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def save_and_restore(cj, ignore_discard):
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
self.assertEquals(len(new_c), 6) # none discarded
self.assert_("name='foo1', value='bar'" in repr(new_c))
new_c = save_and_restore(c, False)
self.assertEquals(len(new_c), 4) # 2 of them discarded on save
self.assert_("name='foo1', value='bar'" in repr(new_c))
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
from cookielib import CookieJar
from urllib2 import Request
c = CookieJar()
headers = []
req = Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
        # Netscape also accepts a domain equal to the host, without a leading
        # dot being added to it.  The value should not be quoted even if
        # strange chars are used in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
self.assert_(
"PART_NUMBER=3,4" in req.get_header("Cookie") and
"Customer=WILE_E_COYOTE" in req.get_header("Cookie"))
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
self.assert_("foo1=bar" in cookie)
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
self.assert_("foo2=bar" in cookie and len(c) == 3)
def test_intranet_domains_ns(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
self.assertEquals(len(c), 2)
self.assert_("foo1=bar" in cookie)
cookie = interact_netscape(c, "http://example/")
self.assert_("foo2=bar" in cookie)
self.assertEquals(len(c), 2)
def test_empty_path(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
# Test for empty path
# Broken web-server ORION/1.3.38 returns to the client response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
        # i.e. with Path set to nothing.
# In this case, extract_cookies() must set cookie to / (root)
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = Request("http://www.ants.com/")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEquals(req.get_header("Cookie2"), '$Version="1"')
# missing path in the request URI
req = Request("http://www.ants.com:8080")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEquals(req.get_header("Cookie2"), '$Version="1"')
def test_session_cookies(self):
from cookielib import CookieJar
from urllib2 import Request
year_plus_one = time.localtime()[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
self.assert_(not (
            # a permanent cookie got lost accidentally
counter["perm_after"] != counter["perm_before"] or
# a session cookie hasn't been cleared
counter["session_after"] != 0 or
# we didn't have session cookies in the first place
counter["session_before"] == 0))
def test_main(verbose=None):
test_support.run_unittest(
DateTimeTests,
HeaderTests,
CookieTests,
FileCookieJarTests,
LWPCookieTests,
)
if __name__ == "__main__":
test_main(verbose=True)
|
apache-2.0
|
omnirom/android_kernel_huawei_angler
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
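# autodict (pulled in from perf's Core helper library above) is an
# auto-vivifying dict; here it maps syscall id -> invocation count.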
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
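# perf calls this handler once for every raw_syscalls:sys_enter tracepoint
# event; 'id' is the raw syscall number, translated via syscall_name() when
# the totals are printed.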
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
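	# the first access to syscalls[id] yields an empty autodict node,
	# so '+=' raises TypeError; fall back to initializing the count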
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
mfherbst/spack
|
var/spack/repos/builtin/packages/bedtools2/package.py
|
5
|
2043
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bedtools2(Package):
"""Collectively, the bedtools utilities are a swiss-army knife of
tools for a wide-range of genomics analysis tasks. The most
widely-used tools enable genome arithmetic: that is, set theory
on the genome."""
homepage = "https://github.com/arq5x/bedtools2"
url = "https://github.com/arq5x/bedtools2/archive/v2.26.0.tar.gz"
version('2.27.1', '8e0afcab95a824e42a6e99c5436a8438')
version('2.27.0', '052f22eb214ef2e7e7981b3c01167302')
version('2.26.0', '52227e7efa6627f0f95d7d734973233d')
version('2.25.0', '534fb4a7bf0d0c3f05be52a0160d8e3d')
version('2.23.0', '4fa3671b3a3891eefd969ad3509222e3')
depends_on('zlib')
def install(self, spec, prefix):
make("prefix=%s" % prefix, "install")
|
lgpl-2.1
|
gnuhub/intellij-community
|
plugins/hg4idea/testData/bin/hgext/rebase.py
|
90
|
32420
|
# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to move sets of revisions to a different ancestor
This extension lets you rebase changesets in an existing Mercurial
repository.
For more information:
http://mercurial.selenic.com/wiki/RebaseExtension
'''
from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
from mercurial import extensions, patch, scmutil, phases, obsolete, error
from mercurial.commands import templateopts
from mercurial.node import nullrev
from mercurial.lock import release
from mercurial.i18n import _
import os, errno
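# sentinels stored in the rebase state map (old rev -> new rev): -1 marks a
# revision not yet rebased, while nullmerge and revignored mark revisions
# that will not be turned into new commits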
nullmerge = -2
revignored = -3
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
@command('rebase',
[('s', 'source', '',
_('rebase from the specified changeset'), _('REV')),
('b', 'base', '',
_('rebase from the base of the specified changeset '
'(up to greatest common ancestor of base and dest)'),
_('REV')),
('r', 'rev', [],
_('rebase these revisions'),
_('REV')),
('d', 'dest', '',
_('rebase onto the specified changeset'), _('REV')),
('', 'collapse', False, _('collapse the rebased changesets')),
('m', 'message', '',
_('use text as collapse commit message'), _('TEXT')),
('e', 'edit', False, _('invoke editor on commit messages')),
('l', 'logfile', '',
_('read collapse commit message from file'), _('FILE')),
('', 'keep', False, _('keep original changesets')),
('', 'keepbranches', False, _('keep original branch names')),
('D', 'detach', False, _('(DEPRECATED)')),
('t', 'tool', '', _('specify merge tool')),
('c', 'continue', False, _('continue an interrupted rebase')),
('a', 'abort', False, _('abort an interrupted rebase'))] +
templateopts,
_('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
"""move changeset (and descendants) to a different branch
Rebase uses repeated merging to graft changesets from one part of
history (the source) onto another (the destination). This can be
useful for linearizing *local* changes relative to a master
development tree.
You should not rebase changesets that have already been shared
with others. Doing so will force everybody else to perform the
same rebase or they will end up with duplicated changesets after
pulling in your rebased changesets.
In its default configuration, Mercurial will prevent you from
rebasing published changes. See :hg:`help phases` for details.
If you don't specify a destination changeset (``-d/--dest``),
rebase uses the tipmost head of the current named branch as the
destination. (The destination changeset is not modified by
rebasing, but new changesets are added as its descendants.)
You can specify which changesets to rebase in two ways: as a
"source" changeset or as a "base" changeset. Both are shorthand
for a topologically related set of changesets (the "source
branch"). If you specify source (``-s/--source``), rebase will
rebase that changeset and all of its descendants onto dest. If you
specify base (``-b/--base``), rebase will select ancestors of base
back to but not including the common ancestor with dest. Thus,
``-b`` is less precise but more convenient than ``-s``: you can
specify any changeset in the source branch, and rebase will select
the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
uses the parent of the working directory as the base.
For advanced usage, a third way is available through the ``--rev``
option. It allows you to specify an arbitrary set of changesets to
rebase. Descendants of revs you specify with this option are not
automatically included in the rebase.
By default, rebase recreates the changesets in the source branch
as descendants of dest and then destroys the originals. Use
``--keep`` to preserve the original source changesets. Some
changesets in the source branch (e.g. merges from the destination
branch) may be dropped if they no longer contribute any change.
One result of the rules for selecting the destination changeset
and source branch is that, unlike ``merge``, rebase will do
nothing if you are at the latest (tipmost) head of a named branch
with two heads. You need to explicitly specify source and/or
destination (or ``update`` to the other head, if it's the head of
the intended source branch).
If a rebase is interrupted to manually resolve a merge, it can be
continued with --continue/-c or aborted with --abort/-a.
Returns 0 on success, 1 if nothing to rebase.
"""
originalwd = target = None
activebookmark = None
external = nullrev
state = {}
skipped = set()
targetancestors = set()
editor = None
if opts.get('edit'):
editor = cmdutil.commitforceeditor
lock = wlock = None
try:
wlock = repo.wlock()
lock = repo.lock()
# Validate input and define rebasing points
destf = opts.get('dest', None)
srcf = opts.get('source', None)
basef = opts.get('base', None)
revf = opts.get('rev', [])
contf = opts.get('continue')
abortf = opts.get('abort')
collapsef = opts.get('collapse', False)
collapsemsg = cmdutil.logmessage(ui, opts)
extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
keepf = opts.get('keep', False)
keepbranchesf = opts.get('keepbranches', False)
# keepopen is not meant for use on the command line, but by
# other extensions
keepopen = opts.get('keepopen', False)
if collapsemsg and not collapsef:
raise util.Abort(
_('message can only be specified with collapse'))
if contf or abortf:
if contf and abortf:
raise util.Abort(_('cannot use both abort and continue'))
if collapsef:
raise util.Abort(
_('cannot use collapse with continue or abort'))
if srcf or basef or destf:
raise util.Abort(
_('abort and continue do not allow specifying revisions'))
if opts.get('tool', False):
ui.warn(_('tool option will be ignored\n'))
(originalwd, target, state, skipped, collapsef, keepf,
keepbranchesf, external, activebookmark) = restorestatus(repo)
if abortf:
return abort(repo, originalwd, target, state)
else:
if srcf and basef:
raise util.Abort(_('cannot specify both a '
'source and a base'))
if revf and basef:
raise util.Abort(_('cannot specify both a '
'revision and a base'))
if revf and srcf:
raise util.Abort(_('cannot specify both a '
'revision and a source'))
cmdutil.bailifchanged(repo)
if not destf:
# Destination defaults to the latest revision in the
# current branch
branch = repo[None].branch()
dest = repo[branch]
else:
dest = scmutil.revsingle(repo, destf)
if revf:
rebaseset = repo.revs('%lr', revf)
elif srcf:
src = scmutil.revrange(repo, [srcf])
rebaseset = repo.revs('(%ld)::', src)
else:
base = scmutil.revrange(repo, [basef or '.'])
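                # the "source branch": children of ancestor(base, dest) that
                # are also ancestors of base, plus all their descendants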
rebaseset = repo.revs(
'(children(ancestor(%ld, %d)) and ::(%ld))::',
base, dest, base)
if rebaseset:
root = min(rebaseset)
else:
root = None
if not rebaseset:
repo.ui.debug('base is ancestor of destination\n')
result = None
elif (not (keepf or obsolete._enabled)
and repo.revs('first(children(%ld) - %ld)',
rebaseset, rebaseset)):
raise util.Abort(
_("can't remove original changesets with"
" unrebased descendants"),
hint=_('use --keep to keep original changesets'))
else:
result = buildstate(repo, dest, rebaseset, collapsef)
if not result:
# Empty state built, nothing to rebase
ui.status(_('nothing to rebase\n'))
return 1
elif not keepf and not repo[root].mutable():
raise util.Abort(_("can't rebase immutable changeset %s")
% repo[root],
hint=_('see hg help phases for details'))
else:
originalwd, target, state = result
if collapsef:
targetancestors = repo.changelog.ancestors([target],
inclusive=True)
external = checkexternal(repo, state, targetancestors)
if keepbranchesf:
assert not extrafn, 'cannot use both keepbranches and extrafn'
def extrafn(ctx, extra):
extra['branch'] = ctx.branch()
if collapsef:
branches = set()
for rev in state:
branches.add(repo[rev].branch())
if len(branches) > 1:
raise util.Abort(_('cannot collapse multiple named '
'branches'))
# Rebase
if not targetancestors:
targetancestors = repo.changelog.ancestors([target], inclusive=True)
# Keep track of the current bookmarks in order to reset them later
currentbookmarks = repo._bookmarks.copy()
activebookmark = activebookmark or repo._bookmarkcurrent
if activebookmark:
bookmarks.unsetcurrent(repo)
sortedstate = sorted(state)
total = len(sortedstate)
pos = 0
for rev in sortedstate:
pos += 1
if state[rev] == -1:
ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
_('changesets'), total)
storestatus(repo, originalwd, target, state, collapsef, keepf,
keepbranchesf, external, activebookmark)
p1, p2 = defineparents(repo, rev, target, state,
targetancestors)
if len(repo.parents()) == 2:
repo.ui.debug('resuming interrupted rebase\n')
else:
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
stats = rebasenode(repo, rev, p1, state, collapsef)
if stats and stats[3] > 0:
raise error.InterventionRequired(
_('unresolved conflicts (see hg '
'resolve, then hg rebase --continue)'))
finally:
ui.setconfig('ui', 'forcemerge', '')
cmdutil.duplicatecopies(repo, rev, target)
if not collapsef:
newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
editor=editor)
else:
# Skip commit if we are collapsing
repo.setparents(repo[p1].node())
newrev = None
# Update the state
if newrev is not None:
state[rev] = repo[newrev].rev()
else:
if not collapsef:
ui.note(_('no changes, revision %d skipped\n') % rev)
ui.debug('next revision set to %s\n' % p1)
skipped.add(rev)
state[rev] = p1
ui.progress(_('rebasing'), None)
ui.note(_('rebase merging completed\n'))
if collapsef and not keepopen:
p1, p2 = defineparents(repo, min(state), target,
state, targetancestors)
if collapsemsg:
commitmsg = collapsemsg
else:
commitmsg = 'Collapsed revision'
for rebased in state:
if rebased not in skipped and state[rebased] > nullmerge:
commitmsg += '\n* %s' % repo[rebased].description()
commitmsg = ui.edit(commitmsg, repo.ui.username())
newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
extrafn=extrafn, editor=editor)
if 'qtip' in repo.tags():
updatemq(repo, state, skipped, **opts)
if currentbookmarks:
# Nodeids are needed to reset bookmarks
nstate = {}
for k, v in state.iteritems():
if v > nullmerge:
nstate[repo[k].node()] = repo[v].node()
# XXX this is the same as dest.node() for the non-continue path --
# this should probably be cleaned up
targetnode = repo[target].node()
if not keepf:
collapsedas = None
if collapsef:
collapsedas = newrev
clearrebased(ui, repo, state, skipped, collapsedas)
if currentbookmarks:
updatebookmarks(repo, targetnode, nstate, currentbookmarks)
clearstatus(repo)
ui.note(_("rebase completed\n"))
util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
if skipped:
ui.note(_("%d revisions have been skipped\n") % len(skipped))
if (activebookmark and
repo['tip'].node() == repo._bookmarks[activebookmark]):
bookmarks.setcurrent(repo, activebookmark)
finally:
release(lock, wlock)
def checkexternal(repo, state, targetancestors):
"""Check whether one or more external revisions need to be taken in
consideration. In the latter case, abort.
"""
external = nullrev
source = min(state)
for rev in state:
if rev == source:
continue
# Check externals and fail if there are more than one
for p in repo[rev].parents():
if (p.rev() not in state
and p.rev() not in targetancestors):
if external != nullrev:
raise util.Abort(_('unable to collapse, there is more '
'than one external parent'))
external = p.rev()
return external
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
'Commit the changes and store useful information in extra'
try:
repo.setparents(repo[p1].node(), repo[p2].node())
ctx = repo[rev]
if commitmsg is None:
commitmsg = ctx.description()
extra = {'rebase_source': ctx.hex()}
if extrafn:
extrafn(ctx, extra)
# Commit might fail if unresolved files exist
newrev = repo.commit(text=commitmsg, user=ctx.user(),
date=ctx.date(), extra=extra, editor=editor)
repo.dirstate.setbranch(repo[newrev].branch())
targetphase = max(ctx.phase(), phases.draft)
# retractboundary doesn't overwrite upper phase inherited from parent
newnode = repo[newrev].node()
if newnode:
phases.retractboundary(repo, targetphase, [newnode])
return newrev
except util.Abort:
# Invalidate the previous setparents
repo.dirstate.invalidate()
raise
def rebasenode(repo, rev, p1, state, collapse):
'Rebase a single revision'
# Merge phase
# Update to target and merge it with local
if repo['.'].rev() != repo[p1].rev():
repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
merge.update(repo, p1, False, True, False)
else:
repo.ui.debug(" already in target\n")
repo.dirstate.write()
repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
base = None
if repo[rev].rev() != repo[min(state)].rev():
base = repo[rev].p1().node()
        # When collapsing in-place, the parent is the common ancestor, so we
        # have to allow merging with it.
return merge.update(repo, rev, True, True, False, base, collapse)
def nearestrebased(repo, rev, state):
"""return the nearest ancestors of rev in the rebase result"""
rebased = [r for r in state if state[r] > nullmerge]
candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
if candidates:
return state[candidates[0]]
else:
return None
def defineparents(repo, rev, target, state, targetancestors):
'Return the new parent relationship of the revision that will be rebased'
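    # Summary of the remapping below: a parent already rebased maps to its
    # new revision, a parent that is an ancestor of the target (or pruned
    # as nullmerge) maps to the target, an ignored parent maps to its
    # nearest rebased ancestor (falling back to the target), and an
    # external parent is preserved as-is.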
parents = repo[rev].parents()
p1 = p2 = nullrev
P1n = parents[0].rev()
if P1n in targetancestors:
p1 = target
elif P1n in state:
if state[P1n] == nullmerge:
p1 = target
elif state[P1n] == revignored:
p1 = nearestrebased(repo, P1n, state)
if p1 is None:
p1 = target
else:
p1 = state[P1n]
else: # P1n external
p1 = target
p2 = P1n
if len(parents) == 2 and parents[1].rev() not in targetancestors:
P2n = parents[1].rev()
# interesting second parent
if P2n in state:
if p1 == target: # P1n in targetancestors or external
p1 = state[P2n]
elif state[P2n] == revignored:
p2 = nearestrebased(repo, P2n, state)
if p2 is None:
# no ancestors rebased yet, detach
p2 = target
else:
p2 = state[P2n]
else: # P2n external
if p2 != nullrev: # P1n external too => rev is a merged revision
raise util.Abort(_('cannot use revision %d as base, result '
'would have 3 parents') % rev)
p2 = P2n
repo.ui.debug(" future parents are %d and %d\n" %
(repo[p1].rev(), repo[p2].rev()))
return p1, p2
def isagitpatch(repo, patchname):
'Return true if the given patch is in git format'
mqpatch = os.path.join(repo.mq.path, patchname)
for line in patch.linereader(file(mqpatch, 'rb')):
if line.startswith('diff --git'):
return True
return False
def updatemq(repo, state, skipped, **opts):
'Update rebased mq patches - finalize and then import them'
mqrebase = {}
mq = repo.mq
original_series = mq.fullseries[:]
skippedpatches = set()
for p in mq.applied:
rev = repo[p.node].rev()
if rev in state:
repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
(rev, p.name))
mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
else:
# Applied but not rebased, not sure this should happen
skippedpatches.add(p.name)
if mqrebase:
mq.finish(repo, mqrebase.keys())
        # We must start the import from the newest revision
for rev in sorted(mqrebase, reverse=True):
if rev not in skipped:
name, isgit = mqrebase[rev]
repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
mq.qimport(repo, (), patchname=name, git=isgit,
rev=[str(state[rev])])
else:
# Rebased and skipped
skippedpatches.add(mqrebase[rev][0])
# Patches were either applied and rebased and imported in
# order, applied and removed or unapplied. Discard the removed
# ones while preserving the original series order and guards.
newseries = [s for s in original_series
if mq.guard_re.split(s, 1)[0] not in skippedpatches]
mq.fullseries[:] = newseries
mq.seriesdirty = True
mq.savedirty()
def updatebookmarks(repo, targetnode, nstate, originalbookmarks):
'Move bookmarks to their correct changesets, and delete divergent ones'
marks = repo._bookmarks
for k, v in originalbookmarks.iteritems():
if v in nstate:
# update the bookmarks for revs that have moved
marks[k] = nstate[v]
bookmarks.deletedivergent(repo, [targetnode], k)
marks.write()
def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
external, activebookmark):
'Store the current status to allow recovery'
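    # The file contains one value per line: originalwd hex, target hex,
    # external hex, the collapse/keep/keepbranches flags as integers, the
    # active bookmark name (possibly empty), then one "oldrev:newrev"
    # entry per changeset in the rebase state.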
f = repo.opener("rebasestate", "w")
f.write(repo[originalwd].hex() + '\n')
f.write(repo[target].hex() + '\n')
f.write(repo[external].hex() + '\n')
f.write('%d\n' % int(collapse))
f.write('%d\n' % int(keep))
f.write('%d\n' % int(keepbranches))
f.write('%s\n' % (activebookmark or ''))
for d, v in state.iteritems():
oldrev = repo[d].hex()
if v > nullmerge:
newrev = repo[v].hex()
else:
newrev = v
f.write("%s:%s\n" % (oldrev, newrev))
f.close()
repo.ui.debug('rebase status stored\n')
def clearstatus(repo):
'Remove the status files'
util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
def restorestatus(repo):
'Restore a previously stored status'
try:
target = None
collapse = False
external = nullrev
activebookmark = None
state = {}
f = repo.opener("rebasestate")
for i, l in enumerate(f.read().splitlines()):
if i == 0:
originalwd = repo[l].rev()
elif i == 1:
target = repo[l].rev()
elif i == 2:
external = repo[l].rev()
elif i == 3:
collapse = bool(int(l))
elif i == 4:
keep = bool(int(l))
elif i == 5:
keepbranches = bool(int(l))
elif i == 6 and not (len(l) == 81 and ':' in l):
# line 6 is a recent addition, so for backwards compatibility
# check that the line doesn't look like the oldrev:newrev lines
activebookmark = l
else:
oldrev, newrev = l.split(':')
if newrev in (str(nullmerge), str(revignored)):
state[repo[oldrev].rev()] = int(newrev)
else:
state[repo[oldrev].rev()] = repo[newrev].rev()
skipped = set()
# recompute the set of skipped revs
if not collapse:
seen = set([target])
for old, new in sorted(state.items()):
if new != nullrev and new in seen:
skipped.add(old)
seen.add(new)
repo.ui.debug('computed skipped revs: %s\n' % skipped)
repo.ui.debug('rebase status resumed\n')
return (originalwd, target, state, skipped,
collapse, keep, keepbranches, external, activebookmark)
except IOError, err:
if err.errno != errno.ENOENT:
raise
raise util.Abort(_('no rebase in progress'))
def abort(repo, originalwd, target, state):
'Restore the repository to its original state'
dstates = [s for s in state.values() if s != nullrev]
immutable = [d for d in dstates if not repo[d].mutable()]
if immutable:
raise util.Abort(_("can't abort rebase due to immutable changesets %s")
% ', '.join(str(repo[r]) for r in immutable),
hint=_('see hg help phases for details'))
descendants = set()
if dstates:
descendants = set(repo.changelog.descendants(dstates))
if descendants - set(dstates):
repo.ui.warn(_("warning: new changesets detected on target branch, "
"can't abort\n"))
return -1
else:
# Strip from the first rebased revision
merge.update(repo, repo[originalwd].rev(), False, True, False)
rebased = filter(lambda x: x > -1 and x != target, state.values())
if rebased:
strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
# no backup of rebased cset versions needed
repair.strip(repo.ui, repo, strippoints)
clearstatus(repo)
repo.ui.warn(_('rebase aborted\n'))
return 0
def buildstate(repo, dest, rebaseset, collapse):
'''Define which revisions are going to be rebased and where
repo: repo
dest: context
rebaseset: set of rev
    collapse: whether the rebased changesets will be collapsed
    '''
# This check isn't strictly necessary, since mq detects commits over an
# applied patch. But it prevents messing up the working directory when
# a partially completed rebase is blocked by mq.
if 'qtip' in repo.tags() and (dest.node() in
[s.node for s in repo.mq.applied]):
raise util.Abort(_('cannot rebase onto an applied mq patch'))
roots = list(repo.set('roots(%ld)', rebaseset))
if not roots:
raise util.Abort(_('no matching revisions'))
roots.sort()
state = {}
detachset = set()
for root in roots:
commonbase = root.ancestor(dest)
if commonbase == root:
raise util.Abort(_('source is ancestor of destination'))
if commonbase == dest:
samebranch = root.branch() == dest.branch()
if not collapse and samebranch and root in dest.children():
repo.ui.debug('source is a child of destination\n')
return None
repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
state.update(dict.fromkeys(rebaseset, nullrev))
# Rebase tries to turn <dest> into a parent of <root> while
# preserving the number of parents of rebased changesets:
#
# - A changeset with a single parent will always be rebased as a
# changeset with a single parent.
#
# - A merge will be rebased as merge unless its parents are both
# ancestors of <dest> or are themselves in the rebased set and
# pruned while rebased.
#
# If one parent of <root> is an ancestor of <dest>, the rebased
# version of this parent will be <dest>. This is always true with
# --base option.
#
# Otherwise, we need to *replace* the original parents with
# <dest>. This "detaches" the rebased set from its former location
# and rebases it onto <dest>. Changes introduced by ancestors of
# <root> not common with <dest> (the detachset, marked as
# nullmerge) are "removed" from the rebased changesets.
#
# - If <root> has a single parent, set it to <dest>.
#
# - If <root> is a merge, we cannot decide which parent to
# replace, the rebase operation is not clearly defined.
#
# The table below sums up this behavior:
#
# +------------------+----------------------+-------------------------+
# | | one parent | merge |
# +------------------+----------------------+-------------------------+
# | parent in | new parent is <dest> | parents in ::<dest> are |
# | ::<dest> | | remapped to <dest> |
# +------------------+----------------------+-------------------------+
# | unrelated source | new parent is <dest> | ambiguous, abort |
# +------------------+----------------------+-------------------------+
#
# The actual abort is handled by `defineparents`
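        # Concrete illustration of the first table row (hypothetical
        # revision numbers): rebasing root 3, whose only parent 1 is an
        # ancestor of dest 5, simply re-parents 3 onto 5 and marks nothing
        # extra as nullmerge.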
if len(root.parents()) <= 1:
# ancestors of <root> not ancestors of <dest>
detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
[root.rev()]))
for r in detachset:
if r not in state:
state[r] = nullmerge
if len(roots) > 1:
        # If we have multiple roots, we may have "holes" in the rebase set.
        # Rebase roots that descend from those "holes" should not be detached
        # as the other roots are. We use the special `revignored` value to
        # tell rebase that the revision should be ignored but that
        # `defineparents` should search for a rebase destination that makes
        # sense regarding the rebased topology.
rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
for ignored in set(rebasedomain) - set(rebaseset):
state[ignored] = revignored
return repo['.'].rev(), dest.rev(), state
def clearrebased(ui, repo, state, skipped, collapsedas=None):
"""dispose of rebased revision at the end of the rebase
    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node."""
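    # Two disposal strategies: with obsolescence markers enabled, record a
    # marker mapping each rebased changeset to its successor; otherwise
    # strip the original changesets, unless new descendants showed up on
    # the source branch.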
if obsolete._enabled:
markers = []
for rev, newrev in sorted(state.items()):
if newrev >= 0:
if rev in skipped:
succs = ()
elif collapsedas is not None:
succs = (repo[collapsedas],)
else:
succs = (repo[newrev],)
markers.append((repo[rev], succs))
if markers:
obsolete.createmarkers(repo, markers)
else:
rebased = [rev for rev in state if state[rev] > nullmerge]
if rebased:
stripped = []
for root in repo.set('roots(%ld)', rebased):
if set(repo.changelog.descendants([root.rev()])) - set(state):
ui.warn(_("warning: new changesets detected "
"on source branch, not stripping\n"))
else:
stripped.append(root.node())
if stripped:
# backup the old csets by default
repair.strip(ui, repo, stripped, "all")
def pullrebase(orig, ui, repo, *args, **opts):
'Call rebase after pull if the latter has been invoked with --rebase'
if opts.get('rebase'):
if opts.get('update'):
del opts['update']
ui.debug('--update and --rebase are not compatible, ignoring '
'the update flag\n')
movemarkfrom = repo['.'].node()
cmdutil.bailifchanged(repo)
revsprepull = len(repo)
origpostincoming = commands.postincoming
def _dummy(*args, **kwargs):
pass
commands.postincoming = _dummy
try:
orig(ui, repo, *args, **opts)
finally:
commands.postincoming = origpostincoming
revspostpull = len(repo)
if revspostpull > revsprepull:
            # the --rev option from pull conflicts with rebase's own --rev,
            # so drop it
if 'rev' in opts:
del opts['rev']
rebase(ui, repo, **opts)
branch = repo[None].branch()
dest = repo[branch].rev()
if dest != repo['.'].rev():
                # there was nothing to rebase, so force an update
hg.update(repo, dest)
if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
ui.status(_("updating bookmark %s\n")
% repo._bookmarkcurrent)
else:
if opts.get('tool'):
raise util.Abort(_('--tool can only be used with --rebase'))
orig(ui, repo, *args, **opts)
def uisetup(ui):
'Replace pull with a decorator to provide --rebase option'
entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
entry[1].append(('', 'rebase', None,
_("rebase working directory to branch head")))
entry[1].append(('t', 'tool', '',
_("specify merge tool for rebase")))
|
apache-2.0
|
kgiusti/dispatch
|
tests/system_tests_multicast.py
|
3
|
38121
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Test the multicast forwarder
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import sys
from time import sleep
from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton.reactor import LinkOption
from proton import Connection
from proton import Link
from proton import Message
from proton import Delivery
from system_test import AsyncTestSender, AsyncTestReceiver, TestCase, Qdrouterd, main_module, TIMEOUT, TestTimeout, unittest
MAX_FRAME = 1023
LINK_CAPACITY = 250
W_THREADS = 2
LARGE_PAYLOAD = ("X" * MAX_FRAME) * 19
# check for leaks of the following entities
ALLOC_STATS=["qd_message_t",
"qd_buffer_t",
"qdr_delivery_t"]
class MulticastLinearTest(TestCase):
"""
Verify the multicast forwarding logic across a multihop linear router
configuration
"""
@classmethod
def setUpClass(cls):
"""Start a router"""
super(MulticastLinearTest, cls).setUpClass()
def router(name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes',
'workerThreads': W_THREADS}),
('listener', {'role': 'normal',
'port': cls.tester.get_port(),
'maxFrameSize': MAX_FRAME,
'linkCapacity': LINK_CAPACITY}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
# configuration:
# two edge routers connected via 2 interior routers.
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# +-------+ +---------+ +---------+ +-------+
#
# Each router has 2 multicast consumers
# EA1 and INT.A each have a multicast sender
cls.routers = []
interrouter_port = cls.tester.get_port()
cls.INTA_edge_port = cls.tester.get_port()
cls.INTB_edge_port = cls.tester.get_port()
router('INT.A', 'interior',
[('listener', {'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge', 'port': cls.INTA_edge_port})])
cls.INT_A = cls.routers[0]
cls.INT_A.listener = cls.INT_A.addresses[0]
router('INT.B', 'interior',
[('connector', {'name': 'connectorToA',
'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge',
'port': cls.INTB_edge_port})])
cls.INT_B = cls.routers[1]
cls.INT_B.listener = cls.INT_B.addresses[0]
router('EA1', 'edge',
[('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()}),
('connector', {'name': 'uplink', 'role': 'edge',
'port': cls.INTA_edge_port}),
('linkRoute', {'prefix': 'CfgLinkRoute1', 'containerId': 'FakeBroker', 'direction': 'in'}),
('linkRoute', {'prefix': 'CfgLinkRoute1', 'containerId': 'FakeBroker', 'direction': 'out'})])
cls.EA1 = cls.routers[2]
cls.EA1.listener = cls.EA1.addresses[0]
cls.EA1.route_container = cls.EA1.addresses[1]
router('EB1', 'edge',
[('connector', {'name': 'uplink',
'role': 'edge',
'port': cls.INTB_edge_port,
'maxFrameSize': 1024}),
('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()}),
('linkRoute', {'pattern': '*.cfg.pattern.#', 'containerId': 'FakeBroker', 'direction': 'in'}),
('linkRoute', {'pattern': '*.cfg.pattern.#', 'containerId': 'FakeBroker', 'direction': 'out'})])
cls.EB1 = cls.routers[3]
cls.EB1.listener = cls.EB1.addresses[0]
cls.EB1.route_container = cls.EB1.addresses[1]
cls.INT_A.wait_router_connected('INT.B')
cls.INT_B.wait_router_connected('INT.A')
cls.EA1.wait_connectors()
cls.EB1.wait_connectors()
# Client topology:
# all routes have 2 receivers
# Edge router EA1 and interior INT_A have a sender each
#
cls.config = [
# edge router EA1:
{'router': cls.EA1,
'senders': ['S-EA1-1'],
'receivers': ['R-EA1-1', 'R-EA1-2'],
'subscribers': 2,
'remotes': 0
},
# Interior router INT_A:
{'router': cls.INT_A,
'senders': ['S-INT_A-1'],
'receivers': ['R-INT_A-1', 'R-INT_A-2'],
'subscribers': 3,
'remotes': 1,
},
# Interior router INT_B:
{'router': cls.INT_B,
'senders': [],
'receivers': ['R-INT_B-1', 'R-INT_B-2'],
'subscribers': 3,
'remotes': 1,
},
# edge router EB1
{'router': cls.EB1,
'senders': [],
'receivers': ['R-EB1-1', 'R-EB1-2'],
'subscribers': 2,
'remotes': 0,
}
]
def _get_alloc_stats(self, router, stats):
# return a map of the current allocator counters for each entity type
# name in stats
#
# 57: END = [{u'heldByThreads': int32(384), u'typeSize': int32(536),
# u'transferBatchSize': int32(64), u'globalFreeListMax': int32(0),
# u'batchesRebalancedToGlobal': int32(774), u'typeName':
# u'qd_buffer_t', u'batchesRebalancedToThreads': int32(736),
# u'totalFreeToHeap': int32(0), u'totalAllocFromHeap': int32(2816),
# u'localFreeListMax': int32(128), u'type':
# u'org.apache.qpid.dispatch.allocator', u'identity':
# u'allocator/qd_buffer_t', u'name': u'allocator/qd_buffer_t'}]
d = dict()
mgmt = router.management
atype = 'org.apache.qpid.dispatch.allocator'
q = mgmt.query(type=atype).get_dicts()
for name in stats:
d[name] = list(filter(lambda a: a['typeName'] == name, q))[0]
return d
def _check_for_leaks(self):
for r in self.routers:
stats = self._get_alloc_stats(r, ALLOC_STATS)
for name in ALLOC_STATS:
# ensure threads haven't leaked
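                # each worker thread plus the main thread may legitimately
                # cache up to localFreeListMax objects on its local free
                # list; holding twice that combined limit suggests a leak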
max_allowed = ((W_THREADS + 1)
* stats[name]['localFreeListMax'])
held = stats[name]['heldByThreads']
if held >= (2 * max_allowed):
print("OOPS!!! %s: (%s) - held=%d max=%d\n %s\n"
% (r.config.router_id,
name, held, max_allowed, stats))
sys.stdout.flush()
self.assertFalse(held >= (2 * max_allowed))
#
    # run all the negative tests first so that, if we screw up the internal
    # state of the routers, the positive tests will likely fail
#
def _presettled_large_msg_rx_detach(self, config, count, drop_clients):
# detach receivers during receive
body = " MCAST PRESETTLED LARGE RX DETACH " + LARGE_PAYLOAD
test = MulticastPresettledRxFail(config, count,
drop_clients,
detach=True,
body=body)
test.run()
self.assertEqual(None, test.error)
def test_01_presettled_large_msg_rx_detach(self):
self._presettled_large_msg_rx_detach(self.config, 10, ['R-EA1-1', 'R-EB1-2'])
self._presettled_large_msg_rx_detach(self.config, 10, ['R-INT_A-2', 'R-INT_B-1'])
def _presettled_large_msg_rx_close(self, config, count, drop_clients):
# close receiver connections during receive
body = " MCAST PRESETTLED LARGE RX CLOSE " + LARGE_PAYLOAD
test = MulticastPresettledRxFail(config, count,
drop_clients,
detach=False,
body=body)
test.run()
self.assertEqual(None, test.error)
def test_02_presettled_large_msg_rx_close(self):
self._presettled_large_msg_rx_close(self.config, 10, ['R-EA1-2', 'R-EB1-1'])
self._presettled_large_msg_rx_close(self.config, 10, ['R-INT_A-1', 'R-INT_B-2'])
def _unsettled_large_msg_rx_detach(self, config, count, drop_clients):
# detach receivers during the test
body = " MCAST UNSETTLED LARGE RX DETACH " + LARGE_PAYLOAD
test = MulticastUnsettledRxFail(self.config, count, drop_clients, detach=True, body=body)
test.run()
self.assertEqual(None, test.error)
def test_10_unsettled_large_msg_rx_detach(self):
self._unsettled_large_msg_rx_detach(self.config, 10, ['R-EA1-1', 'R-EB1-2'])
self._unsettled_large_msg_rx_detach(self.config, 10, ['R-INT_A-2', 'R-INT_B-1'])
def _unsettled_large_msg_rx_close(self, config, count, drop_clients):
# close receiver connections during test
body = " MCAST UNSETTLED LARGE RX CLOSE " + LARGE_PAYLOAD
test = MulticastUnsettledRxFail(self.config, count, drop_clients, detach=False, body=body)
test.run()
self.assertEqual(None, test.error)
def test_11_unsettled_large_msg_rx_close(self):
self._unsettled_large_msg_rx_close(self.config, 10, ['R-EA1-2', 'R-EB1-1', ])
self._unsettled_large_msg_rx_close(self.config, 10, ['R-INT_A-1', 'R-INT_B-2'])
#
# now the positive tests
#
def test_50_presettled(self):
# Simply send a bunch of pre-settled multicast messages
body = " MCAST PRESETTLED "
test = MulticastPresettled(self.config, 10, body, SendPresettled())
        test.run()
        self.assertEqual(None, test.error)
def test_51_presettled_mixed_large_msg(self):
# Same as above, but large message bodies (mixed sender settle mode)
body = " MCAST MAYBE PRESETTLED LARGE " + LARGE_PAYLOAD
test = MulticastPresettled(self.config, 11, body, SendMixed())
test.run()
self.assertEqual(None, test.error)
def test_52_presettled_large_msg(self):
# Same as above, (pre-settled sender settle mode)
body = " MCAST PRESETTLED LARGE " + LARGE_PAYLOAD
test = MulticastPresettled(self.config, 13, body, SendPresettled())
test.run()
self.assertEqual(None, test.error)
def test_60_unsettled_3ack(self):
# Sender sends unsettled, waits for Outcome from Receiver then settles
# Expect all messages to be accepted
body = " MCAST UNSETTLED "
test = MulticastUnsettled3Ack(self.config, 10, body)
test.run()
self.assertEqual(None, test.error)
self.assertEqual(test.n_outcomes[Delivery.ACCEPTED], test.n_sent)
def test_61_unsettled_3ack_large_msg(self):
# Same as above but with multiframe streaming
body = " MCAST UNSETTLED LARGE " + LARGE_PAYLOAD
test = MulticastUnsettled3Ack(self.config, 11, body=body)
test.run()
self.assertEqual(None, test.error)
self.assertEqual(test.n_outcomes[Delivery.ACCEPTED], test.n_sent)
def _unsettled_3ack_outcomes(self,
config,
count,
outcomes,
expected):
body = " MCAST UNSETTLED 3ACK OUTCOMES " + LARGE_PAYLOAD
test = MulticastUnsettled3Ack(self.config,
count,
body,
outcomes=outcomes)
test.run()
self.assertEqual(None, test.error)
self.assertEqual(test.n_outcomes[expected], test.n_sent)
def test_63_unsettled_3ack_outcomes(self):
# Verify the expected outcome is returned to the sender when the
# receivers return different outcome values. If no outcome is
# specified for a receiver it will default to ACCEPTED
# expect REJECTED if any reject:
self._unsettled_3ack_outcomes(self.config, 3,
{'R-EB1-1': Delivery.REJECTED,
'R-EB1-2': Delivery.MODIFIED,
'R-INT_B-2': Delivery.RELEASED},
Delivery.REJECTED)
self._unsettled_3ack_outcomes(self.config, 3,
{'R-EB1-1': Delivery.REJECTED,
'R-INT_B-2': Delivery.RELEASED},
Delivery.REJECTED)
# expect ACCEPT if no rejects
self._unsettled_3ack_outcomes(self.config, 3,
{'R-EB1-2': Delivery.MODIFIED,
'R-INT_B-2': Delivery.RELEASED},
Delivery.ACCEPTED)
# expect MODIFIED over RELEASED
self._unsettled_3ack_outcomes(self.config, 3,
{'R-EA1-1': Delivery.RELEASED,
'R-EA1-2': Delivery.RELEASED,
'R-INT_A-1': Delivery.RELEASED,
'R-INT_A-2': Delivery.RELEASED,
'R-INT_B-1': Delivery.RELEASED,
'R-INT_B-2': Delivery.RELEASED,
'R-EB1-1': Delivery.RELEASED,
'R-EB1-2': Delivery.MODIFIED},
Delivery.MODIFIED)
# and released only if all released
self._unsettled_3ack_outcomes(self.config, 3,
{'R-EA1-1': Delivery.RELEASED,
'R-EA1-2': Delivery.RELEASED,
'R-INT_A-1': Delivery.RELEASED,
'R-INT_A-2': Delivery.RELEASED,
'R-INT_B-1': Delivery.RELEASED,
'R-INT_B-2': Delivery.RELEASED,
'R-EB1-1': Delivery.RELEASED,
'R-EB1-2': Delivery.RELEASED},
Delivery.RELEASED)
def test_70_unsettled_1ack(self):
# Sender sends unsettled, expects both outcome and settlement from
# receiver before sender settles locally
body = " MCAST UNSETTLED 1ACK "
test = MulticastUnsettled1Ack(self.config, 10, body)
test.run()
self.assertEqual(None, test.error)
def test_71_unsettled_1ack_large_msg(self):
# Same as above but with multiframe streaming
body = " MCAST UNSETTLED 1ACK LARGE " + LARGE_PAYLOAD
test = MulticastUnsettled1Ack(self.config, 10, body)
test.run()
self.assertEqual(None, test.error)
def test_80_unsettled_3ack_message_annotations(self):
body = " MCAST UNSETTLED 3ACK LARGE MESSAGE ANNOTATIONS " + LARGE_PAYLOAD
test = MulticastUnsettled3AckMA(self.config, 10, body)
test.run()
self.assertEqual(None, test.error)
def test_90_credit_no_subscribers(self):
"""
Verify that multicast senders are blocked until a consumer is present.
"""
test = MulticastCreditBlocked(address=self.EA1.listener,
target='multicast/no/subscriber1')
test.run()
self.assertEqual(None, test.error)
test = MulticastCreditBlocked(address=self.INT_A.listener,
target='multicast/no/subscriber2')
test.run()
self.assertEqual(None, test.error)
def test_91_anonymous_sender(self):
"""
Verify that senders over anonymous links do not block waiting for
consumers.
"""
# no receiver - should not block, return RELEASED
msg = Message(body="test_100_anonymous_sender")
msg.address = "multicast/test_100_anonymous_sender"
tx = AsyncTestSender(address=self.INT_B.listener,
count=5,
target=None,
message=msg,
container_id="test_100_anonymous_sender")
tx.wait()
self.assertEqual(5, tx.released)
# now add a receiver:
rx = AsyncTestReceiver(address=self.INT_A.listener,
source=msg.address)
self.INT_B.wait_address(msg.address)
tx = AsyncTestSender(address=self.INT_B.listener,
count=5,
target=None,
message=msg,
container_id="test_100_anonymous_sender")
tx.wait()
self.assertEqual(5, tx.accepted)
rx.stop()
def test_999_check_for_leaks(self):
self._check_for_leaks()
#
# Settlement options for Link attach
#
class SendPresettled(LinkOption):
"""
All messages are sent presettled
"""
def apply(self, link):
link.snd_settle_mode = Link.SND_SETTLED
link.rcv_settle_mode = Link.RCV_FIRST
class SendMixed(LinkOption):
"""
Messages may be sent unsettled or settled
"""
def apply(self, link):
link.snd_settle_mode = Link.SND_MIXED
link.rcv_settle_mode = Link.RCV_FIRST
class Link1Ack(LinkOption):
"""
Messages will be sent unsettled
"""
def apply(self, link):
link.snd_settle_mode = Link.SND_UNSETTLED
link.rcv_settle_mode = Link.RCV_FIRST
class Link3Ack(LinkOption):
"""
Messages will be sent unsettled and the receiver will wait for sender to
settle first.
"""
def apply(self, link):
link.snd_settle_mode = Link.SND_UNSETTLED
link.rcv_settle_mode = Link.RCV_SECOND
class MulticastBase(MessagingHandler):
"""
Common multicast boilerplate code
"""
def __init__(self, config, count, body, topic=None, **handler_kwargs):
super(MulticastBase, self).__init__(**handler_kwargs)
self.msg_count = count
self.config = config
self.topic = topic or "multicast/test"
self.body = body
# totals
        self.n_senders = 0
        self.n_receivers = 0
self.n_sent = 0
self.n_received = 0
self.n_settled = 0
self.n_accepted = 0
self.n_released = 0
self.n_rejected = 0
self.n_modified = 0
self.n_partial = 0
# all maps indexed by client name:
self.receivers = {}
self.senders = {}
self.r_conns = {}
self.s_conns = {}
# per receiver
self.c_received = {}
# count per outcome
self.n_outcomes = {}
self.error = None
self.timers = []
self.reactor = None
def done(self):
# stop the reactor and clean up the test
for t in self.timers:
t.cancel()
for c_dict in [self.r_conns, self.s_conns]:
for conn in c_dict.values():
conn.close()
self.r_conns = {}
self.s_conns = {}
def timeout(self):
self.error = "Timeout Expired"
self.done()
def create_receiver(self, container, conn, source, name):
# must override in subclass
assert(False)
def create_sender(self, container, conn, target, name):
# must override in subclass
assert(False)
def on_start(self, event):
self.reactor = event.reactor
self.timers.append(self.reactor.schedule(TIMEOUT, TestTimeout(self)))
        # create all the receivers first
for cfg in self.config:
for name in cfg['receivers']:
conn = event.container.connect(cfg['router'].listener)
assert(name not in self.r_conns)
self.r_conns[name] = conn
self.create_receiver(event.container, conn, self.topic, name)
self.n_receivers += 1
self.c_received[name] = 0
def on_link_opened(self, event):
if event.receiver:
r_name = event.receiver.name
self.receivers[r_name] = event.receiver
# create senders after all receivers are opened
# makes it easy to check when the clients are ready
if len(self.receivers) == self.n_receivers:
for cfg in self.config:
for name in cfg['senders']:
conn = event.container.connect(cfg['router'].listener)
assert(name not in self.s_conns)
self.s_conns[name] = conn
self.create_sender(event.container, conn, self.topic, name)
self.n_senders += 1
def on_sendable(self, event):
s_name = event.sender.name
if s_name not in self.senders:
self.senders[s_name] = event.sender
if len(self.senders) == self.n_senders:
# all senders ready to send, now wait until the routes settle
for cfg in self.config:
cfg['router'].wait_address(self.topic,
subscribers=cfg['subscribers'],
remotes=cfg['remotes'])
for sender in self.senders.values():
self.do_send(sender)
def on_message(self, event):
if event.delivery.partial:
self.n_partial += 1
else:
dlv = event.delivery
self.n_received += 1
name = event.link.name
self.c_received[name] = 1 + self.c_received.get(name, 0)
def on_accepted(self, event):
self.n_accepted += 1
name = event.link.name
self.n_outcomes[Delivery.ACCEPTED] = 1 + self.n_outcomes.get(Delivery.ACCEPTED, 0)
def on_released(self, event):
# for some reason Proton 'helpfully' calls on_released even though the
# delivery state is actually MODIFIED
if event.delivery.remote_state == Delivery.MODIFIED:
return self.on_modified(event)
self.n_released += 1
name = event.link.name
self.n_outcomes[Delivery.RELEASED] = 1 + self.n_outcomes.get(Delivery.RELEASED, 0)
def on_modified(self, event):
self.n_modified += 1
name = event.link.name
self.n_outcomes[Delivery.MODIFIED] = 1 + self.n_outcomes.get(Delivery.MODIFIED, 0)
def on_rejected(self, event):
self.n_rejected += 1
name = event.link.name
self.n_outcomes[Delivery.REJECTED] = 1 + self.n_outcomes.get(Delivery.REJECTED, 0)
def on_settled(self, event):
self.n_settled += 1
def run(self):
Container(self).run()
# wait until all routers have cleaned up the route tables
clean = False
while not clean:
clean = True
for cfg in self.config:
mgmt = cfg['router'].management
atype = 'org.apache.qpid.dispatch.router.address'
addrs = mgmt.query(type=atype).get_dicts()
if list(filter(lambda a: a['name'].find(self.topic) != -1, addrs)):
clean = False
break
if not clean:
sleep(0.1)
class MulticastPresettled(MulticastBase):
"""
Test multicast forwarding for presettled transfers.
Verifies that all messages are settled by the sender
"""
def __init__(self, config, count, body, settlement_mode):
# use a large prefetch to prevent drops
super(MulticastPresettled, self).__init__(config,
count,
body,
prefetch=(count * 1024),
auto_accept=False,
auto_settle=False)
self.settlement_mode = settlement_mode
self.unexpected_unsettled = 0
self.expected_settled = 0
self.sender_settled = 0
self.done_count = 0
self.unsettled_deliveries = dict()
def create_receiver(self, container, conn, source, name):
return container.create_receiver(conn, source=source, name=name,
options=self.settlement_mode)
def create_sender(self, container, conn, target, name):
return container.create_sender(conn, target=target, name=name,
options=self.settlement_mode)
def do_send(self, sender):
for i in range(self.msg_count):
msg = Message(body=" %s -> %s:%s" % (sender.name, i, self.body))
dlv = sender.send(msg)
# settled before sending out the message
dlv.settle()
self.n_sent += 1
def check_if_done(self):
# wait for all present receivers to receive all messages
# and for all received messages to be settled by the
# sender
to_rcv = self.n_senders * self.msg_count * self.n_receivers
if to_rcv == self.n_received and not self.unsettled_deliveries:
self.done()
def on_message(self, event):
super(MulticastPresettled, self).on_message(event)
if event.receiver:
if not event.delivery.settled:
# it may be that settle will come after on_message
# so track that here
event.delivery.update(Delivery.ACCEPTED)
self.unexpected_unsettled += 1
tag = str(event.delivery.tag)
if tag not in self.unsettled_deliveries:
self.unsettled_deliveries[tag] = 1
else:
self.unsettled_deliveries[tag] += 1
else:
self.expected_settled += 1
event.receiver.flow(100)
self.check_if_done()
def on_settled(self, event):
super(MulticastPresettled, self).on_settled(event)
if event.receiver:
self.sender_settled += 1
tag = str(event.delivery.tag)
try:
# got a delayed settle
self.unsettled_deliveries[tag] -= 1
if self.unsettled_deliveries[tag] == 0:
del self.unsettled_deliveries[tag]
except KeyError:
pass
self.check_if_done()
class MulticastPresettledRxFail(MulticastPresettled):
"""
Spontaneously close a receiver or connection on message received
"""
def __init__(self, config, count, drop_clients, detach, body):
super(MulticastPresettledRxFail, self).__init__(config, count, body, SendPresettled())
self.drop_clients = drop_clients
self.detach = detach
def check_if_done(self):
# Verify each receiver got the expected number of messages.
# Avoid waiting for dropped receivers.
done = True
to_rcv = self.n_senders * self.msg_count
for name, count in self.c_received.items():
if name not in self.drop_clients:
if count != to_rcv:
done = False
if done:
self.done()
def on_message(self, event):
# close the receiver on arrival of the first message
r_name = event.receiver.name
if r_name in self.drop_clients:
if self.detach:
if event.receiver.state & Link.LOCAL_ACTIVE:
event.receiver.close()
elif event.connection.state & Connection.LOCAL_ACTIVE:
event.connection.close()
super(MulticastPresettledRxFail, self).on_message(event)
class MulticastUnsettled3Ack(MulticastBase):
"""
Send count messages per sender, senders wait for terminal outcome from
receivers before settling
"""
def __init__(self, config, count, body, outcomes=None):
pfetch = int((count + 1)/2)
super(MulticastUnsettled3Ack, self).__init__(config,
count,
body,
prefetch=pfetch,
auto_accept=False,
auto_settle=False)
self.outcomes = outcomes or {}
def create_receiver(self, container, conn, source, name):
return container.create_receiver(conn, source=source, name=name,
options=Link3Ack())
def create_sender(self, container, conn, target, name):
return container.create_sender(conn, target=target, name=name,
options=Link3Ack())
def do_send(self, sender):
for i in range(self.msg_count):
msg = Message(body=" %s -> %s:%s" % (sender.name, i, self.body))
dlv = sender.send(msg)
self.n_sent += 1
def on_message(self, event):
        # receiver: send the outcome but do not settle
super(MulticastUnsettled3Ack, self).on_message(event)
if event.delivery.settled:
self.error = "Unexpected pre-settled message received!"
self.done()
return
r_name = event.receiver.name
outcome = self.outcomes.get(r_name, Delivery.ACCEPTED)
event.delivery.update(outcome)
if event.receiver.credit == 0:
event.receiver.flow(1)
def on_settled(self, event):
super(MulticastUnsettled3Ack, self).on_settled(event)
event.delivery.settle()
self.check_if_done()
def on_accepted(self, event):
super(MulticastUnsettled3Ack, self).on_accepted(event)
event.delivery.settle()
self.check_if_done()
def on_released(self, event):
super(MulticastUnsettled3Ack, self).on_released(event)
event.delivery.settle()
self.check_if_done()
def on_modified(self, event):
super(MulticastUnsettled3Ack, self).on_modified(event)
event.delivery.settle()
self.check_if_done()
def on_rejected(self, event):
super(MulticastUnsettled3Ack, self).on_rejected(event)
event.delivery.settle()
self.check_if_done()
def check_if_done(self):
to_send = self.msg_count * self.n_senders
to_rcv = to_send * self.n_receivers
n_outcomes = (self.n_accepted + self.n_rejected
+ self.n_modified + self.n_released)
# expect senders to see settlement
if (self.n_sent == to_send
and self.n_received == to_rcv
and n_outcomes == to_send
and self.n_settled == to_rcv):
self.done()
class MulticastUnsettled1Ack(MulticastUnsettled3Ack):
"""
Sender sends unsettled, the receiver sets outcome and immediately settles
"""
def __init__(self, config, count, body, outcomes=None):
        super(MulticastUnsettled1Ack, self).__init__(config,
                                                     count,
                                                     body,
                                                     outcomes=outcomes)
def create_receiver(self, container, conn, source, name):
return container.create_receiver(conn, source=source, name=name,
options=Link1Ack())
def create_sender(self, container, conn, target, name):
return container.create_sender(conn, target=target, name=name,
options=Link1Ack())
def on_message(self, event):
# receiver: send outcome and settle
super(MulticastUnsettled1Ack, self).on_message(event)
event.delivery.settle()
def check_if_done(self):
to_send = self.msg_count * self.n_senders
to_rcv = to_send * self.n_receivers
n_outcomes = (self.n_accepted + self.n_rejected
+ self.n_modified + self.n_released)
# expect sender to see settlement
if (self.n_received == to_rcv
and n_outcomes == to_send
and self.n_settled == to_send):
self.done()
class MulticastUnsettledRxFail(MulticastUnsettled3Ack):
"""
Spontaneously close a receiver or connection on message received
"""
def __init__(self, config, count, drop_clients, detach, body):
super(MulticastUnsettledRxFail, self).__init__(config, count, body)
self.drop_clients = drop_clients
self.detach = detach
def check_if_done(self):
# Verify each receiver got the expected number of messages.
# Avoid waiting for dropped receivers.
done = True
to_rcv = self.n_senders * self.msg_count
for name, count in self.c_received.items():
if name not in self.drop_clients:
if count != to_rcv:
done = False
if done:
self.done()
def on_message(self, event):
# close the receiver on arrival of the first message
r_name = event.receiver.name
if r_name in self.drop_clients:
if self.detach:
if event.receiver.state & Link.LOCAL_ACTIVE:
event.receiver.close()
elif event.connection.state & Connection.LOCAL_ACTIVE:
event.connection.close()
super(MulticastUnsettledRxFail, self).on_message(event)
class MulticastUnsettled3AckMA(MulticastUnsettled3Ack):
"""
Try 3 Ack, but with a bunch of user Message Annotations (why not?)
"""
def __init__(self, config, count, body, outcomes=None):
super(MulticastUnsettled3AckMA, self).__init__(config,
count,
body,
                                                       outcomes=outcomes)
self._huge_ma = {
"my-key": "my-data",
"my-other-key": "my-other-data",
"my-map": { "my-map-key1": "X",
"my-map-key2": 0x12,
"my-map-key3": "+0123456789" * 101,
"my-map-list": [i for i in range(97)]
},
"my-last-key": "so long, folks!"
}
def do_send(self, sender):
for i in range(self.msg_count):
msg = Message(body=" %s -> %s:%s" % (sender.name, i, self.body))
msg.annotations = self._huge_ma
dlv = sender.send(msg)
self.n_sent += 1
def on_message(self, event):
msg = event.message
if event.message.annotations != self._huge_ma:
self.error = "forwarded message annotations mismatch original"
self.done()
return
super(MulticastUnsettled3AckMA, self).on_message(event)
class MulticastCreditBlocked(MessagingHandler):
"""
Ensure that credit is not provided when there are no consumers present.
This client connects to 'address' and creates a sender to 'target'. Once
the sending link has opened a short timer is started. It is expected that
on_sendable() is NOT invoked before the timer expires.
"""
def __init__(self, address, target=None, timeout=2, **handler_kwargs):
super(MulticastCreditBlocked, self).__init__(**handler_kwargs)
self.target = target
self.address = address
self.time_out = timeout
self.conn = None
self.sender = None
self.timer = None
self.error = "Timeout NOT triggered as expected!"
def done(self):
# stop the reactor and clean up the test
if self.timer:
self.timer.cancel()
if self.conn:
self.conn.close()
def timeout(self):
self.error = None
self.done()
def on_start(self, event):
self.conn = event.container.connect(self.address)
self.sender = event.container.create_sender(self.conn,
target=self.target,
name="McastBlocked")
def on_link_opened(self, event):
self.timer = event.reactor.schedule(self.time_out, TestTimeout(self))
def on_sendable(self, event):
self.error = "Unexpected call to on_sendable()!"
self.done()
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
|
apache-2.0
|
popazerty/try
|
lib/python/Components/Lcd.py
|
3
|
11670
|
from boxbranding import getBoxType
from twisted.internet import threads
from enigma import eDBoxLCD, eTimer
from config import config, ConfigSubsection, ConfigSelection, ConfigSlider, ConfigYesNo, ConfigNothing
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists
from Components.Network import iNetwork
from Components.About import about
import usb
def IconCheck(session=None, **kwargs):
if fileExists("/proc/stb/lcd/symbol_network") or fileExists("/proc/stb/lcd/symbol_usb"):
global networklinkpoller
networklinkpoller = IconCheckPoller()
networklinkpoller.start()
class IconCheckPoller:
def __init__(self):
self.timer = eTimer()
def start(self):
if self.iconcheck not in self.timer.callback:
self.timer.callback.append(self.iconcheck)
self.timer.startLongTimer(0)
def stop(self):
if self.iconcheck in self.timer.callback:
self.timer.callback.remove(self.iconcheck)
self.timer.stop()
def iconcheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(30)
def JobTask(self):
# Network state symbol
netSymbol = "/proc/stb/lcd/symbol_network"
if fileExists(netSymbol):
linkUp = 0
if config.lcd.mode.value == '1':
for ifName in iNetwork.getInstalledAdapters():
ifState = about.getIfConfig(ifName)
if (
'flags' in ifState and
ifState['flags'].get('up') and
ifState['flags'].get('running')
):
linkUp = 1
break
open(netSymbol, "w").write(str(linkUp))
# USB devices connected symbol
usbSymbol = "/proc/stb/lcd/symbol_usb"
if fileExists(usbSymbol):
USBState = 0
busses = usb.busses()
if config.lcd.mode.value == '1':
for bus in busses:
devices = bus.devices
for dev in devices:
if dev.deviceClass != 9 and dev.deviceClass != 2 and dev.idVendor > 0:
# print ' '
# print "Device:", dev.filename
# print " Number:", dev.deviceClass
# print " idVendor: %d (0x%04x)" % (dev.idVendor, dev.idVendor)
# print " idProduct: %d (0x%04x)" % (dev.idProduct, dev.idProduct)
USBState = 1
break
open(usbSymbol, "w").write(str(USBState))
self.timer.startLongTimer(30)
class LCD:
def __init__(self):
self.oled_type = eDBoxLCD.getInstance().isOled()
if self.oled_type == 3:
			# Bitmapped OLED has 16 levels of brightness
self.oled_brightness_scale = 15
else:
# LCD display has 10 levels of brightness
self.oled_brightness_scale = 10
print "[LCD] oled_type=%d, oled_brightness_scale=%d" % (self.oled_type, self.oled_brightness_scale)
def setBright(self, value):
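		# scale the configured 0..oled_brightness_scale value to the 0..255
		# range expected by the display driver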
value *= 255
value /= self.oled_brightness_scale
if value > 255:
value = 255
eDBoxLCD.getInstance().setLCDBrightness(value)
def setContrast(self, value):
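		# scale the configured 0..20 contrast value to the 0..63 range
		# expected by the display driver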
value *= 63
value /= 20
if value > 63:
value = 63
eDBoxLCD.getInstance().setLCDContrast(value)
def setInverted(self, value):
if value:
value = 255
eDBoxLCD.getInstance().setInverted(value)
def setFlipped(self, value):
eDBoxLCD.getInstance().setFlipped(value)
def isOled(self):
return self.oled_type
def setMode(self, value):
print '[LCD] setMode', value
f = open("/proc/stb/lcd/show_symbols", "w")
f.write(value)
f.close()
def setRepeat(self, value):
print '[LCD] setRepeat', value
f = open("/proc/stb/lcd/scroll_repeats", "w")
f.write(value)
f.close()
def setScrollspeed(self, value):
print '[LCD] setScrollspeed', value
f = open("/proc/stb/lcd/scroll_delay", "w")
f.write(str(value))
f.close()
def setLEDNormalState(self, value):
eDBoxLCD.getInstance().setLED(value, 0)
def setLEDDeepStandbyState(self, value):
eDBoxLCD.getInstance().setLED(value, 1)
def setLEDBlinkingTime(self, value):
eDBoxLCD.getInstance().setLED(value, 2)
def setLEDStandby(self, value):
file = open("/proc/stb/power/standbyled", "w")
file.write(value and "on" or "off")
file.close()
def setLCDMiniTVMode(self, value):
print '[LCD] setLCDMiniTVMode', value
f = open('/proc/stb/lcd/mode', "w")
f.write(value)
f.close()
def setLCDMiniTVPIPMode(self, value):
print '[LCD] setLCDMiniTVPIPMode', value
def setLCDMiniTVFPS(self, value):
print '[LCD] setLCDMiniTVFPS',value
f = open('/proc/stb/lcd/fps', "w")
f.write("%d \n" % value)
f.close()
def leaveStandby():
config.lcd.bright.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightness.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def standbyCounterChanged(configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
config.lcd.standby.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightnessstandby.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def InitLcd():
if getBoxType() in ('amikomini', 'dynaspark', 'amiko8900', 'sognorevolution', 'arguspingulux', 'arguspinguluxmini', 'arguspinguluxplus', 'sparkreloaded', 'sabsolo', 'sparklx', 'gis8120', 'gb800se', 'gb800solo', 'iqonios300hd', 'tmsingle', 'tmnano2super', 'vusolo', 'et4x00', 'et5x00', 'et6x00'):
detected = False
else:
detected = eDBoxLCD.getInstance().detected()
ilcd = LCD()
SystemInfo["Display"] = detected
config.lcd = ConfigSubsection()
if SystemInfo["StandbyLED"]:
def setLEDstandby(configElement):
ilcd.setLEDStandby(configElement.value)
config.usage.standbyLED = ConfigYesNo(default=True)
config.usage.standbyLED.addNotifier(setLEDstandby)
if SystemInfo["LEDButtons"]:
def setLEDnormalstate(configElement):
ilcd.setLEDNormalState(configElement.value)
def setLEDdeepstandby(configElement):
ilcd.setLEDDeepStandbyState(configElement.value)
def setLEDblinkingtime(configElement):
ilcd.setLEDBlinkingTime(configElement.value)
config.lcd.ledblinkingtime = ConfigSlider(default = 5, increment = 1, limits = (0,15))
config.lcd.ledblinkingtime.addNotifier(setLEDblinkingtime)
config.lcd.ledbrightnessdeepstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDdeepstandby)
config.lcd.ledbrightnessdeepstandby.apply = lambda : setLEDdeepstandby(config.lcd.ledbrightnessdeepstandby)
config.lcd.ledbrightnessstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessstandby.apply = lambda : setLEDnormalstate(config.lcd.ledbrightnessstandby)
config.lcd.ledbrightness = ConfigSlider(default = 3, increment = 1, limits = (0,15))
config.lcd.ledbrightness.addNotifier(setLEDnormalstate)
config.lcd.ledbrightness.apply = lambda : setLEDnormalstate(config.lcd.ledbrightness)
config.lcd.ledbrightness.callNotifiersOnSaveAndCancel = True
if detected:
config.lcd.scroll_speed = ConfigSelection(default="300", choices=[
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
config.lcd.scroll_delay = ConfigSelection(default="10000", choices=[
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
def setLCDbright(configElement):
ilcd.setBright(configElement.value)
def setLCDcontrast(configElement):
ilcd.setContrast(configElement.value)
def setLCDinverted(configElement):
ilcd.setInverted(configElement.value)
def setLCDflipped(configElement):
ilcd.setFlipped(configElement.value)
def setLCDmode(configElement):
ilcd.setMode(configElement.value)
def setLCDrepeat(configElement):
ilcd.setRepeat(configElement.value)
def setLCDscrollspeed(configElement):
ilcd.setScrollspeed(configElement.value)
def setLCDminitvmode(configElement):
ilcd.setLCDMiniTVMode(configElement.value)
def setLCDminitvpipmode(configElement):
ilcd.setLCDMiniTVPIPMode(configElement.value)
def setLCDminitvfps(configElement):
ilcd.setLCDMiniTVFPS(configElement.value)
standby_default = ilcd.oled_brightness_scale * 2 / 3
if not ilcd.isOled():
config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20))
config.lcd.contrast.addNotifier(setLCDcontrast)
else:
config.lcd.contrast = ConfigNothing()
class BrightnessSlider(ConfigSlider):
def __init__(self, **kwargs):
self._value = None
ConfigSlider.__init__(self, **kwargs)
def setValue(self, value):
if self._value != value:
self._value = value
self.changed()
value = property(ConfigSlider.getValue, setValue)
def onSelect(self, session):
self.changed()
def onDeselect(self, session):
ConfigSlider.onDeselect(self, session)
b = config.lcd.bright.saved_value
if not b:
b = config.lcd.bright.default
ilcd.setBright(int(b))
config.lcd.standby = BrightnessSlider(default=standby_default, limits=(0, ilcd.oled_brightness_scale))
config.lcd.standby.addNotifier(setLCDbright)
config.lcd.standby.apply = lambda: setLCDbright(config.lcd.standby)
config.lcd.standby.callNotifiersOnSaveAndCancel = True
config.lcd.bright = BrightnessSlider(default=ilcd.oled_brightness_scale, limits=(0, ilcd.oled_brightness_scale))
config.lcd.bright.addNotifier(setLCDbright)
config.lcd.bright.apply = lambda: setLCDbright(config.lcd.bright)
config.lcd.bright.callNotifiersOnSaveAndCancel = True
config.lcd.invert = ConfigYesNo(default=False)
config.lcd.invert.addNotifier(setLCDinverted)
config.lcd.flip = ConfigYesNo(default=False)
config.lcd.flip.addNotifier(setLCDflipped)
if SystemInfo["LCDMiniTV"]:
config.lcd.minitvmode = ConfigSelection([("0", _("normal")), ("1", _("MiniTV")), ("2", _("OSD")), ("3", _("MiniTV with OSD"))], "0")
config.lcd.minitvmode.addNotifier(setLCDminitvmode)
config.lcd.minitvpipmode = ConfigSelection([("0", _("off")), ("5", _("PIP")), ("7", _("PIP with OSD"))], "0")
config.lcd.minitvpipmode.addNotifier(setLCDminitvpipmode)
config.lcd.minitvfps = ConfigSlider(default=30, limits=(0, 30))
config.lcd.minitvfps.addNotifier(setLCDminitvfps)
if fileExists("/proc/stb/lcd/scroll_delay"):
config.lcd.scrollspeed = ConfigSlider(default=150, increment=10, limits=(0, 500))
config.lcd.scrollspeed.addNotifier(setLCDscrollspeed)
else:
config.lcd.scrollspeed = ConfigNothing()
if fileExists("/proc/stb/lcd/scroll_repeats"):
config.lcd.repeat = ConfigSelection([("0", _("None")), ("1", _("1x")), ("2", _("2x")), ("3", _("3x")), ("4", _("4x")), ("5", _("5x")), ("10", _("10x")), ("500", _("Continuous"))], "3")
config.lcd.repeat.addNotifier(setLCDrepeat)
else:
config.lcd.repeat = ConfigNothing()
if fileExists("/proc/stb/lcd/show_symbols"):
config.lcd.mode = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
config.lcd.mode.addNotifier(setLCDmode)
else:
config.lcd.mode = ConfigNothing()
else:
def doNothing():
pass
config.lcd.contrast = ConfigNothing()
config.lcd.bright = ConfigNothing()
config.lcd.standby = ConfigNothing()
config.lcd.bright.apply = lambda: doNothing()
config.lcd.standby.apply = lambda: doNothing()
config.lcd.mode = ConfigNothing()
config.lcd.repeat = ConfigNothing()
config.lcd.scrollspeed = ConfigNothing()
config.lcd.ledbrightness = ConfigNothing()
config.lcd.ledbrightness.apply = lambda: doNothing()
config.lcd.ledbrightnessstandby = ConfigNothing()
config.lcd.ledbrightnessstandby.apply = lambda: doNothing()
config.lcd.ledbrightnessdeepstandby = ConfigNothing()
config.lcd.ledbrightnessdeepstandby.apply = lambda: doNothing()
config.lcd.ledblinkingtime = ConfigNothing()
config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call=False)
|
gpl-2.0
|
renatofb/weblate
|
weblate/trans/tests/test_autofix.py
|
11
|
3620
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for automatic fixups.
"""
from django.test import TestCase
from weblate.trans.tests.test_checks import MockUnit
from weblate.trans.autofixes import fix_target
from weblate.trans.autofixes.chars import (
ReplaceTrailingDotsWithEllipsis, RemoveZeroSpace,
)
from weblate.trans.autofixes.whitespace import SameBookendingWhitespace
class AutoFixTest(TestCase):
def test_ellipsis(self):
unit = MockUnit(source=u'Foo…')
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(
fix.fix_target(['Bar...'], unit),
([u'Bar…'], True)
)
self.assertEqual(
fix.fix_target(['Bar... '], unit),
([u'Bar... '], False)
)
def test_no_ellipsis(self):
unit = MockUnit(source=u'Foo...')
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(
fix.fix_target(['Bar...'], unit),
([u'Bar...'], False)
)
self.assertEqual(
fix.fix_target([u'Bar…'], unit),
([u'Bar…'], False)
)
def test_whitespace(self):
unit = MockUnit(source=u'Foo\n')
fix = SameBookendingWhitespace()
self.assertEqual(
fix.fix_target(['Bar'], unit),
([u'Bar\n'], True)
)
self.assertEqual(
fix.fix_target(['Bar\n'], unit),
([u'Bar\n'], False)
)
unit = MockUnit(source=u' ')
self.assertEqual(
fix.fix_target([' '], unit),
([' '], False)
)
def test_no_whitespace(self):
unit = MockUnit(source=u'Foo')
fix = SameBookendingWhitespace()
self.assertEqual(
fix.fix_target(['Bar'], unit),
([u'Bar'], False)
)
self.assertEqual(
fix.fix_target(['Bar\n'], unit),
([u'Bar'], True)
)
def test_zerospace(self):
unit = MockUnit(source=u'Foo\u200b')
fix = RemoveZeroSpace()
self.assertEqual(
fix.fix_target(['Bar'], unit),
([u'Bar'], False)
)
self.assertEqual(
fix.fix_target([u'Bar\u200b'], unit),
([u'Bar\u200b'], False)
)
def test_no_zerospace(self):
unit = MockUnit(source=u'Foo')
fix = RemoveZeroSpace()
self.assertEqual(
fix.fix_target(['Bar'], unit),
([u'Bar'], False)
)
self.assertEqual(
fix.fix_target([u'Bar\u200b'], unit),
([u'Bar'], True)
)
def test_fix_target(self):
unit = MockUnit(source=u'Foo…')
fixed, fixups = fix_target(['Bar...'], unit)
self.assertEqual(fixed, [u'Bar…'])
self.assertEqual(len(fixups), 1)
self.assertEqual(unicode(fixups[0]), u'Trailing ellipsis')
|
gpl-3.0
|
partp/gtg-services
|
GTG/backends/backend_tomboy.py
|
2
|
2467
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
'''
The tomboy backend. The actual backend is all in GenericTomboy, since it's
shared with the Gnote backend.
'''
from GTG.backends.genericbackend import GenericBackend
from GTG import _
from GTG.backends.generictomboy import GenericTomboy
class Backend(GenericTomboy):
'''
A simple class that adds some description to the GenericTomboy class.
It's done this way since Tomboy and Gnote backends have different
descriptions and Dbus addresses but the same backend behind them.
'''
_general_description = {
GenericBackend.BACKEND_NAME: "backend_tomboy",
GenericBackend.BACKEND_HUMAN_NAME: _("Tomboy"),
GenericBackend.BACKEND_AUTHORS: ["Luca Invernizzi"],
GenericBackend.BACKEND_TYPE: GenericBackend.TYPE_READWRITE,
GenericBackend.BACKEND_DESCRIPTION:
_("This synchronization service can synchronize all or part of"
" your Tomboy notes in GTG. If you decide it would be handy to"
" have one of your notes in your TODO list, just tag it "
"with the tag you have chosen (you'll configure it later"
"), and it will appear in GTG."),
}
_static_parameters = {
GenericBackend.KEY_ATTACHED_TAGS: {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_LIST_OF_STRINGS,
GenericBackend.PARAM_DEFAULT_VALUE: ["@GTG-Tomboy"]},
}
BUS_ADDRESS = ("org.gnome.Tomboy",
"/org/gnome/Tomboy/RemoteControl",
"org.gnome.Tomboy.RemoteControl")
|
gpl-3.0
|
EricMuller/mynotes-backend
|
requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/conch/test/test_insults.py
|
13
|
33581
|
# -*- test-case-name: twisted.conch.test.test_insults -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.python.reflect import namedAny
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol, ClientProtocol
from twisted.conch.insults.insults import (CS_UK, CS_US, CS_DRAWING,
CS_ALTERNATE,
CS_ALTERNATE_SPECIAL,
BLINK, UNDERLINE)
from twisted.conch.insults.insults import G0, G1
from twisted.conch.insults.insults import modes, privateModes
from twisted.python.compat import intToBytes, iterbytes
from twisted.python.constants import ValueConstant, Values
import textwrap
def _getattr(mock, name):
return super(Mock, mock).__getattribute__(name)
def occurrences(mock):
return _getattr(mock, 'occurrences')
def methods(mock):
return _getattr(mock, 'methods')
def _append(mock, obj):
occurrences(mock).append(obj)
default = object()
def _ecmaCodeTableCoordinate(column, row):
"""
Return the byte in 7- or 8-bit code table identified by C{column}
and C{row}.
"An 8-bit code table consists of 256 positions arranged in 16
columns and 16 rows. The columns and rows are numbered 00 to 15."
"A 7-bit code table consists of 128 positions arranged in 8
columns and 16 rows. The columns are numbered 00 to 07 and the
rows 00 to 15 (see figure 1)."
p.5 of "Standard ECMA-35: Character Code Structure and Extension
Techniques", 6th Edition (December 1994).
"""
# 8 and 15 both happen to take up 4 bits, so the first number
# should be shifted by 4 for both the 7- and 8-bit tables.
return bytes(bytearray([(column << 4) | row]))
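# Quick illustration (not part of the original suite): CUP's final byte sits
# at column 4, row 8 of the table, so
#     _ecmaCodeTableCoordinate(4, 8) == bytes(bytearray([(4 << 4) | 8])) == b'H'
# which is the b'H' that terminates a CSI cursor-position sequence.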
def _makeControlFunctionSymbols(name, colOffset, names, doc):
# the value for each name is the concatenation of the bit values
# of its x, y locations, with an offset of 4 added to its x value.
# so CUP is (0 + 4, 8) = (4, 8) = 4||8 = 1001000 = 72 = b"H"
# this is how it's defined in the standard!
attrs = {name: ValueConstant(_ecmaCodeTableCoordinate(i + colOffset, j))
for j, row in enumerate(names)
for i, name in enumerate(row)
if name}
attrs["__doc__"] = doc
return type(name, (Values,), attrs)
CSFinalByte = _makeControlFunctionSymbols(
"CSFinalByte",
colOffset=4,
names=[
# 4, 5, 6
['ICH', 'DCH', 'HPA'],
['CUU', 'SEE', 'HPR'],
['CUD', 'CPR', 'REP'],
['CUF', 'SU', 'DA'],
['CUB', 'SD', 'VPA'],
['CNL', 'NP', 'VPR'],
['CPL', 'PP', 'HVP'],
['CHA', 'CTC', 'TBC'],
['CUP', 'ECH', 'SM'],
['CHT', 'CVT', 'MC'],
['ED', 'CBT', 'HPB'],
['EL', 'SRS', 'VPB'],
['IL', 'PTX', 'RM'],
['DL', 'SDS', 'SGR'],
['EF', 'SIMD', 'DSR'],
['EA', None, 'DAQ'],
],
doc=textwrap.dedent("""
Symbolic constants for all control sequence final bytes
that do not imply intermediate bytes. This happens to cover
movement control sequences.
See page 11 of "Standard ECMA 48: Control Functions for Coded
Character Sets", 5th Edition (June 1991).
Each L{ValueConstant} maps a control sequence name to L{bytes}
"""))
C1SevenBit = _makeControlFunctionSymbols(
"C1SevenBit",
colOffset=4,
names=[
[None, "DCS"],
[None, "PU1"],
["BPH", "PU2"],
["NBH", "STS"],
[None, "CCH"],
["NEL", "MW"],
["SSA", "SPA"],
["ESA", "EPA"],
["HTS", "SOS"],
["HTJ", None],
["VTS", "SCI"],
["PLD", "CSI"],
["PLU", "ST"],
["RI", "OSC"],
["SS2", "PM"],
["SS3", "APC"],
],
doc=textwrap.dedent("""
Symbolic constants for all 7 bit versions of the C1 control functions
See page 9 "Standard ECMA 48: Control Functions for Coded
Character Sets", 5th Edition (June 1991).
Each L{ValueConstant} maps a control sequence name to L{bytes}
"""))
class Mock(object):
callReturnValue = default
def __init__(self, methods=None, callReturnValue=default):
"""
@param methods: Mapping of names to return values
@param callReturnValue: object __call__ should return
"""
self.occurrences = []
if methods is None:
methods = {}
self.methods = methods
if callReturnValue is not default:
self.callReturnValue = callReturnValue
def __call__(self, *a, **kw):
returnValue = _getattr(self, 'callReturnValue')
if returnValue is default:
returnValue = Mock()
# _getattr(self, 'occurrences').append(('__call__', returnValue, a, kw))
_append(self, ('__call__', returnValue, a, kw))
return returnValue
def __getattribute__(self, name):
methods = _getattr(self, 'methods')
if name in methods:
attrValue = Mock(callReturnValue=methods[name])
else:
attrValue = Mock()
# _getattr(self, 'occurrences').append((name, attrValue))
_append(self, (name, attrValue))
return attrValue
class MockMixin:
def assertCall(self, occurrence, methodName, expectedPositionalArgs=(),
expectedKeywordArgs={}):
attr, mock = occurrence
self.assertEqual(attr, methodName)
self.assertEqual(len(occurrences(mock)), 1)
[(call, result, args, kw)] = occurrences(mock)
self.assertEqual(call, "__call__")
self.assertEqual(args, expectedPositionalArgs)
self.assertEqual(kw, expectedKeywordArgs)
return result
_byteGroupingTestTemplate = """\
def testByte%(groupName)s(self):
transport = StringTransport()
proto = Mock()
parser = self.protocolFactory(lambda: proto)
parser.factory = self
parser.makeConnection(transport)
bytes = self.TEST_BYTES
while bytes:
chunk = bytes[:%(bytesPer)d]
bytes = bytes[%(bytesPer)d:]
parser.dataReceived(chunk)
self.verifyResults(transport, proto, parser)
"""
class ByteGroupingsMixin(MockMixin):
protocolFactory = None
for word, n in [('Pairs', 2), ('Triples', 3), ('Quads', 4), ('Quints', 5), ('Sexes', 6)]:
exec(_byteGroupingTestTemplate % {'groupName': word, 'bytesPer': n})
del word, n
def verifyResults(self, transport, proto, parser):
result = self.assertCall(occurrences(proto).pop(0), "makeConnection", (parser,))
self.assertEqual(occurrences(result), [])
del _byteGroupingTestTemplate
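# For ('Pairs', 2), for example, the exec above defines a method equivalent
# to this hypothetical expansion:
#
#     def testBytePairs(self):
#         transport = StringTransport()
#         proto = Mock()
#         parser = self.protocolFactory(lambda: proto)
#         parser.factory = self
#         parser.makeConnection(transport)
#         bytes = self.TEST_BYTES
#         while bytes:
#             chunk = bytes[:2]
#             bytes = bytes[2:]
#             parser.dataReceived(chunk)
#         self.verifyResults(transport, proto, parser)
#
# so each subclass replays TEST_BYTES in fixed-size chunks, exercising escape
# sequences that arrive split across dataReceived calls.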
class ServerArrowKeysTests(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# All the arrow keys once
TEST_BYTES = b'\x1b[A\x1b[B\x1b[C\x1b[D'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for arrow in (parser.UP_ARROW, parser.DOWN_ARROW,
parser.RIGHT_ARROW, parser.LEFT_ARROW):
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (arrow, None))
self.assertEqual(occurrences(result), [])
self.assertFalse(occurrences(proto))
class PrintableCharactersTests(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ServerProtocol
# Some letters and digits, first on their own, then capitalized,
# then modified with alt
TEST_BYTES = b'abc123ABC!@#\x1ba\x1bb\x1bc\x1b1\x1b2\x1b3'
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for char in iterbytes(b'abc123ABC!@#'):
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, None))
self.assertEqual(occurrences(result), [])
for char in iterbytes(b'abc123'):
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (char, parser.ALT))
self.assertEqual(occurrences(result), [])
occs = occurrences(proto)
self.assertFalse(occs, "%r should have been []" % (occs,))
class ServerFunctionKeysTests(ByteGroupingsMixin, unittest.TestCase):
"""Test for parsing and dispatching function keys (F1 - F12)
"""
protocolFactory = ServerProtocol
byteList = []
for byteCodes in (b'OP', b'OQ', b'OR', b'OS', # F1 - F4
b'15~', b'17~', b'18~', b'19~', # F5 - F8
b'20~', b'21~', b'23~', b'24~'): # F9 - F12
byteList.append(b'\x1b[' + byteCodes)
TEST_BYTES = b''.join(byteList)
del byteList, byteCodes
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for funcNum in range(1, 13):
funcArg = getattr(parser, 'F%d' % (funcNum,))
result = self.assertCall(occurrences(proto).pop(0), "keystrokeReceived", (funcArg, None))
self.assertEqual(occurrences(result), [])
self.assertFalse(occurrences(proto))
class ClientCursorMovementTests(ByteGroupingsMixin, unittest.TestCase):
protocolFactory = ClientProtocol
d2 = b"\x1b[2B"
r4 = b"\x1b[4C"
u1 = b"\x1b[A"
l2 = b"\x1b[2D"
# Move the cursor down two, right four, up one, left two, up one, left two
TEST_BYTES = d2 + r4 + u1 + l2 + u1 + l2
del d2, r4, u1, l2
def verifyResults(self, transport, proto, parser):
ByteGroupingsMixin.verifyResults(self, transport, proto, parser)
for (method, count) in [('Down', 2), ('Forward', 4), ('Up', 1),
('Backward', 2), ('Up', 1), ('Backward', 2)]:
result = self.assertCall(occurrences(proto).pop(0), "cursor" + method, (count,))
self.assertEqual(occurrences(result), [])
self.assertFalse(occurrences(proto))
class ClientControlSequencesTests(unittest.TestCase, MockMixin):
def setUp(self):
self.transport = StringTransport()
self.proto = Mock()
self.parser = ClientProtocol(lambda: self.proto)
self.parser.factory = self
self.parser.makeConnection(self.transport)
result = self.assertCall(occurrences(self.proto).pop(0), "makeConnection", (self.parser,))
self.assertFalse(occurrences(result))
def testSimpleCardinals(self):
self.parser.dataReceived(
b''.join(
[b''.join([b'\x1b[' + n + ch
for n in (b'', intToBytes(2), intToBytes(20), intToBytes(200))]
) for ch in iterbytes(b'BACD')
]))
occs = occurrences(self.proto)
for meth in ("Down", "Up", "Forward", "Backward"):
for count in (1, 2, 20, 200):
result = self.assertCall(occs.pop(0), "cursor" + meth, (count,))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testScrollRegion(self):
self.parser.dataReceived(b'\x1b[5;22r\x1b[r')
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setScrollRegion", (5, 22))
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "setScrollRegion", (None, None))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testHeightAndWidth(self):
self.parser.dataReceived(b"\x1b#3\x1b#4\x1b#5\x1b#6")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "doubleHeightLine", (True,))
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleHeightLine", (False,))
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "singleWidthLine")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "doubleWidthLine")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testCharacterSet(self):
self.parser.dataReceived(
b''.join(
[b''.join([b'\x1b' + g + n for n in iterbytes(b'AB012')])
for g in iterbytes(b'()')
]))
occs = occurrences(self.proto)
for which in (G0, G1):
for charset in (CS_UK, CS_US, CS_DRAWING, CS_ALTERNATE, CS_ALTERNATE_SPECIAL):
result = self.assertCall(occs.pop(0), "selectCharacterSet", (charset, which))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testShifting(self):
self.parser.dataReceived(b"\x15\x14")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "shiftIn")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "shiftOut")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testSingleShifts(self):
self.parser.dataReceived(b"\x1bN\x1bO")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "singleShift2")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "singleShift3")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testKeypadMode(self):
self.parser.dataReceived(b"\x1b=\x1b>")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "applicationKeypadMode")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "numericKeypadMode")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testCursor(self):
self.parser.dataReceived(b"\x1b7\x1b8")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "saveCursor")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "restoreCursor")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testReset(self):
self.parser.dataReceived(b"\x1bc")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reset")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testIndex(self):
self.parser.dataReceived(b"\x1bD\x1bM\x1bE")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "index")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "reverseIndex")
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "nextLine")
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testModes(self):
self.parser.dataReceived(
b"\x1b[" + b';'.join(map(intToBytes, [modes.KAM, modes.IRM, modes.LNM])) + b"h")
self.parser.dataReceived(
b"\x1b[" + b';'.join(map(intToBytes, [modes.KAM, modes.IRM, modes.LNM])) + b"l")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "setModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "resetModes", ([modes.KAM, modes.IRM, modes.LNM],))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testErasure(self):
self.parser.dataReceived(
b"\x1b[K\x1b[1K\x1b[2K\x1b[J\x1b[1J\x1b[2J\x1b[3P")
occs = occurrences(self.proto)
for meth in ("eraseToLineEnd", "eraseToLineBeginning", "eraseLine",
"eraseToDisplayEnd", "eraseToDisplayBeginning",
"eraseDisplay"):
result = self.assertCall(occs.pop(0), meth)
self.assertFalse(occurrences(result))
result = self.assertCall(occs.pop(0), "deleteCharacter", (3,))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testLineDeletion(self):
self.parser.dataReceived(b"\x1b[M\x1b[3M")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "deleteLine", (arg,))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testLineInsertion(self):
self.parser.dataReceived(b"\x1b[L\x1b[3L")
occs = occurrences(self.proto)
for arg in (1, 3):
result = self.assertCall(occs.pop(0), "insertLine", (arg,))
self.assertFalse(occurrences(result))
self.assertFalse(occs)
def testCursorPosition(self):
methods(self.proto)['reportCursorPosition'] = (6, 7)
self.parser.dataReceived(b"\x1b[6n")
self.assertEqual(self.transport.value(), b"\x1b[7;8R")
occs = occurrences(self.proto)
result = self.assertCall(occs.pop(0), "reportCursorPosition")
# This isn't really an interesting assert, since it only tests that
# our mock setup is working right, but I'll include it anyway.
self.assertEqual(result, (6, 7))
def test_applicationDataBytes(self):
"""
Contiguous non-control bytes are passed to a single call to the
C{write} method of the terminal to which the L{ClientProtocol} is
connected.
"""
occs = occurrences(self.proto)
self.parser.dataReceived(b'a')
self.assertCall(occs.pop(0), "write", (b"a",))
self.parser.dataReceived(b'bc')
self.assertCall(occs.pop(0), "write", (b"bc",))
def _applicationDataTest(self, data, calls):
occs = occurrences(self.proto)
self.parser.dataReceived(data)
while calls:
self.assertCall(occs.pop(0), *calls.pop(0))
self.assertFalse(occs, "No other calls should happen: %r" % (occs,))
def test_shiftInAfterApplicationData(self):
"""
Application data bytes followed by a shift-in command are passed to a
call to C{write} before the terminal's C{shiftIn} method is called.
"""
self._applicationDataTest(
b'ab\x15', [
("write", (b"ab",)),
("shiftIn",)])
def test_shiftOutAfterApplicationData(self):
"""
Application data bytes followed by a shift-out command are passed to a
call to C{write} before the terminal's C{shiftOut} method is called.
"""
self._applicationDataTest(
b'ab\x14', [
("write", (b"ab",)),
("shiftOut",)])
def test_cursorBackwardAfterApplicationData(self):
"""
Application data bytes followed by a cursor-backward command are passed
to a call to C{write} before the terminal's C{cursorBackward} method is
called.
"""
self._applicationDataTest(
b'ab\x08', [
("write", (b"ab",)),
("cursorBackward",)])
def test_escapeAfterApplicationData(self):
"""
Application data bytes followed by an escape character are passed to a
call to C{write} before the terminal's handler method for the escape is
called.
"""
# Test a short escape
self._applicationDataTest(
b'ab\x1bD', [
("write", (b"ab",)),
("index",)])
# And a long escape
self._applicationDataTest(
b'ab\x1b[4h', [
("write", (b"ab",)),
("setModes", ([4],))])
# There's some other cases too, but they're all handled by the same
# codepaths as above.
class ServerProtocolOutputTests(unittest.TestCase):
"""
Tests for the bytes L{ServerProtocol} writes to its transport when its
methods are called.
"""
# From ECMA 48: CSI is represented by bit combinations 01/11
# (representing ESC) and 05/11 in a 7-bit code or by bit
# combination 09/11 in an 8-bit code
ESC = _ecmaCodeTableCoordinate(1, 11)
CSI = ESC + _ecmaCodeTableCoordinate(5, 11)
def setUp(self):
self.protocol = ServerProtocol()
self.transport = StringTransport()
self.protocol.makeConnection(self.transport)
def test_cursorUp(self):
"""
L{ServerProtocol.cursorUp} writes the control sequence
ending with L{CSFinalByte.CUU} to its transport.
"""
self.protocol.cursorUp(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUU.value)
def test_cursorDown(self):
"""
L{ServerProtocol.cursorDown} writes the control sequence
ending with L{CSFinalByte.CUD} to its transport.
"""
self.protocol.cursorDown(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUD.value)
def test_cursorForward(self):
"""
L{ServerProtocol.cursorForward} writes the control sequence
ending with L{CSFinalByte.CUF} to its transport.
"""
self.protocol.cursorForward(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUF.value)
def test_cursorBackward(self):
"""
L{ServerProtocol.cursorBackward} writes the control sequence
ending with L{CSFinalByte.CUB} to its transport.
"""
self.protocol.cursorBackward(1)
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.CUB.value)
def test_cursorPosition(self):
"""
L{ServerProtocol.cursorPosition} writes a control sequence
ending with L{CSFinalByte.CUP} and containing the expected
coordinates to its transport.
"""
self.protocol.cursorPosition(0, 0)
self.assertEqual(self.transport.value(),
self.CSI + b'1;1' + CSFinalByte.CUP.value)
def test_cursorHome(self):
"""
L{ServerProtocol.cursorHome} writes a control sequence ending
with L{CSFinalByte.CUP} and no parameters, so that the client
defaults to (1, 1).
"""
self.protocol.cursorHome()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.CUP.value)
def test_index(self):
"""
L{ServerProtocol.index} writes the control sequence ending in
the 8-bit code table coordinates 4, 4.
Note that ECMA48 5th Edition removes C{IND}.
"""
self.protocol.index()
self.assertEqual(self.transport.value(),
self.ESC + _ecmaCodeTableCoordinate(4, 4))
def test_reverseIndex(self):
"""
L{ServerProtocol.reverseIndex} writes the control sequence
ending in the L{C1SevenBit.RI}.
"""
self.protocol.reverseIndex()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.RI.value)
def test_nextLine(self):
"""
L{ServerProtocol.nextLine} writes C{"\r\n"} to its transport.
"""
# Why doesn't it write ESC E? Because ESC E is poorly supported. For
# example, gnome-terminal (many different versions) fails to scroll if
# it receives ESC E and the cursor is already on the last row.
self.protocol.nextLine()
self.assertEqual(self.transport.value(), b"\r\n")
def test_setModes(self):
"""
L{ServerProtocol.setModes} writes a control sequence
containing the requested modes and ending in the
L{CSFinalByte.SM}.
"""
modesToSet = [modes.KAM, modes.IRM, modes.LNM]
self.protocol.setModes(modesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, modesToSet)) +
CSFinalByte.SM.value)
def test_setPrivateModes(self):
"""
L{ServerProtocol.setPrivateModes} writes a control sequence
containing the requested private modes and ending in the
L{CSFinalByte.SM}.
"""
privateModesToSet = [privateModes.ERROR,
privateModes.COLUMN,
privateModes.ORIGIN]
self.protocol.setModes(privateModesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, privateModesToSet)) +
CSFinalByte.SM.value)
def test_resetModes(self):
"""
L{ServerProtocol.resetModes} writes the control sequence
ending in the L{CSFinalByte.RM}.
"""
modesToSet = [modes.KAM, modes.IRM, modes.LNM]
self.protocol.resetModes(modesToSet)
self.assertEqual(self.transport.value(),
self.CSI +
b';'.join(map(intToBytes, modesToSet)) +
CSFinalByte.RM.value)
def test_singleShift2(self):
"""
L{ServerProtocol.singleShift2} writes an escape sequence
followed by L{C1SevenBit.SS2}
"""
self.protocol.singleShift2()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.SS2.value)
def test_singleShift3(self):
"""
L{ServerProtocol.singleShift3} writes an escape sequence
followed by L{C1SevenBit.SS3}
"""
self.protocol.singleShift3()
self.assertEqual(self.transport.value(),
self.ESC + C1SevenBit.SS3.value)
def test_selectGraphicRendition(self):
"""
L{ServerProtocol.selectGraphicRendition} writes a control
sequence containing the requested attributes and ending with
L{CSFinalByte.SGR}
"""
self.protocol.selectGraphicRendition(str(BLINK), str(UNDERLINE))
self.assertEqual(self.transport.value(),
self.CSI +
intToBytes(BLINK) + b';' + intToBytes(UNDERLINE) +
CSFinalByte.SGR.value)
def test_horizontalTabulationSet(self):
"""
L{ServerProtocol.horizontalTabulationSet} writes the escape
sequence ending in L{C1SevenBit.HTS}
"""
self.protocol.horizontalTabulationSet()
self.assertEqual(self.transport.value(),
self.ESC +
C1SevenBit.HTS.value)
def test_eraseToLineEnd(self):
"""
L{ServerProtocol.eraseToLineEnd} writes the control sequence
ending in L{CSFinalByte.EL} and no parameters,
forcing the client to default to 0 (from the active present
position's current location to the end of the line.)
"""
self.protocol.eraseToLineEnd()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.EL.value)
def test_eraseToLineBeginning(self):
"""
L{ServerProtocol.eraseToLineBeginning} writes the control
sequence ending in L{CSFinalByte.EL} and a parameter
of 1 (from the beginning of the line up to and including the
active present position's current location.)
"""
self.protocol.eraseToLineBeginning()
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.EL.value)
def test_eraseLine(self):
"""
L{ServerProtocol.eraseLine} writes the control
sequence ending in L{CSFinalByte.EL} and a parameter
of 2 (the entire line.)
"""
self.protocol.eraseLine()
self.assertEqual(self.transport.value(),
self.CSI + b'2' + CSFinalByte.EL.value)
def test_eraseToDisplayEnd(self):
"""
L{ServerProtocol.eraseToDisplayEnd} writes the control
sequence ending in L{CSFinalByte.ED} and no parameters,
forcing the client to default to 0 (from the active present
position's current location to the end of the page.)
"""
self.protocol.eraseToDisplayEnd()
self.assertEqual(self.transport.value(),
self.CSI + CSFinalByte.ED.value)
def test_eraseToDisplayBeginning(self):
"""
L{ServerProtocol.eraseToDisplayBeginning} writes the control
sequence ending in L{CSFinalByte.ED} and a parameter of 1
(from the beginning of the page up to and including the active
present position's current location.)
"""
self.protocol.eraseToDisplayBeginning()
self.assertEqual(self.transport.value(),
self.CSI + b'1' + CSFinalByte.ED.value)
def test_eraseDisplay(self):
"""
L{ServerProtocol.eraseDisplay} writes the control sequence
ending in L{CSFinalByte.ED} and a parameter of 2 (the
entire page)
"""
self.protocol.eraseDisplay()
self.assertEqual(self.transport.value(),
self.CSI + b'2' + CSFinalByte.ED.value)
def test_deleteCharacter(self):
"""
L{ServerProtocol.deleteCharacter} writes the control sequence
containing the number of characters to delete and ending in
L{CSFinalByte.DCH}
"""
self.protocol.deleteCharacter(4)
self.assertEqual(self.transport.value(),
self.CSI + b'4' + CSFinalByte.DCH.value)
def test_insertLine(self):
"""
L{ServerProtocol.insertLine} writes the control sequence
containing the number of lines to insert and ending in
L{CSFinalByte.IL}
"""
self.protocol.insertLine(5)
self.assertEqual(self.transport.value(),
self.CSI + b'5' + CSFinalByte.IL.value)
def test_deleteLine(self):
"""
L{ServerProtocol.deleteLine} writes the control sequence
containing the number of lines to delete and ending in
L{CSFinalByte.DL}
"""
self.protocol.deleteLine(6)
self.assertEqual(self.transport.value(),
self.CSI + b'6' + CSFinalByte.DL.value)
def test_setScrollRegionNoArgs(self):
"""
With no arguments, L{ServerProtocol.setScrollRegion} writes a
control sequence with no parameters, but a parameter
separator, and ending in C{b'r'}.
"""
self.protocol.setScrollRegion()
self.assertEqual(self.transport.value(), self.CSI + b';' + b'r')
def test_setScrollRegionJustFirst(self):
"""
With just a value for its C{first} argument,
L{ServerProtocol.setScrollRegion} writes a control sequence with
that parameter, a parameter separator, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(first=1)
self.assertEqual(self.transport.value(), self.CSI + b'1;' + b'r')
def test_setScrollRegionJustLast(self):
"""
With just a value for its C{last} argument,
L{ServerProtocol.setScrollRegion} writes a control sequence with
a parameter separator, that parameter, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(last=1)
self.assertEqual(self.transport.value(), self.CSI + b';1' + b'r')
def test_setScrollRegionFirstAndLast(self):
"""
When given both C{first} and C{last}
L{ServerProtocol.setScrollRegion} writes a control sequence with
the first parameter, a parameter separator, the last
parameter, and finally a C{b'r'}.
"""
self.protocol.setScrollRegion(first=1, last=2)
self.assertEqual(self.transport.value(), self.CSI + b'1;2' + b'r')
def test_reportCursorPosition(self):
"""
L{ServerProtocol.reportCursorPosition} writes a control
sequence ending in L{CSFinalByte.DSR} with a parameter of 6
(the Device Status Report returns the current active
position.)
"""
self.protocol.reportCursorPosition()
self.assertEqual(self.transport.value(),
self.CSI + b'6' + CSFinalByte.DSR.value)
class DeprecationsTests(unittest.TestCase):
"""
Tests to ensure deprecation of L{insults.colors} and L{insults.client}
"""
def ensureDeprecated(self, message):
"""
Ensures that the correct deprecation warning was issued.
"""
warnings = self.flushWarnings()
self.assertIs(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'], message)
self.assertEqual(len(warnings), 1)
def test_colors(self):
"""
The L{insults.colors} module is deprecated
"""
namedAny('twisted.conch.insults.colors')
self.ensureDeprecated("twisted.conch.insults.colors was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.helper instead.")
def test_client(self):
"""
The L{insults.client} module is deprecated
"""
namedAny('twisted.conch.insults.client')
self.ensureDeprecated("twisted.conch.insults.client was deprecated "
"in Twisted 10.1.0: Please use "
"twisted.conch.insults.insults instead.")
|
mit
|
youprofit/servo
|
components/script/dom/bindings/codegen/parser/tests/test_special_methods.py
|
208
|
3824
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface SpecialMethods {
getter long long (unsigned long index);
setter long long (unsigned long index, long long value);
creator long long (unsigned long index, long long value);
deleter long long (unsigned long index);
getter boolean (DOMString name);
setter boolean (DOMString name, boolean value);
creator boolean (DOMString name, boolean value);
deleter boolean (DOMString name);
};
interface SpecialMethodsCombination {
getter deleter long long (unsigned long index);
setter creator long long (unsigned long index, long long value);
getter deleter boolean (DOMString name);
setter creator boolean (DOMString name, boolean value);
};
""")
results = parser.finish()
def checkMethod(method, QName, name,
static=False, getter=False, setter=False, creator=False,
deleter=False, legacycaller=False, stringifier=False):
harness.ok(isinstance(method, WebIDL.IDLMethod),
"Should be an IDLMethod")
harness.check(method.identifier.QName(), QName, "Method has the right QName")
harness.check(method.identifier.name, name, "Method has the right name")
harness.check(method.isStatic(), static, "Method has the correct static value")
harness.check(method.isGetter(), getter, "Method has the correct getter value")
harness.check(method.isSetter(), setter, "Method has the correct setter value")
harness.check(method.isCreator(), creator, "Method has the correct creator value")
harness.check(method.isDeleter(), deleter, "Method has the correct deleter value")
harness.check(method.isLegacycaller(), legacycaller, "Method has the correct legacycaller value")
harness.check(method.isStringifier(), stringifier, "Method has the correct stringifier value")
harness.check(len(results), 2, "Expect 2 interfaces")
iface = results[0]
harness.check(len(iface.members), 8, "Expect 8 members")
checkMethod(iface.members[0], "::SpecialMethods::__indexedgetter", "__indexedgetter",
getter=True)
checkMethod(iface.members[1], "::SpecialMethods::__indexedsetter", "__indexedsetter",
setter=True)
checkMethod(iface.members[2], "::SpecialMethods::__indexedcreator", "__indexedcreator",
creator=True)
checkMethod(iface.members[3], "::SpecialMethods::__indexeddeleter", "__indexeddeleter",
deleter=True)
checkMethod(iface.members[4], "::SpecialMethods::__namedgetter", "__namedgetter",
getter=True)
checkMethod(iface.members[5], "::SpecialMethods::__namedsetter", "__namedsetter",
setter=True)
checkMethod(iface.members[6], "::SpecialMethods::__namedcreator", "__namedcreator",
creator=True)
checkMethod(iface.members[7], "::SpecialMethods::__nameddeleter", "__nameddeleter",
deleter=True)
iface = results[1]
harness.check(len(iface.members), 4, "Expect 4 members")
checkMethod(iface.members[0], "::SpecialMethodsCombination::__indexedgetterdeleter",
"__indexedgetterdeleter", getter=True, deleter=True)
checkMethod(iface.members[1], "::SpecialMethodsCombination::__indexedsettercreator",
"__indexedsettercreator", setter=True, creator=True)
checkMethod(iface.members[2], "::SpecialMethodsCombination::__namedgetterdeleter",
"__namedgetterdeleter", getter=True, deleter=True)
checkMethod(iface.members[3], "::SpecialMethodsCombination::__namedsettercreator",
"__namedsettercreator", setter=True, creator=True)
|
mpl-2.0
|
Acehaidrey/incubator-airflow
|
airflow/providers/amazon/aws/transfers/mongo_to_s3.py
|
4
|
4312
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Any, Iterable, Optional, Union, cast
from bson import json_util
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.mongo.hooks.mongo import MongoHook
from airflow.utils.decorators import apply_defaults
class MongoToS3Operator(BaseOperator):
"""
Mongo -> S3
A more specific BaseOperator meant to move data
from MongoDB via pymongo to S3 via boto.
Things to note:
.execute() is written to depend on .transform()
.transform() is meant to be extended by child classes
to perform transformations unique to those operators' needs
"""
template_fields = ['s3_key', 'mongo_query']
# pylint: disable=too-many-instance-attributes
@apply_defaults
def __init__(
self,
*,
mongo_conn_id: str,
s3_conn_id: str,
mongo_collection: str,
mongo_query: Union[list, dict],
s3_bucket: str,
s3_key: str,
mongo_db: Optional[str] = None,
replace: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
# Conn Ids
self.mongo_conn_id = mongo_conn_id
self.s3_conn_id = s3_conn_id
# Mongo Query Settings
self.mongo_db = mongo_db
self.mongo_collection = mongo_collection
# Grab query and determine if we need to run an aggregate pipeline
self.mongo_query = mongo_query
self.is_pipeline = isinstance(self.mongo_query, list)
# S3 Settings
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.replace = replace
def execute(self, context) -> bool:
"""Executed by task_instance at runtime"""
s3_conn = S3Hook(self.s3_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=cast(list, self.mongo_query),
mongo_db=self.mongo_db,
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=cast(dict, self.mongo_query),
mongo_db=self.mongo_db,
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
# Load Into S3
s3_conn.load_string(
string_data=docs_str, key=self.s3_key, bucket_name=self.s3_bucket, replace=self.replace
)
return True
@staticmethod
def _stringify(iterable: Iterable, joinable: str = '\n') -> str:
"""
Takes an iterable (pymongo Cursor or Array) containing dictionaries and
returns a stringified version using python join
"""
return joinable.join([json.dumps(doc, default=json_util.default) for doc in iterable])
@staticmethod
def transform(docs: Any) -> Any:
"""
Processes a pyMongo cursor and returns an iterable with each element being
a JSON serializable dictionary.
The base transform() assumes no processing is needed,
i.e. docs is a pyMongo cursor of documents and the cursor just
needs to be passed through.
Override this method for custom transformations.
"""
return docs
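# Illustrative usage in a DAG (hypothetical task and connection ids):
#
#     export_mongo = MongoToS3Operator(
#         task_id='export_mongo_to_s3',
#         mongo_conn_id='mongo_default',
#         s3_conn_id='aws_default',
#         mongo_collection='events',
#         mongo_query={'status': 'active'},
#         s3_bucket='example-bucket',
#         s3_key='exports/events.json',
#         replace=True,
#     )
#
# Passing a list for mongo_query instead (e.g. [{'$match': {...}}]) makes
# execute() run it as an aggregation pipeline, per is_pipeline above.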
|
apache-2.0
|
andyzsf/edx
|
common/djangoapps/monitoring/signals.py
|
172
|
4584
|
"""
Add receivers for django signals, and feed data into the monitoring system.
If a model has a class attribute 'MODEL_TAGS' that is a list of strings,
those fields will be retrieved from the model instance, and added as tags to
the recorded metrics.
"""
from django.db.models.signals import post_save, post_delete, m2m_changed, post_init
from django.dispatch import receiver
import dogstats_wrapper as dog_stats_api
def _database_tags(action, sender, kwargs):
"""
Return tags for the sender and database used in django.db.models signals.
Arguments:
action (str): What action is being performed on the db model.
sender (Model): What model class is the action being performed on.
kwargs (dict): The kwargs passed by the model signal.
"""
tags = _model_tags(kwargs, 'instance')
tags.append(u'action:{}'.format(action))
if 'using' in kwargs:
tags.append(u'database:{}'.format(kwargs['using']))
return tags
def _model_tags(kwargs, key):
"""
Return a list of all tags for all attributes in kwargs[key].MODEL_TAGS,
plus a tag for the model class.
"""
if key not in kwargs:
return []
instance = kwargs[key]
tags = [
u'{}.{}:{}'.format(key, attr, getattr(instance, attr))
for attr in getattr(instance, 'MODEL_TAGS', [])
]
tags.append(u'model_class:{}'.format(instance.__class__.__name__))
return tags
@receiver(post_init, dispatch_uid='edxapp.monitoring.post_init_metrics')
def post_init_metrics(sender, **kwargs):
"""
Record the number of times that django models are instantiated.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this initialization (optional).
instance (Model instance): The instance being initialized (optional).
"""
tags = _database_tags('initialized', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_save, dispatch_uid='edxapp.monitoring.post_save_metrics')
def post_save_metrics(sender, **kwargs):
"""
Record the number of times that django models are saved (created or updated).
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this update (optional).
instance (Model instance): The instance being updated (optional).
"""
action = 'created' if kwargs.pop('created', False) else 'updated'
tags = _database_tags(action, sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_delete, dispatch_uid='edxapp.monitoring.post_delete_metrics')
def post_delete_metrics(sender, **kwargs):
"""
Record the number of times that django models are deleted.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance being deleted (optional).
"""
tags = _database_tags('deleted', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(m2m_changed, dispatch_uid='edxapp.monitoring.m2m_changed_metrics')
def m2m_changed_metrics(sender, **kwargs):
"""
Record the number of times that Many2Many fields are updated. This is separated
from post_save and post_delete, because it's signaled by the database model in
the middle of the Many2Many relationship, rather than either of the models
that are the relationship participants.
Args:
sender (Model): The model class in the middle of the Many2Many relationship.
action (str): The action being taken on this Many2Many relationship.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance whose many-to-many relation is being modified.
model (Model class): The model of the class being added/removed/cleared from the relation.
"""
if 'action' not in kwargs:
return
action = {
'post_add': 'm2m.added',
'post_remove': 'm2m.removed',
'post_clear': 'm2m.cleared',
}.get(kwargs['action'])
if not action:
return
tags = _database_tags(action, sender, kwargs)
if 'model' in kwargs:
tags.append('target_class:{}'.format(kwargs['model'].__name__))
pk_set = kwargs.get('pk_set', []) or []
dog_stats_api.increment(
'edxapp.db.model',
value=len(pk_set),
tags=tags
)
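# Example of a model opting in (hypothetical model; field names illustrative):
#
#     class CourseEnrollment(models.Model):
#         MODEL_TAGS = ['mode', 'is_active']
#
# Saving such an instance would increment 'edxapp.db.model' with tags like
# 'instance.mode:honor', 'instance.is_active:True',
# 'model_class:CourseEnrollment', 'action:created' and 'database:default'.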
|
agpl-3.0
|
lepistone/odoo
|
openerp/workflow/service.py
|
378
|
4972
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from helpers import Session
from helpers import Record
from openerp.workflow.instance import WorkflowInstance
# import instance
class WorkflowService(object):
CACHE = {}
@classmethod
def clear_cache(cls, dbname):
cls.CACHE[dbname] = {}
@classmethod
def new(cls, cr, uid, model_name, record_id):
return cls(Session(cr, uid), Record(model_name, record_id))
def __init__(self, session, record):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
self.cr = self.session.cr
def write(self):
self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s',
(self.record.id or None, self.record.model or None, 'active')
)
for (instance_id,) in self.cr.fetchall():
WorkflowInstance(self.session, self.record, {'id': instance_id}).update()
def trigger(self):
self.cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (self.record.id, self.record.model))
res = self.cr.fetchall()
for (instance_id,) in res:
self.cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (self.session.uid, instance_id,))
current_uid, current_model_name, current_record_id = self.cr.fetchone()
current_session = Session(self.session.cr, current_uid)
current_record = Record(current_model_name, current_record_id)
WorkflowInstance(current_session, current_record, {'id': instance_id}).update()
def delete(self):
WorkflowInstance(self.session, self.record, {}).delete()
def create(self):
WorkflowService.CACHE.setdefault(self.cr.dbname, {})
wkf_ids = WorkflowService.CACHE[self.cr.dbname].get(self.record.model, None)
if not wkf_ids:
self.cr.execute('select id from wkf where osv=%s and on_create=True', (self.record.model,))
wkf_ids = self.cr.fetchall()
WorkflowService.CACHE[self.cr.dbname][self.record.model] = wkf_ids
for (wkf_id, ) in wkf_ids:
WorkflowInstance.create(self.session, self.record, wkf_id)
def validate(self, signal):
result = False
# ids of all active workflow instances for a corresponding resource (id, model_name)
self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (self.record.id, self.record.model, 'active'))
# TODO: Refactor the workflow instance object
for (instance_id,) in self.cr.fetchall():
wi = WorkflowInstance(self.session, self.record, {'id': instance_id})
res2 = wi.validate(signal)
result = result or res2
return result
def redirect(self, new_rid):
# get ids of wkf instances for the old resource (res_id)
# CHECKME: shouldn't we get only active instances?
self.cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
for old_inst_id, workflow_id in self.cr.fetchall():
# first active instance for new resource (new_rid), using same wkf
self.cr.execute(
'SELECT id '\
'FROM wkf_instance '\
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
(new_rid, self.record.model, workflow_id, 'active'))
new_id = self.cr.fetchone()
if new_id:
# select all workitems which "wait" for the old instance
self.cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
for (item_id,) in self.cr.fetchall():
# redirect all those workitems to the wkf instance of the new resource
self.cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
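# Typical call sites (illustrative only): given a cursor and uid, creating a
# record of a model with an on_create workflow and then signalling it looks like
#
#     service = WorkflowService.new(cr, uid, 'sale.order', order_id)
#     service.create()                    # instantiate matching workflows
#     service.validate('order_confirm')   # hypothetical signal name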
|
agpl-3.0
|
MyAOSP/external_chromium_org
|
build/android/pylib/host_driven/test_info_collection.py
|
50
|
5344
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing information about the host-driven tests."""
import logging
import os
import tests_annotations
class TestInfo(object):
"""An object containing and representing a test function, plus metadata."""
def __init__(self, runnable, set_up=None, tear_down=None):
# The actual test function/method.
self.runnable = runnable
# Qualified name of test function/method (e.g. FooModule.testBar).
self.qualified_name = self._GetQualifiedName(runnable)
# setUp and teardown functions, if any.
self.set_up = set_up
self.tear_down = tear_down
def _GetQualifiedName(self, runnable):
"""Helper method to infer a runnable's name and module name.
Many filters and lists presuppose a format of module_name.testMethodName.
To make this easy on everyone, we use some reflection magic to infer this
name automatically.
Args:
runnable: the test method to get the qualified name for
Returns:
qualified name for this runnable, incl. module name and method name.
"""
runnable_name = runnable.__name__
# See also tests_annotations.
module_name = os.path.splitext(
os.path.basename(runnable.__globals__['__file__']))[0]
return '.'.join([module_name, runnable_name])
def __str__(self):
return self.qualified_name
class TestInfoCollection(object):
"""A collection of TestInfo objects which facilitates filtering."""
def __init__(self):
"""Initialize a new TestInfoCollection."""
# Master list of all valid tests.
self.all_tests = []
def AddTests(self, test_infos):
"""Adds a set of tests to this collection.
The user may then retrieve them, optionally according to criteria, via
GetAvailableTests().
Args:
test_infos: a list of TestInfos representing test functions/methods.
"""
self.all_tests = test_infos
def GetAvailableTests(self, annotations, exclude_annotations, name_filter):
"""Get a collection of TestInfos which match the supplied criteria.
Args:
annotations: List of annotations. Each test in the returned list is
annotated with at least one of these annotations.
exclude_annotations: List of annotations. The tests in the returned
list are not annotated with any of these annotations.
name_filter: name filter which tests must match, if any
Returns:
List of available tests.
"""
available_tests = self.all_tests
# Filter out tests which match neither the requested annotation, nor the
# requested name filter, if any.
available_tests = [t for t in available_tests if
self._AnnotationIncludesTest(t, annotations)]
if annotations and len(annotations) == 1 and annotations[0] == 'SmallTest':
tests_without_annotation = [
t for t in self.all_tests if
not tests_annotations.AnnotatedFunctions.GetTestAnnotations(
t.qualified_name)]
test_names = [t.qualified_name for t in tests_without_annotation]
logging.warning('The following tests do not contain any annotation. '
'Assuming "SmallTest":\n%s',
'\n'.join(test_names))
available_tests += tests_without_annotation
if exclude_annotations:
excluded_tests = [t for t in available_tests if
self._AnnotationIncludesTest(t, exclude_annotations)]
available_tests = list(set(available_tests) - set(excluded_tests))
available_tests = [t for t in available_tests if
self._NameFilterIncludesTest(t, name_filter)]
return available_tests
def _AnnotationIncludesTest(self, test_info, annotation_filter_list):
"""Checks whether a given test represented by test_info matches annotation.
Args:
test_info: TestInfo object representing the test
annotation_filter_list: list of annotation filters to match (e.g. Smoke)
Returns:
True if no annotation was supplied or the test matches; false otherwise.
"""
if not annotation_filter_list:
return True
for annotation_filter in annotation_filter_list:
filters = annotation_filter.split('=')
if len(filters) == 2:
key = filters[0]
value_list = filters[1].split(',')
for value in value_list:
if tests_annotations.AnnotatedFunctions.IsAnnotated(
key + ':' + value, test_info.qualified_name):
return True
elif tests_annotations.AnnotatedFunctions.IsAnnotated(
annotation_filter, test_info.qualified_name):
return True
return False
def _NameFilterIncludesTest(self, test_info, name_filter):
"""Checks whether a name filter matches a given test_info's method name.
This is a case-sensitive, substring comparison: 'Foo' will match methods
Foo.testBar and Bar.testFoo. 'foo' would not match either.
Args:
test_info: TestInfo object representing the test
name_filter: substring to check for in the qualified name of the test
Returns:
True if no name filter supplied or it matches; False otherwise.
"""
return not name_filter or name_filter in test_info.qualified_name
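# Annotation filters (illustrative) follow the formats parsed above: a bare
# name such as 'Smoke', or 'key=value1,value2', which matches tests annotated
# 'key:value1' or 'key:value2'. For example (hypothetical values):
#
#     collection.GetAvailableTests(
#         annotations=['Smoke', 'Feature=Login,Signup'],
#         exclude_annotations=['FlakyTest'],
#         name_filter='testLogin')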
|
bsd-3-clause
|
rschnapka/odoo
|
openerp/addons/base/res/report/__init__.py
|
79
|
1201
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from report import report_sxw
#report_sxw.report_sxw('report.partner.list', 'res.partner', 'addons/base/res/partner/report/partner_list.rml')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
philotas/enigma2
|
lib/python/Components/TimerList.py
|
16
|
7927
|
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from skin import parseFont
from Tools.FuzzyDate import FuzzyTime
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_VALIGN_CENTER, RT_VALIGN_TOP, RT_VALIGN_BOTTOM
from Tools.Alternatives import GetWithAlternative
from Tools.LoadPixmap import LoadPixmap
from Tools.TextBoundary import getTextBoundarySize
from timer import TimerEntry
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
class TimerList(HTMLComponent, GUIComponent, object):
#
# | <Name of the Timer> <Service> <orb.pos>|
# | <state> <start, end> |
#
def buildTimerEntry(self, timer, processed):
height = self.l.getItemSize().height()
width = self.l.getItemSize().width()
res = [ None ]
serviceName = " " + timer.service_ref.getServiceName()
serviceNameWidth = getTextBoundarySize(self.instance, self.serviceNameFont, self.l.getItemSize(), serviceName).width()
if 200 > width - serviceNameWidth - self.iconWidth - self.iconMargin:
serviceNameWidth = width - 200 - self.iconWidth - self.iconMargin
res.append((eListboxPythonMultiContent.TYPE_TEXT, width - serviceNameWidth, 0, serviceNameWidth, self.rowSplit, 0, RT_HALIGN_RIGHT|RT_VALIGN_BOTTOM, serviceName))
res.append((eListboxPythonMultiContent.TYPE_TEXT, self.iconWidth + self.iconMargin, 0, width - serviceNameWidth - self.iconWidth - self.iconMargin, self.rowSplit, 2, RT_HALIGN_LEFT|RT_VALIGN_BOTTOM, timer.name))
begin = FuzzyTime(timer.begin)
if timer.repeated:
days = ( _("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun") )
repeatedtext = []
flags = timer.repeated
for x in (0, 1, 2, 3, 4, 5, 6):
if flags & 1 == 1:
repeatedtext.append(days[x])
flags >>= 1
repeatedtext = ", ".join(repeatedtext)
if self.iconRepeat:
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, self.iconMargin / 2, self.rowSplit + (self.itemHeight - self.rowSplit - self.iconHeight) / 2, self.iconWidth, self.iconHeight, self.iconRepeat))
else:
repeatedtext = begin[0] # date
if timer.justplay:
if timer.end > timer.begin + 3:
text = repeatedtext + ((" %s ... %s (" + _("ZAP") + ", %d " + _("mins") + ")") % (begin[1], FuzzyTime(timer.end)[1], (timer.end - timer.begin) / 60))
else:
text = repeatedtext + ((" %s (" + _("ZAP") + ")") % (begin[1]))
else:
text = repeatedtext + ((" %s ... %s (%d " + _("mins") + ")") % (begin[1], FuzzyTime(timer.end)[1], (timer.end - timer.begin) / 60))
icon = None
if not processed:
if timer.state == TimerEntry.StateWaiting:
state = _("waiting")
if timer.isAutoTimer:
icon = self.iconAutoTimer
else:
icon = self.iconWait
elif timer.state == TimerEntry.StatePrepared:
state = _("about to start")
icon = self.iconPrepared
elif timer.state == TimerEntry.StateRunning:
if timer.justplay:
state = _("zapped")
icon = self.iconZapped
else:
state = _("recording...")
icon = self.iconRecording
elif timer.state == TimerEntry.StateEnded:
state = _("done!")
icon = self.iconDone
else:
state = _("<unknown>")
icon = None
elif timer.disabled:
state = _("disabled")
icon = self.iconDisabled
elif timer.failed:
state = _("failed")
icon = self.iconFailed
else:
state = _("done!")
icon = self.iconDone
icon and res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, self.iconMargin / 2, (self.rowSplit - self.iconHeight) / 2, self.iconWidth, self.iconHeight, icon))
orbpos = self.getOrbitalPos(timer.service_ref)
orbposWidth = getTextBoundarySize(self.instance, self.font, self.l.getItemSize(), orbpos).width()
res.append((eListboxPythonMultiContent.TYPE_TEXT, self.satPosLeft, self.rowSplit, orbposWidth, self.itemHeight - self.rowSplit, 1, RT_HALIGN_LEFT|RT_VALIGN_TOP, orbpos))
res.append((eListboxPythonMultiContent.TYPE_TEXT, self.iconWidth + self.iconMargin, self.rowSplit, self.satPosLeft - self.iconWidth - self.iconMargin, self.itemHeight - self.rowSplit, 1, RT_HALIGN_LEFT|RT_VALIGN_TOP, state))
res.append((eListboxPythonMultiContent.TYPE_TEXT, self.satPosLeft + orbposWidth, self.rowSplit, width - self.satPosLeft - orbposWidth, self.itemHeight - self.rowSplit, 1, RT_HALIGN_RIGHT|RT_VALIGN_TOP, text))
line = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 0, height-2, width, 2, line))
return res
def __init__(self, list):
GUIComponent.__init__(self)
self.l = eListboxPythonMultiContent()
self.l.setBuildFunc(self.buildTimerEntry)
self.serviceNameFont = gFont("Regular", 20)
self.font = gFont("Regular", 18)
self.eventNameFont = gFont("Regular", 18)
self.l.setList(list)
self.itemHeight = 50
self.rowSplit = 25
self.iconMargin = 4
self.satPosLeft = 160
self.iconWait = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_wait.png"))
#currently intended that all icons have the same size
self.iconWidth = self.iconWait.size().width()
self.iconHeight = self.iconWait.size().height()
self.iconRecording = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_rec.png"))
self.iconPrepared = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_prep.png"))
self.iconDone = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_done.png"))
self.iconRepeat = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_rep.png"))
self.iconZapped = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_zap.png"))
self.iconDisabled = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_off.png"))
self.iconFailed = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_failed.png"))
self.iconAutoTimer = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/timer_autotimer.png"))
def applySkin(self, desktop, parent):
def itemHeight(value):
self.itemHeight = int(value)
def setServiceNameFont(value):
self.serviceNameFont = parseFont(value, ((1,1),(1,1)))
def setEventNameFont(value):
self.eventNameFont = parseFont(value, ((1,1),(1,1)))
def setFont(value):
self.font = parseFont(value, ((1,1),(1,1)))
def rowSplit(value):
self.rowSplit = int(value)
def iconMargin(value):
self.iconMargin = int(value)
def satPosLeft(value):
self.satPosLeft = int(value)
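# Dispatch each skin attribute to the matching local setter above; attributes
# without a handler raise here and are left for GUIComponent.applySkin().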
for (attrib, value) in list(self.skinAttributes):
try:
locals().get(attrib)(value)
self.skinAttributes.remove((attrib, value))
except:
pass
self.l.setItemHeight(self.itemHeight)
self.l.setFont(0, self.serviceNameFont)
self.l.setFont(1, self.font)
self.l.setFont(2, self.eventNameFont)
return GUIComponent.applySkin(self, desktop, parent)
def getCurrent(self):
cur = self.l.getCurrentSelection()
return cur and cur[0]
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setContent(self.l)
self.instance = instance
instance.setWrapAround(True)
def moveToIndex(self, index):
self.instance.moveSelectionTo(index)
def getCurrentIndex(self):
return self.instance.getCurrentIndex()
currentIndex = property(getCurrentIndex, moveToIndex)
currentSelection = property(getCurrent)
def moveDown(self):
self.instance.moveSelection(self.instance.moveDown)
def invalidate(self):
self.l.invalidate()
def entryRemoved(self, idx):
self.l.entryRemoved(idx)
def getOrbitalPos(self, ref):
refstr = None
if hasattr(ref, 'sref'):
refstr = str(ref.sref)
else:
refstr = str(ref)
refstr = refstr and GetWithAlternative(refstr)
if '%3a//' in refstr:
return "%s" % _("Stream")
op = int(refstr.split(':', 10)[6][:-4] or "0", 16)
if op == 0xeeee:
return "%s" % _("DVB-T")
if op == 0xffff:
return "%s" % _("DVB-C")
direction = 'E'
if op > 1800:
op = 3600 - op
direction = 'W'
return ("%d.%d\xc2\xb0%s") % (op // 10, op % 10, direction)
|
gpl-2.0
|
photoninger/ansible
|
lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
|
16
|
5953
|
#!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift_subnet_group
version_added: "2.2"
short_description: manage Redshift cluster subnet groups
description:
- Creates, modifies, and deletes Redshift cluster subnet groups.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
default: 'present'
choices: ['present', 'absent' ]
group_name:
description:
- Cluster subnet group name.
required: true
aliases: ['name']
group_description:
description:
- Database subnet group description.
required: false
default: null
aliases: ['description']
group_subnets:
description:
- List of subnet IDs that make up the cluster subnet group.
required: false
default: null
aliases: ['subnets']
requirements: [ 'boto' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a Redshift subnet group
- local_action:
module: redshift_subnet_group
state: present
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- 'subnet-aaaaa'
- 'subnet-bbbbb'
# Remove subnet group
- redshift_subnet_group:
state: absent
group_name: redshift-subnet
'''
RETURN = '''
group:
description: dictionary containing all Redshift subnet group information
returned: success
type: complex
contains:
name:
description: name of the Redshift subnet group
returned: success
type: string
sample: "redshift_subnet_group_name"
vpc_id:
description: Id of the VPC where the subnet is located
returned: success
type: string
sample: "vpc-aabb1122"
'''
try:
import boto
import boto.redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
group_name=dict(required=True, aliases=['name']),
group_description=dict(required=False, aliases=['description']),
group_subnets=dict(required=False, aliases=['subnets'], type='list'),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
state = module.params.get('state')
group_name = module.params.get('group_name')
group_description = module.params.get('group_description')
group_subnets = module.params.get('group_subnets')
if state == 'present':
for required in ('group_name', 'group_description', 'group_subnets'):
if not module.params.get(required):
module.fail_json(msg=str("parameter %s required for state='present'" % required))
else:
for not_allowed in ('group_description', 'group_subnets'):
if module.params.get(not_allowed):
module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
# Connect to the Redshift endpoint.
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
changed = False
exists = False
group = None
try:
matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except boto.exception.JSONResponseError as e:
if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
module.fail_json(msg=str(e))
if state == 'absent':
if exists:
conn.delete_cluster_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
group = {
'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
else:
changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
group = {
'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
changed = True
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed, group=group)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ahmadio/edx-platform
|
lms/djangoapps/instructor/tests/test_legacy_xss.py
|
46
|
2251
|
"""
Tests of various instructor dashboard features that include lists of students
"""
from django.conf import settings
from django.test.client import RequestFactory
from markupsafe import escape
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from edxmako.tests import mako_middleware_process_request
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from instructor.views import legacy
# pylint: disable=missing-docstring
@attr('shard_1')
class TestXss(ModuleStoreTestCase):
def setUp(self):
super(TestXss, self).setUp()
self._request_factory = RequestFactory()
self._course = CourseFactory.create()
self._evil_student = UserFactory.create(
email="[email protected]",
username="evil-robot",
profile__name='<span id="evil">Evil Robot</span>',
)
self._instructor = UserFactory.create(
email="[email protected]",
username="instructor",
is_staff=True
)
CourseEnrollmentFactory.create(
user=self._evil_student,
course_id=self._course.id
)
def _test_action(self, action):
"""
Test for XSS vulnerability in the given action
Build a request with the given action, call the instructor dashboard
view, and check that HTML code in a user's name is properly escaped.
"""
req = self._request_factory.post(
"dummy_url",
data={"action": action}
)
req.user = self._instructor
req.session = {}
mako_middleware_process_request(req)
resp = legacy.instructor_dashboard(req, self._course.id.to_deprecated_string())
respUnicode = resp.content.decode(settings.DEFAULT_CHARSET)
self.assertNotIn(self._evil_student.profile.name, respUnicode)
self.assertIn(escape(self._evil_student.profile.name), respUnicode)
def test_list_enrolled(self):
self._test_action("List enrolled students")
def test_dump_list_of_enrolled(self):
self._test_action("Dump list of enrolled students")
|
agpl-3.0
|
GdZ/scriptfile
|
software/googleAppEngine/lib/django_1_2/django/core/management/commands/startapp.py
|
321
|
1909
|
import os
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
class Command(LabelCommand):
help = "Creates a Django app directory structure for the given app name in the current directory."
args = "[appname]"
label = 'application name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
def handle_label(self, app_name, directory=None, **options):
if directory is None:
directory = os.getcwd()
# Determine the project_name by using the basename of directory,
# which should be the full path of the project directory (or the
# current directory if no directory was passed).
project_name = os.path.basename(directory)
if app_name == project_name:
raise CommandError("You cannot create an app with the same name"
" (%r) as your project." % app_name)
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)
copy_helper(self.style, 'app', app_name, directory, project_name)
class ProjectCommand(Command):
help = ("Creates a Django app directory structure for the given app name"
" in this project's directory.")
def __init__(self, project_directory):
super(ProjectCommand, self).__init__()
self.project_directory = project_directory
def handle_label(self, app_name, **options):
super(ProjectCommand, self).handle_label(app_name, self.project_directory, **options)
|
mit
|
charukiewicz/beer-manager
|
venv/lib/python3.4/site-packages/passlib/utils/_blowfish/_gen_files.py
|
21
|
6169
|
"""passlib.utils._blowfish._gen_files - meta script that generates unrolled.py"""
#=============================================================================
# imports
#=============================================================================
# core
import os
import textwrap
# pkg
from passlib.utils.compat import irange
# local
#=============================================================================
# helpers
#=============================================================================
def varlist(name, count):
return ", ".join(name + str(x) for x in irange(count))
def indent_block(block, padding):
"ident block of text"
lines = block.split("\n")
return "\n".join(
padding + line if line else ""
for line in lines
)
BFSTR = """\
((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff)
""".strip()
def render_encipher(write, indent=0):
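# Emit the 16 Feistel rounds of Blowfish fully unrolled, two rounds (one
# l/r pair) per loop iteration.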
for i in irange(0, 15, 2):
write(indent, """\
# Feistel substitution on left word (round %(i)d)
r ^= %(left)s ^ p%(i1)d
# Feistel substitution on right word (round %(i1)d)
l ^= %(right)s ^ p%(i2)d
""", i=i, i1=i+1, i2=i+2,
left=BFSTR, right=BFSTR.replace("l","r"),
)
def write_encipher_function(write, indent=0):
write(indent, """\
def encipher(self, l, r):
\"""blowfish encipher a single 64-bit block encoded as two 32-bit ints\"""
(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17) = self.P
S0, S1, S2, S3 = self.S
l ^= p0
""")
render_encipher(write, indent+1)
write(indent+1, """\
return r ^ p17, l
""")
def write_expand_function(write, indent=0):
write(indent, """\
def expand(self, key_words):
\"""unrolled version of blowfish key expansion\"""
##assert len(key_words) >= 18, "size of key_words must be >= 18"
P, S = self.P, self.S
S0, S1, S2, S3 = S
#=============================================================
# integrate key
#=============================================================
""")
for i in irange(18):
write(indent+1, """\
p%(i)d = P[%(i)d] ^ key_words[%(i)d]
""", i=i)
write(indent+1, """\
#=============================================================
# update P
#=============================================================
#------------------------------------------------
# update P[0] and P[1]
#------------------------------------------------
l, r = p0, 0
""")
render_encipher(write, indent+1)
write(indent+1, """\
p0, p1 = l, r = r ^ p17, l
""")
for i in irange(2, 18, 2):
write(indent+1, """\
#------------------------------------------------
# update P[%(i)d] and P[%(i1)d]
#------------------------------------------------
l ^= p0
""", i=i, i1=i+1)
render_encipher(write, indent+1)
write(indent+1, """\
p%(i)d, p%(i1)d = l, r = r ^ p17, l
""", i=i, i1=i+1)
write(indent+1, """\
#------------------------------------------------
# save changes to original P array
#------------------------------------------------
P[:] = (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17)
#=============================================================
# update S
#=============================================================
for box in S:
j = 0
while j < 256:
l ^= p0
""")
render_encipher(write, indent+3)
write(indent+3, """\
box[j], box[j+1] = l, r = r ^ p17, l
j += 2
""")
#=============================================================================
# main
#=============================================================================
def main():
target = os.path.join(os.path.dirname(__file__), "unrolled.py")
fh = open(target, "w")
def write(indent, msg, **kwds):
literal = kwds.pop("literal", False)
if kwds:
msg %= kwds
if not literal:
msg = textwrap.dedent(msg.rstrip(" "))
if indent:
msg = indent_block(msg, " " * (indent*4))
fh.write(msg)
write(0, """\
\"""passlib.utils._blowfish.unrolled - unrolled loop implementation of bcrypt,
autogenerated by _gen_files.py
currently this overrides the encipher() and expand() methods
with optimized versions, and leaves the other base.py methods alone.
\"""
#=================================================================
# imports
#=================================================================
# pkg
from passlib.utils._blowfish.base import BlowfishEngine as _BlowfishEngine
# local
__all__ = [
"BlowfishEngine",
]
#=================================================================
#
#=================================================================
class BlowfishEngine(_BlowfishEngine):
""")
write_encipher_function(write, indent=1)
write_expand_function(write, indent=1)
write(0, """\
#=================================================================
# eoc
#=================================================================
#=================================================================
# eof
#=================================================================
""")
if __name__ == "__main__":
main()
#=============================================================================
# eof
#=============================================================================
|
mit
|
nafex/pyload
|
module/lib/thrift/protocol/TBase.py
|
64
|
2532
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import *
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
try:
from thrift.protocol import fastbinary
except ImportError:
fastbinary = None
class TBase(object):
__slots__ = []
def __repr__(self):
L = ['%s=%r' % (key, getattr(self, key))
for key in self.__slots__ ]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__slots__:
my_val = getattr(self, attr)
other_val = getattr(other, attr)
if my_val != other_val:
return False
return True
def __ne__(self, other):
return not (self == other)
def read(self, iprot):
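# Fast path: decode with the C fastbinary extension when the protocol,
# transport and thrift_spec allow it; otherwise fall back to the generic
# struct reader.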
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStruct(self, self.thrift_spec)
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStruct(self, self.thrift_spec)
class TExceptionBase(Exception):
# old style class so python2.4 can raise exceptions derived from this
# This can't inherit from TBase because of that limitation.
__slots__ = []
__repr__ = TBase.__repr__.im_func
__eq__ = TBase.__eq__.im_func
__ne__ = TBase.__ne__.im_func
read = TBase.read.im_func
write = TBase.write.im_func
|
gpl-3.0
|
Hellrungj/CSC-412-Networking
|
rpc-project/venv/lib/python2.7/site-packages/rpyc/core/protocol.py
|
7
|
24841
|
"""
The RPyC protocol
"""
import sys
import weakref
import itertools
import socket
import time
from threading import Lock
from rpyc.lib.compat import pickle, next, is_py3k, maxint, select_error
from rpyc.lib.colls import WeakValueDict, RefCountingColl
from rpyc.core import consts, brine, vinegar, netref
from rpyc.core.async import AsyncResult
class PingError(Exception):
"""The exception raised should :func:`Connection.ping` fail"""
pass
DEFAULT_CONFIG = dict(
# ATTRIBUTES
allow_safe_attrs = True,
allow_exposed_attrs = True,
allow_public_attrs = False,
allow_all_attrs = False,
safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__',
'__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__',
'__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__',
'__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__',
'__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__',
'__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
'__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__',
'__truediv__', '__xor__', 'next', '__length_hint__', '__enter__',
'__exit__', '__next__',]),
exposed_prefix = "exposed_",
allow_getattr = True,
allow_setattr = False,
allow_delattr = False,
# EXCEPTIONS
include_local_traceback = True,
instantiate_custom_exceptions = False,
import_custom_exceptions = False,
instantiate_oldstyle_exceptions = False, # which don't derive from Exception
propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party
propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party
log_exceptions = True,
# MISC
allow_pickle = False,
connid = None,
credentials = None,
endpoints = None,
logger = None,
)
"""
The default configuration dictionary of the protocol. You can override these parameters
by passing a different configuration dict to the :class:`Connection` class.
.. note::
You only need to override the parameters you want to change. There's no need
to repeat parameters whose values remain unchanged.
======================================= ================ =====================================================
Parameter Default value Description
======================================= ================ =====================================================
``allow_safe_attrs`` ``True`` Whether to allow the use of *safe* attributes
(only those listed as ``safe_attrs``)
``allow_exposed_attrs`` ``True`` Whether to allow exposed attributes
(attributes that start with the ``exposed_prefix``)
``allow_public_attrs`` ``False`` Whether to allow public attributes
(attributes that don't start with ``_``)
``allow_all_attrs`` ``False`` Whether to allow all attributes (including private)
``safe_attrs`` ``set([...])`` The set of attributes considered safe
``exposed_prefix`` ``"exposed_"`` The prefix of exposed attributes
``allow_getattr`` ``True`` Whether to allow getting of attributes (``getattr``)
``allow_setattr`` ``False`` Whether to allow setting of attributes (``setattr``)
``allow_delattr`` ``False`` Whether to allow deletion of attributes (``delattr``)
``allow_pickle`` ``False`` Whether to allow the use of ``pickle``
``include_local_traceback`` ``True`` Whether to include the local traceback
in the remote exception
``instantiate_custom_exceptions`` ``False`` Whether to allow instantiation of
custom exceptions (not the built in ones)
``import_custom_exceptions`` ``False`` Whether to allow importing of
exceptions from not-yet-imported modules
``instantiate_oldstyle_exceptions`` ``False`` Whether to allow instantiation of exceptions
which don't derive from ``Exception``. This
is not applicable for Python 3 and later.
``propagate_SystemExit_locally`` ``False`` Whether to propagate ``SystemExit``
locally (kill the server) or to the other
party (kill the client)
``propagate_KeyboardInterrupt_locally`` ``True``          Whether to propagate ``KeyboardInterrupt``
locally (kill the server) or to the other
party (kill the client)
``logger`` ``None`` The logger instance to use to log exceptions
(before they are sent to the other party)
and other events. If ``None``, no logging takes place.
``connid`` ``None`` **Runtime**: the RPyC connection ID (used
mainly for debugging purposes)
``credentials`` ``None`` **Runtime**: the credentails object that was returned
by the server's :ref:`authenticator <api-authenticators>`
or ``None``
``endpoints`` ``None`` **Runtime**: The connection's endpoints. This is a tuple
made of the local socket endpoint (``getsockname``) and the
remote one (``getpeername``). This is set by the server
upon accepting a connection; client side connections
do not have this configuration option set.
======================================= ================ =====================================================
"""
_connection_id_generator = itertools.count(1)
class Connection(object):
"""The RPyC *connection* (AKA *protocol*).
:param service: the :class:`Service <rpyc.core.service.Service>` to expose
:param channel: the :class:`Channel <rpyc.core.channel.Channel>` over which messages are passed
:param config: the connection's configuration dict (overriding parameters
from the :data:`default configuration <DEFAULT_CONFIG>`)
:param _lazy: whether or not to initialize the service with the creation of
the connection. Default is True. If set to False, you will
need to call :func:`_init_service` manually later
"""
def __init__(self, service, channel, config = {}, _lazy = False):
self._closed = True
self._config = DEFAULT_CONFIG.copy()
self._config.update(config)
if self._config["connid"] is None:
self._config["connid"] = "conn%d" % (next(_connection_id_generator),)
self._channel = channel
self._seqcounter = itertools.count()
self._recvlock = Lock()
self._sendlock = Lock()
self._sync_replies = {}
self._async_callbacks = {}
self._local_objects = RefCountingColl()
self._last_traceback = None
self._proxy_cache = WeakValueDict()
self._netref_classes_cache = {}
self._remote_root = None
self._local_root = service(weakref.proxy(self))
if not _lazy:
self._init_service()
self._closed = False
def _init_service(self):
self._local_root.on_connect()
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __repr__(self):
a, b = object.__repr__(self).split(" object ")
return "%s %r object %s" % (a, self._config["connid"], b)
#
# IO
#
def _cleanup(self, _anyway = True):
if self._closed and not _anyway:
return
self._closed = True
self._channel.close()
self._local_root.on_disconnect()
self._sync_replies.clear()
self._async_callbacks.clear()
self._local_objects.clear()
self._proxy_cache.clear()
self._netref_classes_cache.clear()
self._last_traceback = None
self._remote_root = None
self._local_root = None
#self._seqcounter = None
#self._config.clear()
def close(self, _catchall = True):
"""closes the connection, releasing all held resources"""
if self._closed:
return
self._closed = True
try:
self._async_request(consts.HANDLE_CLOSE)
except EOFError:
pass
except Exception:
if not _catchall:
raise
finally:
self._cleanup(_anyway = True)
@property
def closed(self):
"""Indicates whether the connection has been closed or not"""
return self._closed
def fileno(self):
"""Returns the connectin's underlying file descriptor"""
return self._channel.fileno()
def ping(self, data = None, timeout = 3):
"""
Asserts that the other party is functioning properly, by making sure
the *data* is echoed back before the *timeout* expires
:param data: the data to send (leave ``None`` for the default buffer)
:param timeout: the maximal time to wait for echo
:raises: :class:`PingError` if the echoed data does not match
"""
if data is None:
data = "abcdefghijklmnopqrstuvwxyz" * 20
res = self.async_request(consts.HANDLE_PING, data, timeout = timeout)
if res.value != data:
raise PingError("echo mismatches sent data")
def _send(self, msg, seq, args):
data = brine.dump((msg, seq, args))
self._sendlock.acquire()
try:
self._channel.send(data)
finally:
self._sendlock.release()
def _send_request(self, handler, args):
seq = next(self._seqcounter)
self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))
return seq
def _send_reply(self, seq, obj):
self._send(consts.MSG_REPLY, seq, self._box(obj))
def _send_exception(self, seq, exctype, excval, exctb):
exc = vinegar.dump(exctype, excval, exctb,
include_local_traceback = self._config["include_local_traceback"])
self._send(consts.MSG_EXCEPTION, seq, exc)
#
# boxing
#
def _box(self, obj):
"""store a local object in such a way that it could be recreated on
the remote party either by-value or by-reference"""
if brine.dumpable(obj):
return consts.LABEL_VALUE, obj
if type(obj) is tuple:
return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self:
return consts.LABEL_LOCAL_REF, obj.____oid__
else:
self._local_objects.add(obj)
try:
cls = obj.__class__
except Exception:
# see issue #16
cls = type(obj)
if not isinstance(cls, type):
cls = type(obj)
return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__)
def _unbox(self, package):
"""recreate a local object representation of the remote object: if the
object is passed by value, just return it; if the object is passed by
reference, create a netref to it"""
label, value = package
if label == consts.LABEL_VALUE:
return value
if label == consts.LABEL_TUPLE:
return tuple(self._unbox(item) for item in value)
if label == consts.LABEL_LOCAL_REF:
return self._local_objects[value]
if label == consts.LABEL_REMOTE_REF:
oid, clsname, modname = value
if oid in self._proxy_cache:
return self._proxy_cache[oid]
proxy = self._netref_factory(oid, clsname, modname)
self._proxy_cache[oid] = proxy
return proxy
raise ValueError("invalid label %r" % (label,))
def _netref_factory(self, oid, clsname, modname):
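# Resolve the proxy class for the remote type: check the per-connection
# cache, then the builtin netref classes, else inspect the remote object
# and synthesize (and cache) a new class.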
typeinfo = (clsname, modname)
if typeinfo in self._netref_classes_cache:
cls = self._netref_classes_cache[typeinfo]
elif typeinfo in netref.builtin_classes_cache:
cls = netref.builtin_classes_cache[typeinfo]
else:
info = self.sync_request(consts.HANDLE_INSPECT, oid)
cls = netref.class_factory(clsname, modname, info)
self._netref_classes_cache[typeinfo] = cls
return cls(weakref.ref(self), oid)
#
# dispatching
#
def _dispatch_request(self, seq, raw_args):
try:
handler, args = raw_args
args = self._unbox(args)
res = self._HANDLERS[handler](self, *args)
except:
# need to catch old style exceptions too
t, v, tb = sys.exc_info()
self._last_traceback = tb
if self._config["logger"] and t is not StopIteration:
self._config["logger"].debug("Exception caught", exc_info=True)
if t is SystemExit and self._config["propagate_SystemExit_locally"]:
raise
if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]:
raise
self._send_exception(seq, t, v, tb)
else:
self._send_reply(seq, res)
def _dispatch_reply(self, seq, raw):
obj = self._unbox(raw)
if seq in self._async_callbacks:
self._async_callbacks.pop(seq)(False, obj)
else:
self._sync_replies[seq] = (False, obj)
def _dispatch_exception(self, seq, raw):
obj = vinegar.load(raw,
import_custom_exceptions = self._config["import_custom_exceptions"],
instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"],
instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"])
if seq in self._async_callbacks:
self._async_callbacks.pop(seq)(True, obj)
else:
self._sync_replies[seq] = (True, obj)
#
# serving
#
def _recv(self, timeout, wait_for_lock):
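# Read a single frame off the channel under the receive lock; returns None
# if the lock (in non-blocking mode) or the poll timeout is not satisfied,
# and closes the connection on EOF.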
if not self._recvlock.acquire(wait_for_lock):
return None
try:
if self._channel.poll(timeout):
data = self._channel.recv()
else:
data = None
except EOFError:
self.close()
raise
finally:
self._recvlock.release()
return data
def _dispatch(self, data):
msg, seq, args = brine.load(data)
if msg == consts.MSG_REQUEST:
self._dispatch_request(seq, args)
elif msg == consts.MSG_REPLY:
self._dispatch_reply(seq, args)
elif msg == consts.MSG_EXCEPTION:
self._dispatch_exception(seq, args)
else:
raise ValueError("invalid message type: %r" % (msg,))
def poll(self, timeout = 0):
"""Serves a single transaction, should one arrives in the given
interval. Note that handling a request/reply may trigger nested
requests, which are all part of a single transaction.
:returns: ``True`` if a transaction was served, ``False`` otherwise"""
data = self._recv(timeout, wait_for_lock = False)
if not data:
return False
self._dispatch(data)
return True
def serve(self, timeout = 1):
"""Serves a single request or reply that arrives within the given
time frame (default is 1 sec). Note that the dispatching of a request
might trigger multiple (nested) requests, thus this function may be
reentrant.
:returns: ``True`` if a request or reply were received, ``False``
otherwise.
"""
data = self._recv(timeout, wait_for_lock = True)
if not data:
return False
self._dispatch(data)
return True
def serve_all(self):
"""Serves all requests and replies for as long as the connection is
alive."""
try:
while True:
self.serve(0.1)
except (socket.error, select_error, IOError):
if not self.closed:
raise
except EOFError:
pass
finally:
self.close()
def poll_all(self, timeout = 0):
"""Serves all requests and replies that arrive within the given interval.
:returns: ``True`` if at least a single transaction was served, ``False`` otherwise
"""
at_least_once = False
t0 = time.time()
duration = timeout
try:
while True:
if self.poll(duration):
at_least_once = True
if timeout is not None:
duration = t0 + timeout - time.time()
if duration < 0:
break
except EOFError:
pass
return at_least_once
#
# requests
#
def sync_request(self, handler, *args):
"""Sends a synchronous request (waits for the reply to arrive)
:raises: any exception that the request may generate
:returns: the result of the request
"""
seq = self._send_request(handler, args)
while seq not in self._sync_replies:
self.serve(0.1)
isexc, obj = self._sync_replies.pop(seq)
if isexc:
raise obj
else:
return obj
def _async_request(self, handler, args = (), callback = (lambda a, b: None)):
seq = self._send_request(handler, args)
self._async_callbacks[seq] = callback
def async_request(self, handler, *args, **kwargs):
"""Send an asynchronous request (does not wait for it to finish)
:returns: an :class:`rpyc.core.async.AsyncResult` object, which will
eventually hold the result (or exception)
"""
timeout = kwargs.pop("timeout", None)
if kwargs:
raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),))
res = AsyncResult(weakref.proxy(self))
self._async_request(handler, args, res)
if timeout is not None:
res.set_expiry(timeout)
return res
@property
def root(self):
"""Fetches the root object (service) of the other party"""
if self._remote_root is None:
self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
return self._remote_root
#
# attribute access
#
def _check_attr(self, obj, name):
if self._config["allow_exposed_attrs"]:
if name.startswith(self._config["exposed_prefix"]):
name2 = name
else:
name2 = self._config["exposed_prefix"] + name
if hasattr(obj, name2):
return name2
if self._config["allow_all_attrs"]:
return name
if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]:
return name
if self._config["allow_public_attrs"] and not name.startswith("_"):
return name
return False
def _access_attr(self, oid, name, args, overrider, param, default):
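# Generic attribute access: 'overrider' names a per-type hook (e.g.
# _rpyc_getattr), 'param' the config switch (e.g. allow_getattr) and
# 'default' the builtin accessor used when no hook is defined.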
if is_py3k:
if type(name) is bytes:
name = str(name, "utf8")
elif type(name) is not str:
raise TypeError("name must be a string")
else:
if type(name) not in (str, unicode):
raise TypeError("name must be a string")
name = str(name) # IronPython issue #10 + py3k issue
obj = self._local_objects[oid]
accessor = getattr(type(obj), overrider, None)
if accessor is None:
name2 = self._check_attr(obj, name)
if not self._config[param] or not name2:
raise AttributeError("cannot access %r" % (name,))
accessor = default
name = name2
return accessor(obj, name, *args)
#
# request handlers
#
def _handle_ping(self, data):
return data
def _handle_close(self):
self._cleanup()
def _handle_getroot(self):
return self._local_root
def _handle_del(self, oid):
self._local_objects.decref(oid)
def _handle_repr(self, oid):
return repr(self._local_objects[oid])
def _handle_str(self, oid):
return str(self._local_objects[oid])
def _handle_cmp(self, oid, other):
# cmp() might enter recursive resonance... yet another workaround
#return cmp(self._local_objects[oid], other)
obj = self._local_objects[oid]
try:
return type(obj).__cmp__(obj, other)
except (AttributeError, TypeError):
return NotImplemented
def _handle_hash(self, oid):
return hash(self._local_objects[oid])
def _handle_call(self, oid, args, kwargs=()):
return self._local_objects[oid](*args, **dict(kwargs))
def _handle_dir(self, oid):
return tuple(dir(self._local_objects[oid]))
def _handle_inspect(self, oid):
return tuple(netref.inspect_methods(self._local_objects[oid]))
def _handle_getattr(self, oid, name):
return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr)
def _handle_delattr(self, oid, name):
return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr)
def _handle_setattr(self, oid, name, value):
return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr)
def _handle_callattr(self, oid, name, args, kwargs):
return self._handle_getattr(oid, name)(*args, **dict(kwargs))
def _handle_pickle(self, oid, proto):
if not self._config["allow_pickle"]:
raise ValueError("pickling is disabled")
return pickle.dumps(self._local_objects[oid], proto)
def _handle_buffiter(self, oid, count):
items = []
obj = self._local_objects[oid]
i = 0
try:
while i < count:
items.append(next(obj))
i += 1
except StopIteration:
pass
return tuple(items)
def _handle_oldslicing(self, oid, attempt, fallback, start, stop, args):
try:
# first try __xxxitem__
getitem = self._handle_getattr(oid, attempt)
return getitem(slice(start, stop), *args)
except Exception:
# fallback to __xxxslice__. see issue #41
if stop is None:
stop = maxint
getslice = self._handle_getattr(oid, fallback)
return getslice(start, stop, *args)
# collect handlers
_HANDLERS = {}
for name, obj in dict(locals()).items():
if name.startswith("_handle_"):
name2 = "HANDLE_" + name[8:].upper()
if hasattr(consts, name2):
_HANDLERS[getattr(consts, name2)] = obj
else:
raise NameError("no constant defined for %r" % (name,))
del name, name2, obj
|
gpl-3.0
|
takluyver/readthedocs.org
|
readthedocs/core/management/commands/clean_builds.py
|
14
|
2156
|
from datetime import datetime, timedelta
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Max
from builds.models import Build, Version
from builds.utils import clean_build_path
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = ('Clean up stale build paths per project version')
option_list = BaseCommand.option_list + (
make_option('--days',
dest='days',
type='int',
default=365,
help='Find builds older than DAYS days, default: 365'),
make_option('--dryrun',
action='store_true',
dest='dryrun',
help='Perform dry run on build cleanup'),
)
def handle(self, *args, **options):
'''
Find stale builds and remove build paths
'''
max_date = datetime.now() - timedelta(days=options['days'])
queryset = (Build.objects
.values('project', 'version')
.annotate(max_date=Max('date'))
.filter(max_date__lt=max_date)
.order_by('-max_date'))
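# Each row is the newest build per (project, version) pair older than the cutoff.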
for build in queryset:
try:
# Get version from build version id, perform sanity check on
# latest build date
version = Version.objects.get(id=build['version'])
latest_build = version.builds.latest('date')
if latest_build.date > max_date:
log.warn('{0} is newer than {1}'.format(
latest_build, max_date))
continue
path = version.get_build_path()
if path is not None:
log.info(
('Found stale build path for {0} '
'at {1}, last used on {2}').format(
version, path, latest_build.date))
if not options['dryrun']:
clean_build_path(version)
except Version.DoesNotExist:
pass
|
mit
|
jcrist/pydy
|
examples/double_pendulum/visualize.py
|
7
|
2397
|
"""
This file uses pydy.viz to visualize the double pendulum. Run this script
from the command line:
$ python visualize.py
"""
from numpy import pi
from pydy.viz.shapes import Cylinder, Sphere
from pydy.viz.scene import Scene
from pydy.viz.visualization_frame import VisualizationFrame
from simulate import *
# Create geometry
# ===============
# Each link in the pendulum is visualized with a cylinder, and a sphere at its
# far end.
link = Cylinder(name='link', radius=0.5, length=l, color='red')
sphere = Sphere(name='sphere', radius=1.0)
# By default, Cylinders are drawn so that their center is at the origin of the
# VisualizationFrame, and their axis is the y axis of the VisualizationFrame.
# We want the end of the Cylinder to be at the origin of the
# VisualizationFrame, and we want the Cylinder's axis to be aligned with the x
# axis of the VisualizationFrame. For these reasons, we must use the
# 'orientnew' and 'locatenew' methods to create new frames/points.
linkP_frame = A.orientnew('frameP', 'Axis', [0.5 * pi, N.z])
linkP_origin = O.locatenew('originP', 0.5 * l * A.x)
linkP_viz_frame = VisualizationFrame('linkP', linkP_frame, linkP_origin, link)
linkR_frame = B.orientnew('frameR', 'Axis', [0.5 * pi, N.z])
linkR_origin = P.locatenew('originR', 0.5 * l * B.x)
linkR_viz_frame = VisualizationFrame('linkR', linkR_frame, linkR_origin, link)
sphereP_viz_frame = VisualizationFrame('sphereP', N, P, sphere)
sphereR_viz_frame = VisualizationFrame('sphereR', N, R, sphere)
# Construct the scene
# ===================
# We want gravity to be directed downwards in the visualization. Gravity is in
# the -x direction. By default, the visualization uses the xz plane as the
# ground plane. Thus, gravity is contained in the ground plane. However, we
# want gravity to point in the -y direction in the visualization. To achieve
# this, we create a world frame that is rotated +90 degrees about the N frame's
# z direction.
world_frame = N.orientnew('world', 'Axis', [0.5 * pi, N.z])
scene = Scene(world_frame, O,
linkP_viz_frame, linkR_viz_frame, sphereP_viz_frame, sphereR_viz_frame)
# Create the visualization
# ========================
scene.generate_visualization_json_system(sys)
if __name__ == "__main__":
try:  # if called from inside a notebook
scene.display_ipython()
except Exception:  # if called from the interpreter
scene.display()
|
bsd-3-clause
|
sup95/zulip
|
zerver/views/webhooks/librato.py
|
11
|
7746
|
from __future__ import absolute_import
from typing import Any, Callable, Dict, List, Optional, Tuple
from six import text_type
from six.moves import zip
from django.utils.translation import ugettext as _
from django.utils.datetime_safe import datetime
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.response import json_success, json_error
from zerver.lib.actions import check_send_message
from zerver.models import Client, UserProfile
import ujson
ALERT_CLEAR = 'clear'
ALERT_VIOLATION = 'violations'
SNAPSHOT = 'image_url'
class LibratoWebhookParser(object):
ALERT_URL_TEMPLATE = "https://metrics.librato.com/alerts#/{alert_id}"
def __init__(self, payload, attachments):
# type: (Dict[str, Any], List[Dict[str, Any]]) -> None
self.payload = payload
self.attachments = attachments
def generate_alert_url(self, alert_id):
# type: (int) -> text_type
return self.ALERT_URL_TEMPLATE.format(alert_id=alert_id)
def parse_alert(self):
# type: () -> Tuple[int, text_type, text_type, text_type]
alert = self.payload['alert']
alert_id = alert['id']
return alert_id, alert['name'], self.generate_alert_url(alert_id), alert['runbook_url']
def parse_condition(self, condition):
# type: (Dict[str, Any]) -> Tuple[text_type, text_type, text_type, text_type]
summary_function = condition['summary_function']
threshold = condition.get('threshold', '')
condition_type = condition['type']
duration = condition.get('duration', '')
return summary_function, threshold, condition_type, duration
def parse_violation(self, violation):
# type: (Dict[str, Any]) -> Tuple[text_type, text_type]
metric_name = violation['metric']
recorded_at = datetime.fromtimestamp(violation['recorded_at'])
return metric_name, recorded_at
def parse_conditions(self):
# type: () -> List[Dict[str, Any]]
conditions = self.payload['conditions']
return conditions
def parse_violations(self):
# type: () -> List[Dict[str, Any]]
violations = self.payload['violations']['test-source']
return violations
def parse_snapshot(self, snapshot):
# type: (Dict[str, Any]) -> Tuple[text_type, text_type, text_type]
author_name, image_url, title = snapshot['author_name'], snapshot['image_url'], snapshot['title']
return author_name, image_url, title
class LibratoWebhookHandler(LibratoWebhookParser):
def __init__(self, payload, attachments):
# type: (Dict[str, Any], List[Dict[str, Any]]) -> None
super(LibratoWebhookHandler, self).__init__(payload, attachments)
self.payload_available_types = {
ALERT_CLEAR: self.handle_alert_clear_message,
ALERT_VIOLATION: self.handle_alert_violation_message
}
self.attachments_available_types = {
SNAPSHOT: self.handle_snapshots
}
def find_handle_method(self):
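# Prefer a payload-driven handler ('clear'/'violations'), then fall back
# to attachment handlers (snapshots); any other shape raises below.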
# type: () -> Callable
for available_type in self.payload_available_types:
if self.payload.get(available_type):
return self.payload_available_types[available_type]
for available_type in self.attachments_available_types:
if self.attachments[0].get(available_type):
return self.attachments_available_types[available_type]
raise Exception("Unexcepted message type")
def handle(self):
# type: () -> text_type
return self.find_handle_method()()
def generate_topic(self):
# type: () -> text_type
if self.attachments:
return "Snapshots"
topic_template = "Alert {alert_name}"
alert_id, alert_name, alert_url, alert_runbook_url = self.parse_alert()
return topic_template.format(alert_name=alert_name)
def handle_alert_clear_message(self):
# type: () -> text_type
alert_clear_template = "Alert [alert_name]({alert_url}) has cleared at {trigger_time}!"
trigger_time = datetime.fromtimestamp(self.payload['trigger_time'])
alert_id, alert_name, alert_url, alert_runbook_url = self.parse_alert()
content = alert_clear_template.format(alert_name=alert_name, alert_url=alert_url, trigger_time=trigger_time)
return content
def handle_snapshots(self):
# type: () -> text_type
content = u''
for attachment in self.attachments:
content += self.handle_snapshot(attachment)
return content
def handle_snapshot(self, snapshot):
# type: (Dict[str, Any]) -> text_type
snapshot_template = u"**{author_name}** sent a [snapshot]({image_url}) of [metric]({title})"
author_name, image_url, title = self.parse_snapshot(snapshot)
content = snapshot_template.format(author_name=author_name, image_url=image_url, title=title)
return content
def handle_alert_violation_message(self):
# type: () -> text_type
alert_violation_template = u"Alert [alert_name]({alert_url}) has triggered! "
alert_id, alert_name, alert_url, alert_runbook_url = self.parse_alert()
content = alert_violation_template.format(alert_name=alert_name, alert_url=alert_url)
if alert_runbook_url:
alert_runbook_template = u"[Reaction steps]({alert_runbook_url})"
content += alert_runbook_template.format(alert_runbook_url=alert_runbook_url)
content += self.generate_conditions_and_violations()
return content
def generate_conditions_and_violations(self):
# type: () -> text_type
conditions = self.parse_conditions()
violations = self.parse_violations()
content = u""
for condition, violation in zip(conditions, violations):
content += self.generate_violated_metric_condition(violation, condition)
return content
def generate_violated_metric_condition(self, violation, condition):
# type: (Dict[str, Any], Dict[str, Any]) -> text_type
summary_function, threshold, condition_type, duration = self.parse_condition(condition)
metric_name, recorded_at = self.parse_violation(violation)
metric_condition_template = u"\n>Metric `{metric_name}`, {summary_function} was {condition_type} {threshold}"
content = metric_condition_template.format(
metric_name=metric_name, summary_function=summary_function, condition_type=condition_type,
threshold=threshold)
if duration:
content += u" by {duration}s".format(duration=duration)
content += u", recorded at {recorded_at}".format(recorded_at=recorded_at)
return content
@api_key_only_webhook_view('Librato')
@has_request_variables
def api_librato_webhook(request, user_profile, client, payload=REQ(converter=ujson.loads, default={}),
stream=REQ(default='librato'), topic=REQ(default=None)):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], text_type, text_type) -> HttpResponse
try:
attachments = ujson.loads(request.body).get('attachments', [])
except ValueError:
attachments = []
if not attachments and not payload:
return json_error(_("Malformed JSON input"))
message_handler = LibratoWebhookHandler(payload, attachments)
if not topic:
topic = message_handler.generate_topic()
try:
content = message_handler.handle()
except Exception as e:
return json_error(_(str(e)))
check_send_message(user_profile, client, "stream", [stream], topic, content)
return json_success()
|
apache-2.0
|
yashsharan/sympy
|
sympy/benchmarks/bench_discrete_log.py
|
30
|
2523
|
from __future__ import print_function, division
import sys
from time import time
from sympy.ntheory.residue_ntheory import (discrete_log,
_discrete_log_trial_mul, _discrete_log_shanks_steps,
_discrete_log_pollard_rho, _discrete_log_pohlig_hellman)
# Cyclic group (Z/pZ)* with p prime, order p - 1 and generator g
data_set_1 = [
# p, p - 1, g
[191, 190, 19],
[46639, 46638, 6],
[14789363, 14789362, 2],
[4254225211, 4254225210, 2],
[432751500361, 432751500360, 7],
[158505390797053, 158505390797052, 2],
[6575202655312007, 6575202655312006, 5],
[8430573471995353769, 8430573471995353768, 3],
[3938471339744997827267, 3938471339744997827266, 2],
[875260951364705563393093, 875260951364705563393092, 5],
]
# Cyclic sub-groups of (Z/nZ)* with prime order p and generator g
# (n, p are primes and n = 2 * p + 1)
data_set_2 = [
# n, p, g
[227, 113, 3],
[2447, 1223, 2],
[24527, 12263, 2],
[245639, 122819, 2],
[2456747, 1228373, 3],
[24567899, 12283949, 3],
[245679023, 122839511, 2],
[2456791307, 1228395653, 3],
[24567913439, 12283956719, 2],
[245679135407, 122839567703, 2],
[2456791354763, 1228395677381, 3],
[24567913550903, 12283956775451, 2],
[245679135509519, 122839567754759, 2],
]
# Cyclic sub-groups of (Z/nZ)* with smooth order o and generator g
data_set_3 = [
# n, o, g
[2**118, 2**116, 3],
]
def bench_discrete_log(data_set, algo=None):
if algo is None:
f = discrete_log
elif algo == 'trial':
f = _discrete_log_trial_mul
elif algo == 'shanks':
f = _discrete_log_shanks_steps
elif algo == 'rho':
f = _discrete_log_pollard_rho
elif algo == 'ph':
f = _discrete_log_pohlig_hellman
else:
raise ValueError("Argument 'algo' should be one"
" of ('trial', 'shanks', 'rho' or 'ph')")
for i, data in enumerate(data_set):
for j, (n, p, g) in enumerate(data):
t = time()
l = f(n, pow(g, p - 1, n), g, p)
t = time() - t
print('[%02d-%03d] %15.10f' % (i, j, t))
assert l == p - 1
if __name__ == '__main__':
algo = sys.argv[1] \
if len(sys.argv) > 1 else None
data_set = [
data_set_1,
data_set_2,
data_set_3,
]
bench_discrete_log(data_set, algo)
|
bsd-3-clause
|
zcbenz/cefode-chromium
|
chrome_frame/tools/test/page_cycler/cf_cycler.py
|
78
|
2625
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates IE to visit a list of web sites while running CF in full tab mode.
The page cycler automates IE and navigates it to a series of URLs. It is
designed to be run with Chrome Frame configured to load every URL inside
CF full tab mode.
TODO(robertshield): Make use of the python unittest module as per
review comments.
"""
import optparse
import sys
import time
import win32com.client
import win32gui
def LoadSiteList(path):
"""Loads a list of URLs from |path|.
Expects the URLs to be separated by newlines, with no leading or trailing
whitespace.
Args:
path: The path to a file containing a list of new-line separated URLs.
Returns:
A list of strings, each one a URL.
"""
f = open(path)
urls = f.readlines()
f.close()
return urls
def LaunchIE():
"""Starts up IE, makes it visible and returns the automation object.
Returns:
The IE automation object.
"""
ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.visible = 1
win32gui.SetForegroundWindow(ie.HWND)
return ie
def RunTest(url, ie):
"""Loads |url| into the InternetExplorer.Application instance in |ie|.
Waits for the Document object to be created and then waits for
the document ready state to reach READYSTATE_COMPLETE.
Args:
url: A string containing the url to navigate to.
ie: The IE automation object to navigate.
"""
print "Navigating to " + url
ie.Navigate(url)
timer = 0
READYSTATE_COMPLETE = 4
last_ready_state = -1
for retry in xrange(60):
try:
# TODO(robertshield): Become an event sink instead of polling for
# changes to the ready state.
last_ready_state = ie.Document.ReadyState
if last_ready_state == READYSTATE_COMPLETE:
break
except:
# TODO(robertshield): Find the precise exception related to ie.Document
# being not accessible and handle it here.
print "Unexpected error:", sys.exc_info()[0]
raise
time.sleep(1)
if last_ready_state != READYSTATE_COMPLETE:
print "Timeout waiting for " + url
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--url_list', default='urllist',
help='The path to the list of URLs')
(opts, args) = parser.parse_args()
urls = LoadSiteList(opts.url_list)
ie = LaunchIE()
for url in urls:
RunTest(url, ie)
time.sleep(1)
ie.visible = 0
ie.Quit()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
ayoubg/gem5-graphics
|
gem5/tests/quick/se/01.hello-2T-smt/test.py
|
40
|
1798
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
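# Run two copies of 'hello' as two hardware threads on a single SMT CPU.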
process1 = LiveProcess(cmd = 'hello', executable = binpath('hello'))
process2 = LiveProcess(cmd = 'hello', executable = binpath('hello'))
root.system.cpu[0].workload = [process1, process2]
root.system.cpu[0].numThreads = 2
|
bsd-3-clause
|
jnishi/chainer
|
tests/chainer_tests/links_tests/connection_tests/test_scale.py
|
9
|
4443
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_W': True, 'bias_term': False, 'bias_shape': None},
{'learn_W': True, 'bias_term': True, 'bias_shape': None},
{'learn_W': False, 'bias_term': False, 'bias_shape': None},
{'learn_W': False, 'bias_term': True, 'bias_shape': (2,)}
)
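# The four parameter sets exercise Scale with a learned or externally supplied
# W, and with or without the optional bias (including an explicit bias_shape).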
class TestScale(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
self.W = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] *= self.W[j]
if self.bias_term:
self.y_expected[i, j, k] += self.b[j]
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
bias_term = self.bias_term
bias_shape = self.bias_shape
axis = 1
if self.learn_W:
self.link = links.Scale(
axis, self.W.shape, bias_term, bias_shape)
self.link.W.data = self.W
if bias_term:
self.link.bias.b.data = self.b
else:
self.link = links.Scale(
axis, None, bias_term, bias_shape)
if bias_term:
self.link.bias.b.data = self.b
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_W, hasattr(self.link, 'W'))
self.assertEqual(self.bias_term, hasattr(self.link, 'bias'))
def check_forward(self, x_data, W_data, y_expected):
x = chainer.Variable(x_data)
if W_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
W = chainer.Variable(W_data)
y = self.link(x, W)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_forward(self.x, W, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
self.check_forward(x, W, self.y_expected)
def check_backward(self, x_data, W_data, y_grad):
if W_data is None:
params = [self.link.W]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, W_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_backward(self.x, W, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, W, gy)
class TestScaleInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        W_data = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.W = chainer.Variable(W_data)
def test_scale_invalid_argc1(self):
func = links.Scale(self.axis, self.W.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.W)
def test_scale_invalid_argc2(self):
func = links.Scale(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
class TestScaleNoBiasShape(unittest.TestCase):
def test_scale_no_bias_shape(self):
axis = 1
with self.assertRaises(ValueError):
links.Scale(axis, None, True, None)
testing.run_module(__name__, __file__)
|
mit
|
bwyyoung/haroopad
|
node_modules/grunt/node_modules/js-yaml/support/pyyaml-src/parser.py
|
210
|
25495
|
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { BLOCK-ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
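#
# Illustrative sketch (assuming the installed package is importable as
# `yaml`): the event stream produced by this parser can be inspected with
# the top-level helper
#
#     import yaml
#     for event in yaml.parse('a: 1'):
#         print(event)
#
# which yields StreamStartEvent, DocumentStartEvent, MappingStartEvent, two
# ScalarEvents, MappingEndEvent, DocumentEndEvent and StreamEndEvent.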
__all__ = ['Parser', 'ParserError']
from .error import MarkedYAMLError
from .tokens import *
from .events import *
from .scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser:
    # Since writing a recursive descent parser is a straightforward task, we
    # do not give many comments here.
DEFAULT_TAGS = {
'!': '!',
'!!': 'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == 'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == 'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle,
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
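    # For example (illustrative): after the directive
    #     %TAG !e! tag:example.com,2000:app/
    # process_directives() records the handle mapping, so a node tagged
    # `!e!foo` resolves to `tag:example.com,2000:app/foo` in parse_node().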
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle,
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == '!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == '!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == '!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), '',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
    # Note that while the production rules for flow_sequence_entry and
    # flow_mapping_entry are identical, their interpretations are different.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), '', mark, mark)
|
gpl-3.0
|
danic85/time-capsule
|
src/file-browser.py
|
1
|
1530
|
import os, time, datetime, sys
from PIL import Image
path = '../test_files'
def get_minimum_creation_time(exif_data):
mtime = '?'
if 306 in exif_data and exif_data[306] < mtime: # 306 = DateTime
mtime = exif_data[306]
if 36867 in exif_data and exif_data[36867] < mtime: # 36867 = DateTimeOriginal
mtime = exif_data[36867]
if 36868 in exif_data and exif_data[36868] < mtime: # 36868 = DateTimeDigitized
mtime = exif_data[36868]
if mtime == '?':
return False
return time.strptime(mtime.encode('ascii'), "%Y:%m:%d %H:%M:%S")
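# Illustrative note: the numeric keys above are standard EXIF tag ids; with
# Pillow they can be resolved to names, e.g.
#     from PIL import ExifTags
#     ExifTags.TAGS[306]    # -> 'DateTime'
#     ExifTags.TAGS[36867]  # -> 'DateTimeOriginal'
# The '?' sentinel works because ASCII digits sort before '?', so any real
# 'YYYY:MM:DD HH:MM:SS' string compares less than it.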
today = datetime.datetime.now()
threshold = datetime.datetime(today.year - 21, today.month, today.day, today.hour, today.minute, today.second)
for root, dirs, files in os.walk(path):
for name in files:
sys.stdout.write(root + '/' + name + ': ')
if name.endswith((".jpeg", ".jpg")):
img = Image.open(root+'/'+name)
exif_data = img._getexif()
if exif_data is not None:
                created = get_minimum_creation_time(exif_data)
                if created is not False:
created = datetime.datetime(*created[:6])
print str(created) + ' < ' + str(threshold) + ' = ' + str(created < threshold)
else:
print 'could not find created date in exif_data: ' + str(exif_data)
else:
print 'could not find exif_data'
else:
print 'not a jpg or jpeg'
|
gpl-3.0
|
bjko/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/layouttestresults.py
|
124
|
4096
|
# Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
# FIXME: This should be unified with all the layout test results code in the layout_tests package
# This doesn't belong in common.net, but we don't have a better place for it yet.
def path_for_layout_test(test_name):
return "LayoutTests/%s" % test_name
# FIXME: This should be unified with ResultsSummary or other NRWT layout tests code
# in the layout_tests package.
# This doesn't belong in common.net, but we don't have a better place for it yet.
class LayoutTestResults(object):
@classmethod
def results_from_string(cls, string):
if not string:
return None
test_results = ResultsJSONParser.parse_results_json(string)
if not test_results:
return None
return cls(test_results)
def __init__(self, test_results):
self._test_results = test_results
self._failure_limit_count = None
self._unit_test_failures = []
# FIXME: run-webkit-tests should store the --exit-after-N-failures value
# (or some indication of early exit) somewhere in the results.json
# file. Until it does, callers should set the limit to
# --exit-after-N-failures value used in that run. Consumers of LayoutTestResults
# may use that value to know if absence from the failure list means PASS.
# https://bugs.webkit.org/show_bug.cgi?id=58481
def set_failure_limit_count(self, limit):
self._failure_limit_count = limit
def failure_limit_count(self):
return self._failure_limit_count
def test_results(self):
return self._test_results
def results_matching_failure_types(self, failure_types):
return [result for result in self._test_results if result.has_failure_matching_types(*failure_types)]
def tests_matching_failure_types(self, failure_types):
return [result.test_name for result in self.results_matching_failure_types(failure_types)]
def failing_test_results(self):
return self.results_matching_failure_types(test_failures.ALL_FAILURE_CLASSES)
def failing_tests(self):
return [result.test_name for result in self.failing_test_results()] + self._unit_test_failures
def add_unit_test_failures(self, unit_test_results):
self._unit_test_failures = unit_test_results
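# Illustrative usage sketch (assumes `json_string` holds the contents of a
# results JSON file produced by a layout test run):
#
#     results = LayoutTestResults.results_from_string(json_string)
#     if results:
#         print results.failing_tests()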
|
bsd-3-clause
|
yonglehou/spiderfoot
|
ext/metapdf/metapdf.py
|
8
|
2657
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Ali Anari
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
.. module:: metapdf
:platform: Unix, Windows
:synopsis: The metapdf class implementation.
.. moduleauthor:: Ali Anari <[email protected]>
"""
__author__ = "Ali Anari"
__author_email__ = "[email protected]"
import os, re
from pyPdf import PdfFileReader
class _meta_pdf_reader(object):
def __init__(self):
self.instance = self.__hash__()
self.metadata_regex = re.compile('(?:\/(\w+)\s?\(([^\n\r]*)\)\n?\r?)', re.S)
self.metadata_offset = 2048
def read_metadata(self, stream):
"""This function reads a PDF file stream and returns its metadata.
        :param stream: The PDF file stream to read.
        :type stream: file
:returns: dict -- The returned metadata as a dictionary of properties.
"""
# Scan the last 2048 bytes, the most
# frequent metadata density block
stream.seek(-self.metadata_offset, os.SEEK_END)
properties = dict()
try:
properties = dict(('/' + p.group(1), p.group(2).decode('utf-8')) \
for p in self.metadata_regex.finditer(stream.read(self.metadata_offset)))
if '/Author' in properties:
return properties
except UnicodeDecodeError:
properties.clear()
# Parse the xref table using pyPdf
properties = PdfFileReader(stream).documentInfo
if properties:
return properties
return {}
_metaPdfReader = _meta_pdf_reader()
def MetaPdfReader(): return _metaPdfReader
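# Illustrative usage (assumes 'example.pdf' exists on disk):
#
#     with open('example.pdf', 'rb') as stream:
#         print MetaPdfReader().read_metadata(stream)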
|
gpl-2.0
|
nsnam/ns-3.16-git
|
src/tap-bridge/examples/tap-csma-virtual-machine.py
|
157
|
3012
|
# -*- Mode: Python; -*-
#
# Copyright 2010 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import ns.core
import ns.csma
import ns.internet
import ns.network
import ns.tap_bridge
def main(argv):
#
# We are interacting with the outside, real, world. This means we have to
# interact in real-time and therefore we have to use the real-time simulator
# and take the time to calculate checksums.
#
ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
ns.core.GlobalValue.Bind("ChecksumEnabled", ns.core.BooleanValue("true"))
#
# Create two ghost nodes. The first will represent the virtual machine host
# on the left side of the network; and the second will represent the VM on
# the right side.
#
nodes = ns.network.NodeContainer()
nodes.Create (2)
#
# Use a CsmaHelper to get a CSMA channel created, and the needed net
# devices installed on both of the nodes. The data rate and delay for the
# channel can be set through the command-line parser.
#
csma = ns.csma.CsmaHelper()
devices = csma.Install(nodes)
#
# Use the TapBridgeHelper to connect to the pre-configured tap devices for
    # the left side. We go with "UseLocal" mode, which does not require
    # promiscuous-mode support on the bridged device. This is a special
# case mode that allows us to extend a linux bridge into ns-3 IFF we will
# only see traffic from one other device on that bridge. That is the case
# for this configuration.
#
tapBridge = ns.tap_bridge.TapBridgeHelper()
tapBridge.SetAttribute ("Mode", ns.core.StringValue ("UseLocal"))
tapBridge.SetAttribute ("DeviceName", ns.core.StringValue ("tap-left"))
tapBridge.Install (nodes.Get (0), devices.Get (0))
#
    # Connect the right side tap to the right side CSMA device on the right-side
# ghost node.
#
tapBridge.SetAttribute ("DeviceName", ns.core.StringValue ("tap-right"))
tapBridge.Install (nodes.Get (1), devices.Get (1))
#
# Run the simulation for ten minutes to give the user time to play around
#
ns.core.Simulator.Stop (ns.core.Seconds (600))
ns.core.Simulator.Run(signal_check_frequency = -1)
ns.core.Simulator.Destroy()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
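# Illustrative invocation (assumes the tap-left/tap-right devices have been
# pre-configured on the host and the ns-3 Python bindings are built):
#
#     ./waf --pyrun src/tap-bridge/examples/tap-csma-virtual-machine.py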
|
gpl-2.0
|
drawks/ansible
|
lib/ansible/plugins/action/service.py
|
36
|
3755
|
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
UNUSED_PARAMS = {
'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
}
def run(self, tmp=None, task_vars=None):
        ''' handler for service operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto').lower()
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.service_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:
module = 'service'
if module != 'auto':
# run the 'service' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
if module in self.UNUSED_PARAMS:
for unused in self.UNUSED_PARAMS[module]:
if unused in new_module_args:
del new_module_args[unused]
self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
# get defaults for specific module
new_module_args = get_action_args_with_defaults(module, new_module_args, self._task.module_defaults, self._templar)
self._display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
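# Illustrative task (hypothetical nginx service on the target); with the
# default use=auto, the action above resolves the service manager from facts:
#
#     - name: Ensure nginx is running
#       service:
#         name: nginx
#         state: started
#         use: auto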
|
gpl-3.0
|
nvoron23/hue
|
desktop/core/ext-py/MySQL-python-1.2.5/tests/dbapi20.py
|
45
|
31434
|
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id$'
__version__ = '$Revision$'[11:-2]
__author__ = 'Stuart Bishop <[email protected]>'
import unittest
import time
# $Log$
# Revision 1.1.2.1 2006/02/25 03:44:32 adustman
# Generic DB-API unit test module
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
#   overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
#   DDL even more portable (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
#   are exhausted (already checking for empty lists if select retrieved
#   nothing)
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
class DatabaseAPI20Test(unittest.TestCase):
    ''' Test a database driver for DB API 2.0 compatibility.
        This implementation tests Gadfly, but the TestCase
        is structured so that other drivers can subclass this
        test case to ensure compliance with the DB-API. It is
        expected that this TestCase may be expanded in the future
        if ambiguities or edge conditions are discovered.
        The 'Optional Extensions' are not yet being tested.
        Drivers should subclass this test, overriding setUp, tearDown,
        self.driver, connect_args and connect_kw_args. Class specification
        should be as follows:
        import dbapi20
        class mytest(dbapi20.DatabaseAPI20Test):
           [...]
        Don't 'import DatabaseAPI20Test from dbapi20', or you will
        confuse the unit tester - just 'import dbapi20'.
    '''
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
        ''' Drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
        ''' Drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.assertTrue(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.assertTrue(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
        # defined hierarchy.
self.assertTrue(issubclass(self.driver.Warning,StandardError))
self.assertTrue(issubclass(self.driver.Error,StandardError))
self.assertTrue(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.assertTrue(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.assertTrue(con.Warning is drv.Warning)
self.assertTrue(con.Error is drv.Error)
self.assertTrue(con.InterfaceError is drv.InterfaceError)
self.assertTrue(con.DatabaseError is drv.DatabaseError)
self.assertTrue(con.OperationalError is drv.OperationalError)
self.assertTrue(con.IntegrityError is drv.IntegrityError)
self.assertTrue(con.InternalError is drv.InternalError)
self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be none after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertTrue(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
# connection.commit should raise an Error if called after connection'
# closed.'
self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertTrue(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.assertTrue(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.assertTrue(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
            # cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
            # cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.assertTrue(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
            # cursor.fetchmany should raise an Error if called without
            # issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.assertTrue(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.assertTrue(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.assertTrue(cur.rowcount in (-1,6))
            self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
            # cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.assertTrue(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
        # Real test for setoutputsize is driver dependent
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary('Something')
b = self.driver.Binary('')
def test_STRING(self):
self.assertTrue(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.assertTrue(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.assertTrue(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.assertTrue(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.assertTrue(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
|
apache-2.0
|
mharnold/spiderosm
|
spiderosm/cannames.py
|
1
|
14317
|
'''
Canonical street names!
(To allow name matching between OSM and Portland City DB.)
'''
import re
import string
import pylev
# ratio of edit distance between strings to string length (= max number of edits possible)
def levenshtein_ratio(s1, s2):
assert len(s1) > 0 and len(s2) > 0
return pylev.levenshtein(s1, s2) / float(max(len(s1),len(s2)))
# guess at probability names refer to the same street
def match_probability(names1, names2):
assert len(names1) > 0 and len(names2) > 0
p = 0
for name1 in names1:
can1 = canonical_street_name(name1)
for name2 in names2:
can2 = canonical_street_name(name2)
#print 'DEBUG', name1, name2, can1, can2, pylev.levenshtein(can1,can2)
#print 'DEBUG ratio:', levenshtein_ratio(can1,can2)
p = max(p, 1-levenshtein_ratio(can1,can2))
return p
# rate name match on scale of 0 to 100
def match_score(names1,names2):
return round(100*match_probability(names1,names2))
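# Worked example (matches the assertion in test() below): 'A St' and
# 'B Street' canonicalize to 'A ST' and 'B ST'; one edit over max length 4
# gives ratio 0.25, probability 0.75, and a score of 75.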
# CHARACTER RESTRICTION
# allowed_chars are left unchanged
# ignored_chars are deleted
# characters that are neither in allowed_chars nor in ignored_chars are mapped to space (' ')
allowed_chars = string.ascii_letters + string.digits + " "
ignored_chars = "'"
# SINGLE WORD SUBSTITUTIONS
# applied after words are mapped to all upper case
word_substitutions = {
# directionals
'NORTH':'N',
'SOUTH':'S',
'EAST':'E',
'WEST':'W',
'NORTHEAST':'NE',
'NORTHWEST':'NW',
'SOUTHEAST':'SE',
'SOUTHWEST':'SW',
# misc
'JUNIOR':'JR',
'SAINT':'ST',
# street types
# USPS standard street types -> canonical postal abbreviations (as found in TIGER/Line)
# Source: http://cpansearch.perl.org/src/SDERLE/Geo-Coder-US-0.21/US/Codes.pm
'ALLEE':'ALY',
'ALLEY':'ALY',
'ALLY':'ALY',
'ANEX':'ANX',
'ANNEX':'ANX',
'ANNX':'ANX',
'ARCADE':'ARC',
'AV':'AVE',
'AVEN':'AVE',
'AVENU':'AVE',
'AVENUE':'AVE',
'AVN':'AVE',
'AVNUE':'AVE',
'BAYOO':'BYU',
'BAYOU':'BYU',
'BEACH':'BCH',
'BEND':'BND',
'BLUF':'BLF',
'BLUFF':'BLF',
'BLUFFS':'BLFS',
'BOT':'BTM',
'BOTTM':'BTM',
'BOTTOM':'BTM',
'BOUL':'BLVD',
'BOULEVARD':'BLVD',
'BOULV':'BLVD',
'BRANCH':'BR',
'BRDGE':'BRG',
'BRIDGE':'BRG',
'BRNCH':'BR',
'BROOK':'BRK',
'BROOKS':'BRKS',
'BURG':'BG',
'BURGS':'BGS',
'BYPA':'BYP',
'BYPAS':'BYP',
'BYPASS':'BYP',
'BYPS':'BYP',
'CAMP':'CP',
'CANYN':'CYN',
'CANYON':'CYN',
'CAPE':'CPE',
'CAUSEWAY':'CSWY',
'CAUSWAY':'CSWY',
'CEN':'CTR',
'CENT':'CTR',
'CENTER':'CTR',
'CENTERS':'CTRS',
'CENTR':'CTR',
'CENTRE':'CTR',
'CIRC':'CIR',
'CIRCL':'CIR',
'CIRCLE':'CIR',
'CIRCLES':'CIRS',
'CK':'CRK',
'CLIFF':'CLF',
'CLIFFS':'CLFS',
'CLUB':'CLB',
'CMP':'CP',
'CNTER':'CTR',
'CNTR':'CTR',
'CNYN':'CYN',
'COMMON':'CMN',
'CORNER':'COR',
'CORNERS':'CORS',
'COURSE':'CRSE',
'COURT':'CT',
'COURTS':'CTS',
'COVE':'CV',
'COVES':'CVS',
'CR':'CRK',
'CRCL':'CIR',
'CRCLE':'CIR',
'CRECENT':'CRES',
'CREEK':'CRK',
'CRESCENT':'CRES',
'CRESENT':'CRES',
'CREST':'CRST',
'CROSSING':'XING',
'CROSSROAD':'XRD',
'CRSCNT':'CRES',
'CRSENT':'CRES',
'CRSNT':'CRES',
'CRSSING':'XING',
'CRSSNG':'XING',
'CRT':'CT',
'CURVE':'CURV',
'DALE':'DL',
'DAM':'DM',
'DIV':'DV',
'DIVIDE':'DV',
'DRIV':'DR',
'DRIVE':'DR',
'DRIVES':'DRS',
'DRV':'DR',
'DVD':'DV',
'ESTATE':'EST',
'ESTATES':'ESTS',
'EXP':'EXPY',
'EXPR':'EXPY',
'EXPRESS':'EXPY',
'EXPRESSWAY':'EXPY',
'EXPW':'EXPY',
'EXTENSION':'EXT',
'EXTENSIONS':'EXTS',
'EXTN':'EXT',
'EXTNSN':'EXT',
'FALLS':'FLS',
'FERRY':'FRY',
'FIELD':'FLD',
'FIELDS':'FLDS',
'FLAT':'FLT',
'FLATS':'FLTS',
'FORD':'FRD',
'FORDS':'FRDS',
'FOREST':'FRST',
'FORESTS':'FRST',
'FORG':'FRG',
'FORGE':'FRG',
'FORGES':'FRGS',
'FORK':'FRK',
'FORKS':'FRKS',
'FORT':'FT',
'FREEWAY':'FWY',
'FREEWY':'FWY',
'FRRY':'FRY',
'FRT':'FT',
'FRWAY':'FWY',
'FRWY':'FWY',
'GARDEN':'GDN',
'GARDENS':'GDNS',
'GARDN':'GDN',
'GATEWAY':'GTWY',
'GATEWY':'GTWY',
'GATWAY':'GTWY',
'GLEN':'GLN',
'GLENS':'GLNS',
'GRDEN':'GDN',
'GRDN':'GDN',
'GRDNS':'GDNS',
'GREEN':'GRN',
'GREENS':'GRNS',
'GROV':'GRV',
'GROVE':'GRV',
'GROVES':'GRVS',
'GTWAY':'GTWY',
'HARB':'HBR',
'HARBOR':'HBR',
'HARBORS':'HBRS',
'HARBR':'HBR',
'HAVEN':'HVN',
'HAVN':'HVN',
'HEIGHT':'HTS',
'HEIGHTS':'HTS',
'HGTS':'HTS',
'HIGHWAY':'HWY',
'HIGHWY':'HWY',
'HILL':'HL',
'HILLS':'HLS',
'HIWAY':'HWY',
'HIWY':'HWY',
'HLLW':'HOLW',
'HOLLOW':'HOLW',
'HOLLOWS':'HOLW',
'HOLWS':'HOLW',
'HRBOR':'HBR',
'HT':'HTS',
'HWAY':'HWY',
'INLET':'INLT',
'ISLAND':'IS',
'ISLANDS':'ISS',
'ISLES':'ISLE',
'ISLND':'IS',
'ISLNDS':'ISS',
'JCTION':'JCT',
'JCTN':'JCT',
'JCTNS':'JCTS',
'JUNCTION':'JCT',
'JUNCTIONS':'JCTS',
'JUNCTN':'JCT',
'JUNCTON':'JCT',
'KEY':'KY',
'KEYS':'KYS',
'KNOL':'KNL',
'KNOLL':'KNL',
'KNOLLS':'KNLS',
'LA':'LN',
'LAKE':'LK',
'LAKES':'LKS',
'LANDING':'LNDG',
'LANE':'LN',
'LANES':'LN',
'LDGE':'LDG',
'LIGHT':'LGT',
'LIGHTS':'LGTS',
'LNDNG':'LNDG',
'LOAF':'LF',
'LOCK':'LCK',
'LOCKS':'LCKS',
'LODG':'LDG',
'LODGE':'LDG',
'LOOPS':'LOOP',
'MANOR':'MNR',
'MANORS':'MNRS',
'MEADOW':'MDW',
'MEADOWS':'MDWS',
'MEDOWS':'MDWS',
'MILL':'ML',
'MILLS':'MLS',
'MISSION':'MSN',
'MISSN':'MSN',
'MNT':'MT',
'MNTAIN':'MTN',
'MNTN':'MTN',
'MNTNS':'MTNS',
'MOTORWAY':'MTWY',
'MOUNT':'MT',
'MOUNTAIN':'MTN',
'MOUNTAINS':'MTNS',
'MOUNTIN':'MTN',
'MSSN':'MSN',
'MTIN':'MTN',
'NECK':'NCK',
'ORCHARD':'ORCH',
'ORCHRD':'ORCH',
'OVERPASS':'OPAS',
'OVL':'OVAL',
'PARKS':'PARK',
'PARKWAY':'PKWY',
'PARKWAYS':'PKWY',
'PARKWY':'PKWY',
'PASSAGE':'PSGE',
'PATHS':'PATH',
'PIKES':'PIKE',
'PINE':'PNE',
'PINES':'PNES',
'PK':'PARK',
'PKWAY':'PKWY',
'PKWYS':'PKWY',
'PKY':'PKWY',
'PLACE':'PL',
'PLAIN':'PLN',
'PLAINES':'PLNS',
'PLAINS':'PLNS',
'PLAZA':'PLZ',
'PLZA':'PLZ',
'POINT':'PT',
'POINTS':'PTS',
'PORT':'PRT',
'PORTS':'PRTS',
'PRAIRIE':'PR',
'PRARIE':'PR',
'PRK':'PARK',
'PRR':'PR',
'RAD':'RADL',
'RADIAL':'RADL',
'RADIEL':'RADL',
'RANCH':'RNCH',
'RANCHES':'RNCH',
'RAPID':'RPD',
'RAPIDS':'RPDS',
'RDGE':'RDG',
'REST':'RST',
'RIDGE':'RDG',
'RIDGES':'RDGS',
'RIVER':'RIV',
'RIVR':'RIV',
'RNCHS':'RNCH',
'ROAD':'RD',
'ROADS':'RDS',
'ROUTE':'RTE',
'RVR':'RIV',
'SHOAL':'SHL',
'SHOALS':'SHLS',
'SHOAR':'SHR',
'SHOARS':'SHRS',
'SHORE':'SHR',
'SHORES':'SHRS',
'SKYWAY':'SKWY',
'SPNG':'SPG',
'SPNGS':'SPGS',
'SPRING':'SPG',
'SPRINGS':'SPGS',
'SPRNG':'SPG',
'SPRNGS':'SPGS',
'SPURS':'SPUR',
'SQR':'SQ',
'SQRE':'SQ',
'SQRS':'SQS',
'SQU':'SQ',
'SQUARE':'SQ',
'SQUARES':'SQS',
'STATION':'STA',
'STATN':'STA',
'STN':'STA',
'STR':'ST',
'STRAV':'STRA',
'STRAVE':'STRA',
'STRAVEN':'STRA',
'STRAVENUE':'STRA',
'STRAVN':'STRA',
'STREAM':'STRM',
'STREET':'ST',
'STREETS':'STS',
'STREME':'STRM',
'STRT':'ST',
'STRVN':'STRA',
'STRVNUE':'STRA',
'SUMIT':'SMT',
'SUMITT':'SMT',
'SUMMIT':'SMT',
'TERR':'TER',
'TERRACE':'TER',
'THROUGHWAY':'TRWY',
'TPK':'TPKE',
'TR':'TRL',
'TRACE':'TRCE',
'TRACES':'TRCE',
'TRACK':'TRAK',
'TRACKS':'TRAK',
'TRAFFICWAY':'TRFY',
'TRAIL':'TRL',
'TRAILS':'TRL',
'TRK':'TRAK',
'TRKS':'TRAK',
'TRLS':'TRL',
'TRNPK':'TPKE',
'TRPK':'TPKE',
'TUNEL':'TUNL',
'TUNLS':'TUNL',
'TUNNEL':'TUNL',
'TUNNELS':'TUNL',
'TUNNL':'TUNL',
'TURNPIKE':'TPKE',
'TURNPK':'TPKE',
'UNDERPASS':'UPAS',
'UNION':'UN',
'UNIONS':'UNS',
'VALLEY':'VLY',
'VALLEYS':'VLYS',
'VALLY':'VLY',
'VDCT':'VIA',
'VIADCT':'VIA',
'VIADUCT':'VIA',
'VIEW':'VW',
'VIEWS':'VWS',
'VILL':'VLG',
'VILLAG':'VLG',
'VILLAGE':'VLG',
'VILLAGES':'VLGS',
'VILLE':'VL',
'VILLG':'VLG',
'VILLIAGE':'VLG',
'VIST':'VIS',
'VISTA':'VIS',
'VLLY':'VLY',
'VST':'VIS',
'VSTA':'VIS',
'WALKS':'WALK',
'WELL':'WL',
'WELLS':'WLS',
'WY':'WAY',
}
# REGULAR EXPRESSION BASED NAME MAPPINGS
class NameMap(object):
p = None
template = None
def __init__(self,r,template):
self.p=re.compile(r, re.UNICODE)
self.template = template
def apply_map(self,name):
m = self.p.match(name)
if m:
#print 'DEB NameMap.apply name in:',name,'groups:',m.groups()
name = self.template.format(*m.groups())
#print 'DEB name out:',name
return name
mappings = [
# I84 -> I 84
NameMap(r"(.*\bI)(\d.*)$",
"{0} {1}"),
# I 84 FWY -> I 84
NameMap(r"(.*\d\s+)(\bFWY\b)(.*)$",
"{0}{2}"),
# TRANSIT CTR -> TC
NameMap(r"(.*)(\bTRANSIT\s+CTR\b)(.*)$",
"{0}TC{2}"),
# UNITED STATES -> US
NameMap(r"(.*)(\bUNITED\s+STATES\b)(.*)$",
"{0}US{2}"),
# MC GEE -> MCGEE, LE ROY -> LEROY
NameMap(r"(.*)\b(MC|LE)\s+(\S+.*)$",
"{0}{1}{2}"),
# US 30 -> HWY 30, OR 30 -> HWY 30, etc.
NameMap(r"(.*)\b(US|OR)\s*(\d.*)$",
"{0}HWY {2}"),
]
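# Illustrative trace: 'I84 FWY' matches the first rule to become 'I 84 FWY',
# then the FWY rule drops the suffix, so canonicalization yields 'I 84'
# (exercised by test_can("I84 FWY", "I 84") in test() below).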
def restrict_chars(name):
    global allowed_chars, ignored_chars
out = []
for c in name:
if c in allowed_chars:
out.append(c)
else:
if c not in ignored_chars: out.append(' ')
return ''.join(out)
def _name_to_words(name):
words=name.strip().split(' ')
words = [w for w in words if w != '']
#print "DEBUG words: ", words
return words
def _words_to_name(words):
return ' '.join(words).strip()
def _canonical_ws(name):
words = _name_to_words(name)
return _words_to_name(words)
def _make_word_substitutions(name):
global word_substitutions
words = _name_to_words(name)
new_words = []
for word in words:
        if word in word_substitutions:
new=word_substitutions[word]
if len(new)>0: new_words.append(new)
continue
new_words.append(word)
return _words_to_name(new_words)
def _apply_mappings(name):
global mappings
for nmap in mappings:
name = nmap.apply_map(name)
return name
def canonical_street_name(name):
'''
Regularizes street name to facilitate comparison.
Converts to all caps and applies standard abbreviations.
'''
    if name is None: return None
name = restrict_chars(name)
name = name.upper()
name = _make_word_substitutions(name)
name = _apply_mappings(name)
name = _canonical_ws(name)
return name
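# Example: canonical_street_name("Southeast Sunburst Lane") -> 'SE SUNBURST LN'
# (characters restricted, upper-cased, words abbreviated, whitespace collapsed).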
def test_can(name1,name2=None):
can1 = canonical_street_name(name1)
#print "test_can, name1: '%s' can1: '%s'" % (name1, can1)
assert can1 == canonical_street_name(can1)
if name2:
can2 = canonical_street_name(name2)
#print "test_can, name2: '%s' can2: '%s'" % (name2, can2)
assert can2 == canonical_street_name(can2)
assert can1 == can2
def test():
test_can("Southeast Sunburst Lane","SE SUNBURST LN")
test_can("Northeast Saint George Street","NE ST GEORGE ST")
test_can("NE HALSEY ST FRONTAGE RD","Northeast Halsey Street Frontage Road")
test_can("I84 FWY","I 84")
test_can(" 13133","13133")
test_can("US BANCORP COLUMBIA CENTER TC","UNITED STATES BANCORP COLUMBIA CENTER TRANSIT CENTER")
test_can("COEUR DALENE DR", "COEUR D'ALENE DR")
test_can("BURNSIDE BRG", "BURNSIDE BRIDGE")
test_can("BLAZER TRL","BLAZER TRAIL")
test_can("HWY 30","US 30")
test_can("MARTIN LUTHER KING JUNIOR BLVD","MARTIN LUTHER KING JR BLVD")
test_can("Mc Gee Street", "McGee Street")
test_can("Saint Alban's Road", "ST ALBANS RD")
test_can("Le Roy Avenue", "LeRoy Avenue")
# 'fuzzy matching' (edit distance)
assert match_score(['A St'],['B Street']) == 75.0
print 'cannames PASS'
#doit
if __name__ == "__main__":
test()
|
mit
|
Lightmatter/django-inlineformfield
|
.tox/py27/lib/python2.7/site-packages/django/conf/locale/id/formats.py
|
83
|
2141
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i.s"
TIME_FORMAT = 'G.i.s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i.s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d-%m-%y', '%d/%m/%y',  # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y',  # '25-10-2009', '25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
)
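# Illustrative: each entry is a strptime pattern, so '25 Oct 2006' parses
# via datetime.datetime.strptime('25 Oct 2006', '%d %b %Y').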
TIME_INPUT_FORMATS = (
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
    '%d-%m-%y %H.%M.%S',  # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M.%S.%f',  # '25-10-09 14.30.59.000200'
    '%d-%m-%y %H.%M',  # '25-10-09 14.30'
    '%d-%m-%y',  # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
    '%m/%d/%Y %H.%M.%S',  # '10/25/2009 14.30.59'
    '%m/%d/%Y %H.%M.%S.%f',  # '10/25/2009 14.30.59.000200'
    '%m/%d/%Y %H.%M',  # '10/25/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
mit
|
kmoocdev/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_access.py
|
45
|
1651
|
"""
Tests access.py
"""
from django.test import TestCase
from django.contrib.auth.models import User
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import AdminFactory
from student.auth import add_users
from contentstore.views.access import get_user_role
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class RolesTest(TestCase):
"""
Tests for lti user role serialization.
"""
def setUp(self):
""" Test case setup """
self.global_admin = AdminFactory()
self.instructor = User.objects.create_user('testinstructor', '[email protected]', 'foo')
self.staff = User.objects.create_user('teststaff', '[email protected]', 'foo')
self.course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
def test_get_user_role_instructor(self):
"""
Verifies if user is instructor.
"""
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.instructor)
self.assertEqual(
'instructor',
get_user_role(self.instructor, self.course_key)
)
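        # Granting an additional staff role must not mask the instructor role.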
add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)
self.assertEqual(
'instructor',
get_user_role(self.instructor, self.course_key)
)
def test_get_user_role_staff(self):
"""
Verifies if user is staff.
"""
add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)
self.assertEqual(
'staff',
get_user_role(self.staff, self.course_key)
)
|
agpl-3.0
|
WildflowerSchools/sensei
|
app/models/radio_observation.py
|
1
|
1982
|
import dateutil.parser
from shared import *
from sqlalchemy.dialects.postgresql import insert
import pytz
# Raw radio observation
class RadioObservation(db.Model):
classroom_id = db.Column(db.Integer, nullable=False, primary_key=True)
observed_at = db.Column(db.DateTime, nullable=False, primary_key=True)
relationship_id = db.Column(db.Integer, db.ForeignKey('entity_relationship.id'), nullable=False, primary_key=True)
relationship = db.relationship("EntityRelationship")
rssi = db.Column(db.Float)
#db.Index("obs_unique_idx", classroom_id, observed_at, relationship_id, unique=True)
def __init__(self, classroom_id, observed_at, relationship, rssi):
self.classroom_id = classroom_id
self.observed_at = dateutil.parser.parse(observed_at)
        if self.observed_at.tzinfo is not None:
self.observed_at = self.observed_at.astimezone(pytz.utc)
self.relationship = relationship
self.rssi = rssi
def as_dict_for_bulk_insert(self):
return dict(
classroom_id=self.classroom_id,
observed_at=self.observed_at.isoformat(),
relationship_id=self.relationship.id,
rssi=self.rssi)
def as_dict_for_web_resource(self):
return dict(
classroom_id=self.classroom_id,
observed_at=self.observed_at.isoformat(),
local_id=self.relationship.entity1_id,
local_type=self.relationship.entity1_type.name,
remote_id=self.relationship.entity2_id,
remote_type=self.relationship.entity2_type.name,
rssi=self.rssi)
@staticmethod
def bulk_store(obs):
insert_stmt = insert(RadioObservation.__table__)
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
index_elements=['classroom_id', 'observed_at', 'relationship_id']
)
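        # Postgres ON CONFLICT DO NOTHING on the composite primary key:
        # replayed observations become no-ops instead of IntegrityErrors.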
obs_values = [o.as_dict_for_bulk_insert() for o in obs]
db.session.execute(do_nothing_stmt, obs_values)
db.session.commit()
|
mit
|
pylixm/sae-django-demo
|
django1.7-sae/site-packages/django/dispatch/dispatcher.py
|
60
|
11775
|
import sys
import threading
import weakref
from django.utils.six.moves import xrange
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak-referencable.
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
                a specific object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if sys.version_info >= (3, 4):
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
with self.lock:
self._clear_dead_receivers()
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
self.sender_receivers_cache.clear()
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
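if __name__ == '__main__':
    # Illustrative smoke test, not part of Django: exercises connect() and
    # send() through the receiver decorator above. Assumes Django itself is
    # importable so the module-level imports resolve; names here are demo-only.
    demo_signal = Signal(providing_args=["value"])

    @receiver(demo_signal)
    def _echo(sender, **kwargs):
        # Receivers must accept **kwargs; the signal passes itself as 'signal'.
        return kwargs.get('value')

    assert demo_signal.send(sender=None, value=42) == [(_echo, 42)]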
|
apache-2.0
|
showgood/YCM_windows
|
third_party/requests/requests/packages/chardet/charsetprober.py
|
3127
|
1902
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
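        # Collapse every run of ASCII bytes to a single space, keeping only
        # the high-bit bytes that carry charset information.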
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
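        # Replace runs of ASCII letters with a space; digits and punctuation
        # survive, unlike in filter_high_bit_only above.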
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
|
gpl-3.0
|
hsiaoyi0504/scikit-learn
|
examples/classification/plot_lda.py
|
164
|
2224
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
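# Illustrative shapes: generate_data(20, 5) yields X of shape (20, 5) and 20
# labels, with only X[:, 0] separating the two blobs.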
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
with-git/tensorflow
|
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver_test.py
|
21
|
3916
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.tpu_cluster_resolver import TPUClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
mock = test.mock
class TPUClusterResolverTest(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
"""Verifies that the ClusterSpec generates the correct proto.
We are testing this four different ways to ensure that the ClusterSpec
returned by the TPUClusterResolver behaves identically to a normal
ClusterSpec when passed into the generic ClusterSpec libraries.
Args:
cluster_spec: ClusterSpec returned by the TPUClusterResolver
expected_proto: Expected protobuf
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def mock_service_client(
self,
tpu_map=None):
if tpu_map is None:
tpu_map = {}
def get_side_effect(name):
return tpu_map[name]
mock_client = mock.MagicMock()
mock_client.projects.locations.nodes.get.side_effect = get_side_effect
return mock_client
def testSimpleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470'
}
}
tpu_cluster_resolver = TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu_names=['test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = tpu_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'tpu_worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testMultipleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470'
},
'projects/test-project/locations/us-central1-c/nodes/test-tpu-2': {
'ipAddress': '10.4.5.6',
'port': '8470'
}
}
tpu_cluster_resolver = TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu_names=['test-tpu-2', 'test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = tpu_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'tpu_worker' tasks { key: 0 value: '10.4.5.6:8470' }
tasks { key: 1 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
|
apache-2.0
|
pforret/python-for-android
|
python-modules/twisted/twisted/plugins/twisted_qtstub.py
|
61
|
1417
|
# Copyright (c) 2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Backwards-compatibility plugin for the Qt reactor.
This provides a Qt reactor plugin named C{qt} which emits a deprecation
warning and a pointer to the separately distributed Qt reactor plugins.
"""
import warnings
from twisted.application.reactors import Reactor, NoSuchReactor
wikiURL = 'http://twistedmatrix.com/trac/wiki/QTReactor'
errorMessage = ('qtreactor is no longer a part of Twisted due to licensing '
'issues. Please see %s for details.' % (wikiURL,))
class QTStub(Reactor):
"""
Reactor plugin which emits a deprecation warning on the successful
installation of its reactor or a pointer to further information if an
ImportError occurs while attempting to install it.
"""
def __init__(self):
super(QTStub, self).__init__(
'qt', 'qtreactor', 'QT integration reactor')
def install(self):
"""
Install the Qt reactor with a deprecation warning or try to point
the user to further information if it cannot be installed.
"""
try:
super(QTStub, self).install()
except (ValueError, ImportError):
raise NoSuchReactor(errorMessage)
else:
warnings.warn(
"Please use -r qt3 to import qtreactor",
category=DeprecationWarning)
qt = QTStub()
|
apache-2.0
|
azurestandard/django
|
tests/regressiontests/utils/dateformat.py
|
9
|
6056
|
from __future__ import unicode_literals
from datetime import datetime, date
import os
import time
from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone
class DateFormatTests(unittest.TestCase):
def setUp(self):
self.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'Europe/Copenhagen'
translation.activate('en-us')
try:
# Check if a timezone has been set
time.tzset()
self.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
self.tz_tests = False
def tearDown(self):
if self.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = self.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if self.tz_tests:
time.tzset()
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
dt = datetime(2009, 5, 16, 5, 30, 30)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
def test_datetime_with_local_tzinfo(self):
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
def test_datetime_with_tzinfo(self):
tz = FixedOffset(-510)
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')
def test_date_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')
def test_time_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
self.assertEqual(dateformat.format(my_birthday, 's'), '00')
self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
self.assertEqual(dateformat.format(my_birthday, 't'), '31')
self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
self.assertEqual(dateformat.format(my_birthday, 'z'), '189')
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
if self.tz_tests:
self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
# Ticket #16924 -- We don't need timezone support to test this
# 3h30m to the west of UTC
tz = FixedOffset(-3*60 - 30)
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(dateformat.format(dt, 'O'), '-0330')
|
bsd-3-clause
|
Iceberg-Marketplace/Iceberg-API-PYTHON
|
icebergsdk/resources/webhooks.py
|
2
|
1828
|
# -*- coding: utf-8 -*-
import os
import time
from icebergsdk.resources.base import IcebergObject, UpdateableIcebergObject
if os.getenv('ICEBERG_DEBUG', False):
MAX_NUMBER_OF_CHECKS = 2
CHECK_EVERY_SECONDS = 5
else:
MAX_NUMBER_OF_CHECKS = 10
CHECK_EVERY_SECONDS = 1
class Webhook(UpdateableIcebergObject):
endpoint = 'webhook'
def test_trigger(self):
data = self.request("%s%s/" % (self.resource_uri, 'test_trigger'), method = "post")
return data
def triggers(self, **filters):
return self.get_list('%striggers/' % self.resource_uri, args=filters)
def get_test_endpoint_url(self):
return "%s/%s/" % ("/".join(self.resource_uri.split("/")[:-2]), 'test_endpoint')
def wait_for_triggers(self, number_of_triggers_expected=1, max_number_of_checks=MAX_NUMBER_OF_CHECKS, check_every_seconds=CHECK_EVERY_SECONDS):
webhook_triggers = []
## looping to wait for the webhook to be triggered
number_of_attempts = 0
        while number_of_attempts < max_number_of_checks and len(webhook_triggers) < number_of_triggers_expected:
if number_of_attempts > 0:
time.sleep(check_every_seconds) ## check every X seconds except the 1st time
webhook_triggers = self.triggers(status="succeeded")
number_of_attempts += 1
print "number_of_attempts = %s, %s webhook_triggers (expected %s)" % (number_of_attempts, len(webhook_triggers), number_of_triggers_expected)
return webhook_triggers
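# Illustrative usage, assuming `hook` is an existing Webhook instance:
#   succeeded = hook.wait_for_triggers(number_of_triggers_expected=2)
# polls triggers(status="succeeded") up to MAX_NUMBER_OF_CHECKS times,
# sleeping CHECK_EVERY_SECONDS between attempts after the first.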
class WebhookTrigger(IcebergObject):
endpoint = 'webhook_trigger'
raw_fields = ["payload"]
def attempts(self, **filters):
return self.get_list('%sattempts/' % self.resource_uri, args=filters)
class WebhookTriggerAttempt(IcebergObject):
endpoint = 'webhook_trigger_attempt'
|
mit
|
davy39/eric
|
UI/Previewer.py
|
1
|
6911
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a previewer widget for HTML, Markdown and ReST files.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QStackedWidget
import Preferences
class Previewer(QStackedWidget):
"""
Class implementing a previewer widget containing a stack of
specialized previewers.
"""
def __init__(self, viewmanager, splitter, parent=None):
"""
Constructor
@param viewmanager reference to the viewmanager object (ViewManager)
@param splitter reference to the embedding splitter (QSplitter)
@param parent reference to the parent widget (QWidget)
"""
super(Previewer, self).__init__(parent)
self.__vm = viewmanager
self.__splitter = splitter
self.__firstShow = True
self.__htmlPreviewer = None
self.__qssPreviewer = None
# Don't update too often because the UI might become sluggish
self.__typingTimer = QTimer()
self.__typingTimer.setInterval(500) # 500ms
self.__typingTimer.timeout.connect(self.__processEditor)
self.__vm.editorChangedEd.connect(self.__editorChanged)
self.__vm.editorLanguageChanged.connect(self.__editorLanguageChanged)
self.__vm.editorTextChanged.connect(self.__editorTextChanged)
self.__vm.previewStateChanged.connect(self.__previewStateChanged)
self.__splitter.splitterMoved.connect(self.__splitterMoved)
self.hide()
def show(self):
"""
Public method to show the preview widget.
"""
super(Previewer, self).show()
if self.__firstShow:
self.__splitter.restoreState(
Preferences.getUI("PreviewSplitterState"))
self.__firstShow = False
self.__typingTimer.start()
def hide(self):
"""
Public method to hide the preview widget.
"""
super(Previewer, self).hide()
self.__typingTimer.stop()
def shutdown(self):
"""
Public method to perform shutdown actions.
"""
self.__typingTimer.stop()
        if self.__htmlPreviewer:
            self.__htmlPreviewer.shutdown()
def __splitterMoved(self):
"""
Private slot to handle the movement of the embedding splitter's handle.
"""
state = self.__splitter.saveState()
Preferences.setUI("PreviewSplitterState", state)
def __editorChanged(self, editor):
"""
Private slot to handle a change of the current editor.
@param editor reference to the editor (Editor)
"""
if editor is None:
self.hide()
return
if Preferences.getUI("ShowFilePreview") and \
self.__isPreviewable(editor):
self.show()
self.__processEditor()
else:
self.hide()
def __editorLanguageChanged(self, editor):
"""
Private slot to handle a change of the current editor's language.
@param editor reference to the editor (Editor)
"""
self.__editorChanged(editor)
def __editorTextChanged(self, editor):
"""
Private slot to handle changes of an editor's text.
@param editor reference to the editor (Editor)
"""
if self.isVisible():
self.__typingTimer.stop()
self.__typingTimer.start()
def __previewStateChanged(self, on):
"""
Private slot to toggle the display of the preview.
@param on flag indicating to show a preview (boolean)
"""
editor = self.__vm.activeWindow()
if on and editor and self.__isPreviewable(editor):
self.show()
else:
self.hide()
def __isPreviewable(self, editor):
"""
Private method to check, if a preview can be shown for the given
editor.
@param editor reference to an editor (Editor)
@return flag indicating if a preview can be shown (boolean)
"""
if editor:
if editor.getFileName() is not None:
extension = os.path.normcase(
os.path.splitext(editor.getFileName())[1][1:])
return extension in \
Preferences.getEditor("PreviewHtmlFileNameExtensions") + \
Preferences.getEditor(
"PreviewMarkdownFileNameExtensions") + \
Preferences.getEditor("PreviewRestFileNameExtensions") + \
Preferences.getEditor("PreviewQssFileNameExtensions")
elif editor.getLanguage() in ["HTML", "QSS"]:
return True
return False
def __processEditor(self):
"""
Private slot to schedule the processing of the current editor's text.
"""
self.__typingTimer.stop()
editor = self.__vm.activeWindow()
if editor is not None:
fn = editor.getFileName()
if fn:
extension = os.path.normcase(os.path.splitext(fn)[1][1:])
else:
extension = ""
if extension in \
Preferences.getEditor("PreviewHtmlFileNameExtensions") or \
editor.getLanguage() == "HTML":
language = "HTML"
elif extension in \
Preferences.getEditor("PreviewMarkdownFileNameExtensions"):
language = "Markdown"
elif extension in \
Preferences.getEditor("PreviewRestFileNameExtensions"):
language = "ReST"
elif extension in \
Preferences.getEditor("PreviewQssFileNameExtensions"):
language = "QSS"
else:
language = ""
if language in ["HTML", "Markdown", "ReST"]:
if self.__htmlPreviewer is None:
from .Previewers.PreviewerHTML import PreviewerHTML
self.__htmlPreviewer = PreviewerHTML()
self.addWidget(self.__htmlPreviewer)
self.setCurrentWidget(self.__htmlPreviewer)
self.__htmlPreviewer.processEditor(editor)
elif language == "QSS":
if self.__qssPreviewer is None:
from .Previewers.PreviewerQSS import PreviewerQSS
self.__qssPreviewer = PreviewerQSS()
self.addWidget(self.__qssPreviewer)
self.setCurrentWidget(self.__qssPreviewer)
self.__qssPreviewer.processEditor(editor)
|
gpl-3.0
|
irisfeng/CodeScanner
|
SZQRCodeViewController/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/pylib/gyp/MSVSSettings.py
|
47
|
44444
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
    msbuild_format = '%d' if self._msbuild_base == 10 else '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
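# e.g. _newly_boolean.ConvertToMSBuild('2') -> 'true'; '0' maps to ''.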
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
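# Example: if 'AdditionalOptions' is a recognized setting, a generated key
# such as 'AdditionalOptions_excluded' also passes validation, because only
# the root name (before the '_excluded' suffix) is checked.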
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
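# For example, FixVCMacroSlashes('$(OutDir)\\obj') returns '$(OutDir)obj';
# only the run of slashes directly after the known macros is removed.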
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RootDir)%(Directory)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(FullPath)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
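# For example, ConvertVCMacrosToMSBuild('$(InputName).obj') returns
# '%(Filename).obj', and trailing slashes after $(IntDir)/$(OutDir) macros
# are cleaned up via FixVCMacroSlashes.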
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
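# A small worked example (the tool tables below are populated at import
# time, so this holds for any runtime call):
#   ConvertToMSBuildSettings({'VCCLCompilerTool': {'WarningLevel': '3',
#                                                  'WarnAsError': 'true'}})
# returns {'ClCompile': {'WarningLevel': 'Level3',
#                        'TreatWarningAsError': 'true'}}.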
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall'])) # /Gz
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions',])) # /arch:IA32 (vs2012+)
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
                    'ForcePureILImage',  # /CLRIMAGETYPE:PURE
                    'ForceSafeILImage']))  # /CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in Visual Studio 2008 to set the following properties.
# However, they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
             _Enumeration([], new=['Stub',  # /server stub
                                   'None']))  # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Manifest (Mt) settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
|
mit
|
Cadene/keras
|
examples/mnist_irnn.py
|
8
|
3040
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.initializations import normal, identity
from keras.layers.recurrent import SimpleRNN, LSTM
from keras.optimizers import RMSprop
from keras.utils import np_utils
'''
This is a reproduction of the IRNN experiment
with pixel-by-pixel sequential MNIST in
"A Simple Way to Initialize Recurrent Networks of Rectified Linear Units "
by Quoc V. Le, Navdeep Jaitly, Geoffrey E. Hinton
arXiv:1504.00941v2 [cs.NE] 7 Apr 201
http://arxiv.org/pdf/1504.00941v2.pdf
Optimizer is replaced with RMSprop which yields more stable and steady
improvement.
Reaches 0.93 train/test accuracy after 900 epochs (which roughly corresponds
to 1687500 steps in the original paper).
'''
batch_size = 32
nb_classes = 10
nb_epochs = 200
hidden_units = 100
learning_rate = 1e-6
clip_norm = 1.0
BPTT_truncate = 28*28
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], -1, 1)
X_test = X_test.reshape(X_test.shape[0], -1, 1)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(input_dim=1, output_dim=hidden_units,
init=lambda shape: normal(shape, scale=0.001),
inner_init=lambda shape: identity(shape, scale=1.0),
activation='relu', truncate_gradient=BPTT_truncate))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])
print('Compare to LSTM...')
model = Sequential()
model.add(LSTM(1, hidden_units))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('LSTM test score:', scores[0])
print('LSTM test accuracy:', scores[1])
|
mit
|
turi-code/SFrame
|
oss_src/unity/python/sframe/_json.py
|
5
|
1873
|
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import array as _array
import datetime as _datetime
import json as _json
def to_serializable(obj):
from . import extensions
return extensions.json.to_serializable(obj)
def from_serializable(data, schema):
from . import extensions
return extensions.json.from_serializable(data, schema)
def dumps(obj):
"""
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
"""
from . import extensions
(data, schema) = to_serializable(obj)
return _json.dumps({'data': data, 'schema': schema})
def loads(json_string):
"""
Loads a serializable object from JSON. This API maps to the Python built-in
json loads method, with a few differences:
* The input string must be valid JSON according to RFC 7159.
* The input must represent a serialized result produced by the `dumps`
method in this module, including both data and schema.
    If it does not, the result is unspecified and may raise exceptions.
"""
from . import extensions
result = _json.loads(json_string)
return from_serializable(**result)
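# Round-trip sketch (assumes the compiled extensions module is importable):
#   s = dumps({'x': 1, 'y': [1.0, 2.0]})   # data + schema as a JSON string
#   obj = loads(s)                         # reconstructs the original value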
|
bsd-3-clause
|
veorq/blueflower
|
blueflower/modules/bzip2.py
|
2
|
1655
|
# copyright (c) 2014 JP Aumasson <[email protected]>
#
# This file is part of blueflower.
#
# blueflower is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# blueflower is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with blueflower. If not, see <http://www.gnu.org/licenses/>.
import os
import bz2
from blueflower.do import do_data
from blueflower.types import type_data
from blueflower.utils.log import log_error
def bzip2_do_bzip2(abzip2, afile):
"""abzip2: raw bytes, afile: source file name"""
try:
data = bz2.decompress(abzip2)
except (IOError, ValueError) as e:
log_error(str(e), afile)
return
(ftype, supported) = type_data(data)
if supported:
# strip any .bz2 extension
(root, ext) = os.path.splitext(afile)
if ext.lower() == '.bz2':
do_data(ftype, data, afile+':'+root)
else:
do_data(ftype, data, afile)
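# For example, decompressing 'notes.txt.bz2' reports matches against the
# name 'notes.txt.bz2:notes.txt'; inputs without a .bz2 extension keep
# their original name.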
def bzip2_do_data(data, afile):
bzip2_do_bzip2(data, afile)
def bzip2_do_file(afile):
    try:
        # read in binary mode so the compressed bytes are intact everywhere,
        # and close the handle even if the read fails
        with open(afile, 'rb') as fid:
            abzip2 = fid.read()
    except IOError as e:
        log_error(str(e), afile)
        return
    bzip2_do_bzip2(abzip2, afile)
|
gpl-3.0
|
iuyte/VEX-709S-2018
|
.ycm_extra_conf.py
|
1
|
6322
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++14'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
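# For example, MakeRelativePathsInFlagsAbsolute(['-I', 'include'], '/proj')
# returns ['-I', '/proj/include'], and '-Iinclude' style flags are rewritten
# to '-I/proj/include' as well.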
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags }
|
lgpl-3.0
|
mjirayu/sit_academy
|
lms/envs/cms/dev.py
|
132
|
1508
|
"""
Settings for the LMS that runs alongside the CMS on AWS
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from ..dev import *
FEATURES['AUTH_USE_CERTIFICATES'] = False
SUBDOMAIN_BRANDING['edge'] = 'edge'
SUBDOMAIN_BRANDING['preview.edge'] = 'edge'
VIRTUAL_UNIVERSITIES = ['edge']
# Turn off this flag because it will render 'Edit / QA' links for all instructor viewings of
# modules. Since - for now - those links point to github (for XML based authoring), it seems broken
# to people using it. Once we can update those links to properly link back to Studio,
# then we can turn this flag back on, as well as enabling in aws.py configurations.
FEATURES['ENABLE_LMS_MIGRATION'] = False
META_UNIVERSITIES = {}
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
}
}
INSTALLED_APPS += (
# Mongo perf stats
'debug_toolbar_mongo',
)
DEBUG_TOOLBAR_PANELS += (
'debug_toolbar_mongo.panel.MongoDebugPanel',
)
# HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS defines, as a dictionary keyed by
# hostname regexes, which 'default' modulestore to use while processing each
# request; for example, 'preview.edx.org' should use the draft modulestore.
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = {
'preview\.': 'draft-preferred'
}
|
agpl-3.0
|
qwertyjune/BethSaidaBible
|
venv/lib/python2.7/site-packages/django/contrib/gis/forms/widgets.py
|
62
|
3727
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.gis import gdal
from django.contrib.gis.geos import GEOSGeometry, GEOSException
from django.forms.widgets import Widget
from django.template import loader
from django.utils import six
from django.utils import translation
logger = logging.getLogger('django.contrib.gis')
class BaseGeometryWidget(Widget):
"""
The base class for rich geometry widgets.
Renders a map using the WKT of the geometry.
"""
geom_type = 'GEOMETRY'
map_srid = 4326
map_width = 600
map_height = 400
display_raw = False
supports_3d = False
template_name = '' # set on subclasses
def __init__(self, attrs=None):
self.attrs = {}
for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'):
self.attrs[key] = getattr(self, key)
if attrs:
self.attrs.update(attrs)
def serialize(self, value):
return value.wkt if value else ''
def deserialize(self, value):
try:
return GEOSGeometry(value, self.map_srid)
except (GEOSException, ValueError) as err:
logger.error(
"Error creating geometry from value '%s' (%s)" % (
value, err)
)
return None
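    # e.g. deserialize('POINT(5 47)') yields a GEOSGeometry tagged with
    # self.map_srid; invalid geometry input is logged and None is returned.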
def render(self, name, value, attrs=None):
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if isinstance(value, six.string_types):
value = self.deserialize(value)
if value:
# Check that srid of value and map match
if value.srid != self.map_srid:
try:
ogr = value.ogr
ogr.transform(self.map_srid)
value = ogr
except gdal.OGRException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
value.srid, self.map_srid, err)
)
context = self.build_attrs(
attrs,
name=name,
module='geodjango_%s' % name.replace('-', '_'), # JS-safe
serialized=self.serialize(value),
geom_type=gdal.OGRGeomType(self.attrs['geom_type']),
STATIC_URL=settings.STATIC_URL,
LANGUAGE_BIDI=translation.get_language_bidi(),
)
return loader.render_to_string(self.template_name, context)
class OpenLayersWidget(BaseGeometryWidget):
template_name = 'gis/openlayers.html'
class Media:
js = (
'http://openlayers.org/api/2.13/OpenLayers.js',
'gis/js/OLMapWidget.js',
)
class OSMWidget(BaseGeometryWidget):
"""
An OpenLayers/OpenStreetMap-based widget.
"""
template_name = 'gis/openlayers-osm.html'
default_lon = 5
default_lat = 47
class Media:
js = (
'http://openlayers.org/api/2.13/OpenLayers.js',
'http://www.openstreetmap.org/openlayers/OpenStreetMap.js',
'gis/js/OLMapWidget.js',
)
def __init__(self, attrs=None):
super(OSMWidget, self).__init__()
for key in ('default_lon', 'default_lat'):
self.attrs[key] = getattr(self, key)
if attrs:
self.attrs.update(attrs)
@property
def map_srid(self):
# Use the official spherical mercator projection SRID on versions
        # of GDAL that support it; otherwise, fall back to 900913.
if gdal.HAS_GDAL and gdal.GDAL_VERSION >= (1, 7):
return 3857
else:
return 900913
|
gpl-3.0
|
adlnet-archive/edx-platform
|
lms/djangoapps/instructor_task/migrations/0002_add_subtask_field.py
|
60
|
5146
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InstructorTask.subtasks'
db.add_column('instructor_task_instructortask', 'subtasks',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'InstructorTask.subtasks'
db.delete_column('instructor_task_instructortask', 'subtasks')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'subtasks': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
|
agpl-3.0
|
luciddg/auth-tool
|
lib/plugin/token.py
|
1
|
2084
|
# -*- coding: utf-8 -*-
import hmac
import hashlib
import math
import time
from cherrypy.process import plugins
__all__ = ['TokenEnginePlugin']
class TokenEnginePlugin(plugins.SimplePlugin):
"""
Generates and verifies RFC-6238-compliant time-based one-time passwords.
Uses the SHA256 digest algorithm and the TOTP = (K, T) where T is the current
epoch time divided by the timestep (default 24 hours). This obviates the need
to expire or track tokens and pushes the data storage problem onto the client.
"""
def __init__(self, bus, secret=None, timestep=86400):
plugins.SimplePlugin.__init__(self, bus)
self.secret = secret
self.timestep = timestep
def start(self): # pragma: no cover
self.bus.log('Starting token plugin')
self.bus.subscribe("token-gen", self._gen_token)
self.bus.subscribe("token-verify", self._verify_token)
def stop(self): # pragma: no cover
self.bus.log('Stopping token plugin')
self.bus.unsubscribe("token-gen", self._gen_token)
self.bus.unsubscribe("token-verify", self._verify_token)
def _gen_token(self, data):
"""
Returns a RFC6238-compliant time-based one-time password
"""
epoch = math.floor(time.time() / self.timestep)
return hmac.new(self.secret, str(data) + str(epoch), hashlib.sha256).hexdigest()
    def _verify_token(self, totphash, data):
        """
        Verifies an RFC6238-compliant time-based one-time password,
        accepting hashes from the current or the previous timestep.
        Returns True or False.
        """
        epoch = math.floor(time.time() / self.timestep)
        curtimeseries = hmac.new(self.secret,
                                 str(data) + str(epoch),
                                 hashlib.sha256).hexdigest()
        prevtimeseries = hmac.new(self.secret,
                                  str(data) + str(epoch - 1),
                                  hashlib.sha256).hexdigest()
        # compare in constant time (hmac.compare_digest, Python 2.7.7+) so
        # the check does not leak timing information about the valid hash
        return (hmac.compare_digest(totphash, curtimeseries) or
                hmac.compare_digest(totphash, prevtimeseries))
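# Usage sketch (hypothetical values, outside CherryPy's bus machinery):
#   plugin = TokenEnginePlugin(bus=None, secret='s3cret')
#   token = plugin._gen_token('user@example.com')
#   plugin._verify_token(token, 'user@example.com')  # -> True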
|
mit
|
yoosw/printrun_etri
|
printrun/gui/__init__.py
|
1
|
23864
|
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import logging
try:
    import wx
except ImportError:
    # _() is not installed yet at this point, so log a plain string
    logging.error("WX is not installed. This program requires WX to run.")
    raise
from printrun.utils import install_locale
install_locale('pronterface')
from .controls import ControlsSizer, add_extra_controls
from .viz import VizPane
from .log import LogPane
from .toolbar import MainToolbar
# swyoo 2015.09.30 change toolbar position
from .toolbar_home import HomeToolbar
# swyoo 2015.08.31 for image display in linux
from printrun.utils import imagefile
# swyoo 2015.09.08 divide os nt, linux
import os
# swyoo 2015.09.09 add for tap move
# import time
from control_setting import Setting_Control
from control_printing import Print_Control
from control_motor import Motor_Control
from control_original import createTabbedGui_sub, createGui_sub
from .widgets import TempGauge
from .utils import make_autosize_button
class ToggleablePane(wx.BoxSizer):
def __init__(self, root, label, parentpanel, parentsizers):
super(ToggleablePane, self).__init__(wx.HORIZONTAL)
if not parentpanel: parentpanel = root.panel
self.root = root
self.visible = True
self.parentpanel = parentpanel
self.parentsizers = parentsizers
self.panepanel = root.newPanel(parentpanel)
self.button = wx.Button(parentpanel, -1, label, size = (22, 18), style = wx.BU_EXACTFIT)
self.button.Bind(wx.EVT_BUTTON, self.toggle)
def toggle(self, event):
if self.visible:
self.Hide(self.panepanel)
self.on_hide()
else:
self.Show(self.panepanel)
self.on_show()
self.visible = not self.visible
self.button.SetLabel(">" if self.button.GetLabel() == "<" else "<")
class LeftPaneToggleable(ToggleablePane):
def __init__(self, root, parentpanel, parentsizers):
super(LeftPaneToggleable, self).__init__(root, "<", parentpanel, parentsizers)
self.Add(self.panepanel, 0, wx.EXPAND)
self.Add(self.button, 0)
def set_sizer(self, sizer):
self.panepanel.SetSizer(sizer)
def on_show(self):
for sizer in self.parentsizers:
sizer.Layout()
def on_hide(self):
for sizer in self.parentsizers:
# Expand right splitterwindow
if isinstance(sizer, wx.SplitterWindow):
if sizer.shrinked:
button_width = self.button.GetSize()[0]
sizer.SetSashPosition(sizer.GetSize()[0] - button_width)
else:
sizer.Layout()
class LogPaneToggleable(ToggleablePane):
def __init__(self, root, parentpanel, parentsizers):
super(LogPaneToggleable, self).__init__(root, ">", parentpanel, parentsizers)
self.Add(self.button, 0)
pane = LogPane(root, self.panepanel)
self.panepanel.SetSizer(pane)
self.Add(self.panepanel, 1, wx.EXPAND)
self.splitter = self.parentpanel.GetParent()
def on_show(self):
self.splitter.shrinked = False
self.splitter.SetSashPosition(self.splitter.GetSize()[0] - self.orig_width)
self.splitter.SetMinimumPaneSize(self.orig_min_size)
self.splitter.SetSashGravity(self.orig_gravity)
if hasattr(self.splitter, "SetSashSize"): self.splitter.SetSashSize(self.orig_sash_size)
if hasattr(self.splitter, "SetSashInvisible"): self.splitter.SetSashInvisible(False)
for sizer in self.parentsizers:
sizer.Layout()
def on_hide(self):
self.splitter.shrinked = True
self.orig_width = self.splitter.GetSize()[0] - self.splitter.GetSashPosition()
button_width = self.button.GetSize()[0]
self.orig_min_size = self.splitter.GetMinimumPaneSize()
self.orig_gravity = self.splitter.GetSashGravity()
self.splitter.SetMinimumPaneSize(button_width)
self.splitter.SetSashGravity(1)
self.splitter.SetSashPosition(self.splitter.GetSize()[0] - button_width)
if hasattr(self.splitter, "SetSashSize"):
self.orig_sash_size = self.splitter.GetSashSize()
self.splitter.SetSashSize(0)
if hasattr(self.splitter, "SetSashInvisible"): self.splitter.SetSashInvisible(True)
for sizer in self.parentsizers:
sizer.Layout()
class MainWindow(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
# this list will contain all controls that should be only enabled
# when we're connected to a printer
self.panel = wx.Panel(self, -1)
self.reset_ui()
self.statefulControls = []
# swyoo 2015.09.10 for hide tap5
self.page_hidden = False
# swyoo 2015.09.04 for guage
def TimerHandler(self, event):
# self.count = self.count + 1
#
# if self.count >= 100:
# # self.count = 0
# return
# self.gauge.SetValue(self.count)
self.gauge.SetValue(int(self.var_loading_count))
def reset_ui(self):
self.panels = []
self.printerControls = []
def newPanel(self, parent, add_to_list = True):
panel = wx.Panel(parent)
self.registerPanel(panel, add_to_list)
return panel
def registerPanel(self, panel, add_to_list = True):
panel.SetBackgroundColour(self.bgcolor)
if add_to_list: self.panels.append(panel)
def createBaseGui(self):
self.notesizer = wx.BoxSizer(wx.VERTICAL)
# self.notebook = wx.Notebook(self.panel)
# self.notebook = wx.Notebook(self.panel, style=wx.NB_LEFT)
if os.name == "nt":
self.notebook = wx.Notebook(self.panel, style=wx.BK_DEFAULT)
else:
self.notebook = wx.Notebook(self.panel, style=
# wx.BK_DEFAULT
# wx.BK_TOP
# #wx.BK_BOTTOM
wx.BK_LEFT
# wx.BK_RIGHT
# | wx.NB_MULTILINE
)
# self.notebook.SetBackgroundColour(self.bgcolor)
self.notebook.SetBackgroundColour('#E6E7E7')
self.page0panel = self.newPanel(self.notebook)
self.page1panel = self.newPanel(self.notebook)
self.page2panel = self.newPanel(self.notebook)
self.page3panel = self.newPanel(self.notebook)
self.page4panel = self.newPanel(self.notebook)
self.page5panel = self.newPanel(self.notebook)
# swyoo 2015.08.29 set color, image
self.page0panel.SetBackgroundColour('#E6E7E7')
self.page1panel.SetBackgroundColour('#E6E7E7')
self.page2panel.SetBackgroundColour('#E6E7E7')
self.page3panel.SetBackgroundColour('#E6E7E7')
# self.page4panel.SetBackgroundColour('#E6E7E7')
# self.page5panel.SetBackgroundColour('#E6E7E7')
# swyoo 2015.09.02 set background image
image_file = 'flexor/print_bg.png'
bmp1 = wx.Image(imagefile(image_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.bitmap1 = wx.StaticBitmap(self.page1panel, -1, bmp1, (0, 0))
image_file = 'flexor/motor_bg.png'
bmp2 = wx.Image(imagefile(image_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.bitmap2 = wx.StaticBitmap(self.page2panel, -1, bmp2, (0, 0))
# image_file = 'flexor/setting_bg.png'
# bmp3 = wx.Image(imagefile(image_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
# self.bitmap3 = wx.StaticBitmap(self.page3panel, -1, bmp3, (0, 0))
# font_loading_file = wx.Font(16, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')
font_bold = wx.Font(20, wx.DEFAULT, wx.NORMAL, wx.BOLD, False, u'Consolas')
self.font_gauge = wx.Font(30, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, u'Consolas')
if os.name == "nt":
self.font_base = wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.NORMAL, False, u'Consolas')
else:
self.font_base = wx.Font(20, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.font_combo = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.font_big = wx.Font(20, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD, False)
self.font_16 = wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD)
self.font_motor = wx.Font(26, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.NORMAL)
        #========================================================== tab0 : Home
self.hometoolbarsizer = HomeToolbar(self, self.page0panel)
self.page0panel.SetSizer(self.hometoolbarsizer)
        #========================================================== tab1 : Print
Print_Control(self, self.page1panel)
        #========================================================== tab2 : Motor
Motor_Control(self, self.page2panel)
        #========================================================== tab3 : Setting
Setting_Control(self, self.page3panel)
        #========================================================== tab4 : Log
        # swyoo 2015.09.01 skip the 3D view to reduce loading time on the Raspberry Pi
if 0:
self.mainsizer = wx.BoxSizer(wx.HORIZONTAL)
self.splitterwindow = wx.SplitterWindow(self.page4panel, style = wx.SP_3D)
page4sizer1 = wx.BoxSizer(wx.HORIZONTAL)
page4panel1 = self.newPanel(self.splitterwindow)
page4sizer2 = wx.BoxSizer(wx.HORIZONTAL)
page4panel2 = self.newPanel(self.splitterwindow)
vizpane = VizPane(self, page4panel1)
page4sizer1.Add(vizpane, 1, wx.EXPAND)
page4sizer2.Add(LogPane(self, page4panel2), 1, wx.EXPAND)
page4panel1.SetSizer(page4sizer1)
page4panel2.SetSizer(page4sizer2)
self.splitterwindow.SetMinimumPaneSize(1)
self.splitterwindow.SetSashGravity(0.5)
self.splitterwindow.SplitVertically(page4panel1, page4panel2,
self.settings.last_sash_position)
self.mainsizer.Add(self.splitterwindow, 1, wx.EXPAND)
self.page4panel.SetSizer(self.mainsizer)
else:
            # swyoo 2015.09.01 VizPane must still be created since other code references it
vizpane = VizPane(self, self.page4panel)
self.mainsizer_4 = wx.BoxSizer(wx.HORIZONTAL)
self.mainsizer_4.Add(LogPane(self, self.page4panel), 1, wx.EXPAND)
self.page4panel.SetSizer(self.mainsizer_4)
        #========================================================== tab5 : Help
if self.settings.uimode in (_("Tabbed"), _("Tabbed with platers")):
createTabbedGui_sub(self, self.page5panel)
else:
createGui_sub(self, self.settings.uimode == _("Compact"),
self.settings.controlsmode == "Mini", self.page5panel)
        #========================================================== tab End
self.notesizer.Add(self.notebook, 1, wx.EXPAND)
if 0:
self.notebook.AddPage(self.page0panel, _("Home"))
self.notebook.AddPage(self.page1panel, _("Print"))
self.notebook.AddPage(self.page2panel, _("Motor"))
self.notebook.AddPage(self.page3panel, _("Setting"))
self.notebook.AddPage(self.page4panel, _("Log.."))
self.notebook.AddPage(self.page5panel, _("Original"))
else:
self.notebook.AddPage(self.page0panel, _(""))
self.notebook.AddPage(self.page1panel, _(""))
self.notebook.AddPage(self.page2panel, _(""))
self.notebook.AddPage(self.page3panel, _(""))
self.notebook.AddPage(self.page4panel, _(""))
if os.name == "nt":
self.notebook.AddPage(self.page5panel, _("Original"))
# list containing notebook images:
        # .ico files seem to be more OS-portable
        il = wx.ImageList(107, 79)  # (107, 79) is the size in pixels of the tab images
img0 = il.Add(wx.Bitmap(imagefile('flexor/menu/main.png'), wx.BITMAP_TYPE_PNG))
img1 = il.Add(wx.Bitmap(imagefile('flexor/menu/print.png'), wx.BITMAP_TYPE_PNG))
img2 = il.Add(wx.Bitmap(imagefile('flexor/menu/motor.png'), wx.BITMAP_TYPE_PNG))
img3 = il.Add(wx.Bitmap(imagefile('flexor/menu/setting.png'), wx.BITMAP_TYPE_PNG))
img4 = il.Add(wx.Bitmap(imagefile('flexor/menu/log.png'), wx.BITMAP_TYPE_PNG))
# img5 = il.Add(wx.Bitmap(imagefile('flexor/menu/92p.png'), wx.BITMAP_TYPE_PNG))
        # swyoo 2015.09.01 add an image to each tab
# set images to pages:
# #first assign image list created above to notebook:
self.notebook.AssignImageList(il)
# then assign each image in list to corresponding page.
# #the sharp-eyed will see you could use a loop for this,
# #but for maximum clarity/understanding I'm using the long way...
self.notebook.SetPageImage(0, img0)
self.notebook.SetPageImage(1, img1)
self.notebook.SetPageImage(2, img2)
self.notebook.SetPageImage(3, img3)
self.notebook.SetPageImage(4, img4)
# self.notebook.SetPageImage(5, img5)
# if this isn't called the notebook background color doesn't work right when switching
# themes in XP.
self.notebook.SetBackgroundColour(self.notebook.GetThemeBackgroundColour())
self.panel.SetSizer(self.notesizer)
# self.panel.SetSizerAndFit(self.notesizer)
self.Bind(wx.EVT_CLOSE, self.kill)
minsize = [600, 450]
self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
self.Fit()
def Visual_tab(self, event):
if self.page_hidden:
self.notebook.AddPage(self.page5panel, "Original")
self.page_hidden = False
else:
self.notebook.RemovePage(5)
self.page5panel.Hide()
self.page_hidden = True
    # swyoo 2015.09.09 added for tab switching
def switch_tab(self, page):
notebook = self.notebook
        # fine on Windows, but too slow on the Raspberry Pi
# if page == 1:
# time.sleep(0.5)
# else:
# time.sleep(0.2)
notebook.SetSelection(page)
def createTabbedGui(self):
self.notesizer = wx.BoxSizer(wx.VERTICAL)
self.notebook = wx.Notebook(self.panel)
self.notebook.SetBackgroundColour(self.bgcolor)
page1panel = self.newPanel(self.notebook)
page2panel = self.newPanel(self.notebook)
self.mainsizer_page1 = wx.BoxSizer(wx.VERTICAL)
page1panel1 = self.newPanel(page1panel)
page1panel2 = self.newPanel(page1panel)
self.toolbarsizer = MainToolbar(self, page1panel1, use_wrapsizer = True)
page1panel1.SetSizer(self.toolbarsizer)
self.mainsizer_page1.Add(page1panel1, 0, wx.EXPAND)
self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
page1panel2.SetSizer(self.lowersizer)
leftsizer = wx.BoxSizer(wx.VERTICAL)
controls_sizer = ControlsSizer(self, page1panel2, True)
leftsizer.Add(controls_sizer, 1, wx.ALIGN_CENTER)
rightsizer = wx.BoxSizer(wx.VERTICAL)
extracontrols = wx.GridBagSizer()
add_extra_controls(extracontrols, self, page1panel2, controls_sizer.extra_buttons)
rightsizer.AddStretchSpacer()
rightsizer.Add(extracontrols, 0, wx.ALIGN_CENTER)
self.lowersizer.Add(leftsizer, 0, wx.ALIGN_CENTER | wx.RIGHT, border = 10)
self.lowersizer.Add(rightsizer, 1, wx.ALIGN_CENTER)
self.mainsizer_page1.Add(page1panel2, 1)
self.mainsizer = wx.BoxSizer(wx.HORIZONTAL)
self.splitterwindow = wx.SplitterWindow(page2panel, style = wx.SP_3D)
page2sizer1 = wx.BoxSizer(wx.HORIZONTAL)
page2panel1 = self.newPanel(self.splitterwindow)
page2sizer2 = wx.BoxSizer(wx.HORIZONTAL)
page2panel2 = self.newPanel(self.splitterwindow)
vizpane = VizPane(self, page2panel1)
page2sizer1.Add(vizpane, 1, wx.EXPAND)
page2sizer2.Add(LogPane(self, page2panel2), 1, wx.EXPAND)
page2panel1.SetSizer(page2sizer1)
page2panel2.SetSizer(page2sizer2)
self.splitterwindow.SetMinimumPaneSize(1)
self.splitterwindow.SetSashGravity(0.5)
self.splitterwindow.SplitVertically(page2panel1, page2panel2,
self.settings.last_sash_position)
self.mainsizer.Add(self.splitterwindow, 1, wx.EXPAND)
page1panel.SetSizer(self.mainsizer_page1)
page2panel.SetSizer(self.mainsizer)
self.notesizer.Add(self.notebook, 1, wx.EXPAND)
self.notebook.AddPage(page1panel, _("Commands"))
self.notebook.AddPage(page2panel, _("Status"))
if self.settings.uimode == _("Tabbed with platers"):
from printrun.stlplater import StlPlaterPanel
from printrun.gcodeplater import GcodePlaterPanel
page3panel = StlPlaterPanel(parent = self.notebook,
callback = self.platecb,
build_dimensions = self.build_dimensions_list,
circular_platform = self.settings.circular_bed,
simarrange_path = self.settings.simarrange_path,
antialias_samples = int(self.settings.antialias3dsamples))
page4panel = GcodePlaterPanel(parent = self.notebook,
callback = self.platecb,
build_dimensions = self.build_dimensions_list,
circular_platform = self.settings.circular_bed,
antialias_samples = int(self.settings.antialias3dsamples))
self.registerPanel(page3panel)
self.registerPanel(page4panel)
self.notebook.AddPage(page3panel, _("Plater"))
self.notebook.AddPage(page4panel, _("G-Code Plater"))
self.panel.SetSizer(self.notesizer)
self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
self.Bind(wx.EVT_CLOSE, self.kill)
# Custom buttons
if wx.VERSION > (2, 9): self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
else: self.cbuttonssizer = wx.GridBagSizer()
self.centerpanel = self.newPanel(page1panel2)
self.centerpanel.SetSizer(self.cbuttonssizer)
rightsizer.Add(self.centerpanel, 0, wx.ALIGN_CENTER)
rightsizer.AddStretchSpacer()
self.panel.SetSizerAndFit(self.notesizer)
self.cbuttons_reload()
minsize = self.lowersizer.GetMinSize() # lower pane
minsize[1] = self.notebook.GetSize()[1]
self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
self.Fit()
def createGui(self, compact = False, mini = False):
self.mainsizer = wx.BoxSizer(wx.VERTICAL)
self.lowersizer = wx.BoxSizer(wx.HORIZONTAL)
upperpanel = self.newPanel(self.panel, False)
self.toolbarsizer = MainToolbar(self, upperpanel)
lowerpanel = self.newPanel(self.panel)
upperpanel.SetSizer(self.toolbarsizer)
lowerpanel.SetSizer(self.lowersizer)
leftpanel = self.newPanel(lowerpanel)
left_pane = LeftPaneToggleable(self, leftpanel, [self.lowersizer])
leftpanel.SetSizer(left_pane)
left_real_panel = left_pane.panepanel
controls_panel = self.newPanel(left_real_panel)
controls_sizer = ControlsSizer(self, controls_panel, mini_mode = mini)
controls_panel.SetSizer(controls_sizer)
left_sizer = wx.BoxSizer(wx.VERTICAL)
left_sizer.Add(controls_panel, 1, wx.EXPAND)
left_pane.set_sizer(left_sizer)
self.lowersizer.Add(leftpanel, 0, wx.EXPAND)
if not compact: # Use a splitterwindow to group viz and log
rightpanel = self.newPanel(lowerpanel)
rightsizer = wx.BoxSizer(wx.VERTICAL)
rightpanel.SetSizer(rightsizer)
self.splitterwindow = wx.SplitterWindow(rightpanel, style = wx.SP_3D)
self.splitterwindow.SetMinimumPaneSize(150)
self.splitterwindow.SetSashGravity(0.8)
rightsizer.Add(self.splitterwindow, 1, wx.EXPAND)
vizpanel = self.newPanel(self.splitterwindow)
logpanel = self.newPanel(self.splitterwindow)
self.splitterwindow.SplitVertically(vizpanel, logpanel,
self.settings.last_sash_position)
self.splitterwindow.shrinked = False
else:
vizpanel = self.newPanel(lowerpanel)
logpanel = self.newPanel(left_real_panel)
viz_pane = VizPane(self, vizpanel)
# Custom buttons
if wx.VERSION > (2, 9): self.cbuttonssizer = wx.WrapSizer(wx.HORIZONTAL)
else: self.cbuttonssizer = wx.GridBagSizer()
self.centerpanel = self.newPanel(vizpanel)
self.centerpanel.SetSizer(self.cbuttonssizer)
viz_pane.Add(self.centerpanel, 0, flag = wx.ALIGN_CENTER)
vizpanel.SetSizer(viz_pane)
if compact:
log_pane = LogPane(self, logpanel)
else:
log_pane = LogPaneToggleable(self, logpanel, [self.lowersizer])
left_pane.parentsizers.append(self.splitterwindow)
logpanel.SetSizer(log_pane)
if not compact:
self.lowersizer.Add(rightpanel, 1, wx.EXPAND)
else:
left_sizer.Add(logpanel, 1, wx.EXPAND)
self.lowersizer.Add(vizpanel, 1, wx.EXPAND)
self.mainsizer.Add(upperpanel, 0, wx.EXPAND)
self.mainsizer.Add(lowerpanel, 1, wx.EXPAND)
self.panel.SetSizer(self.mainsizer)
self.panel.Bind(wx.EVT_MOUSE_EVENTS, self.editbutton)
self.Bind(wx.EVT_CLOSE, self.kill)
self.mainsizer.Layout()
        # This prevents resizing below a reasonable value
# We sum the lowersizer (left pane / viz / log) min size
# the toolbar height and the statusbar/menubar sizes
minsize = [0, 0]
minsize[0] = self.lowersizer.GetMinSize()[0] # lower pane
minsize[1] = max(viz_pane.GetMinSize()[1], controls_sizer.GetMinSize()[1])
minsize[1] += self.toolbarsizer.GetMinSize()[1] # toolbar height
displaysize = wx.DisplaySize()
minsize[0] = min(minsize[0], displaysize[0])
minsize[1] = min(minsize[1], displaysize[1])
self.SetMinSize(self.ClientToWindowSize(minsize)) # client to window
self.cbuttons_reload()
def gui_set_connected(self):
self.xyb.enable()
self.zb.enable()
for control in self.printerControls:
control.Enable()
def gui_set_disconnected(self):
self.printbtn.Disable()
self.pausebtn.Disable()
self.recoverbtn.Disable()
for control in self.printerControls:
control.Disable()
self.xyb.disable()
self.zb.disable()
|
gpl-3.0
|
apixandru/intellij-community
|
python/lib/Lib/site-packages/django/conf/locale/nn/formats.py
|
685
|
1657
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
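# For example, DATE_FORMAT below renders 25 October 2006 as "25. oktober 2006".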
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
apache-2.0
|
mdovgialo/steam-vr-wheel
|
steam_vr_wheel/pyvjoy/vjoydevice.py
|
1
|
1698
|
from .constants import *
from .exceptions import *
import steam_vr_wheel.pyvjoy._sdk as _sdk
class VJoyDevice(object):
"""Object-oriented API for a vJoy Device"""
    def __init__(self, rID=None, data=None):
        """Constructor"""
        self.rID = rID
        self._sdk = _sdk
        self._vj = self._sdk._vj
if data:
self.data = data
else:
#TODO maybe - have self.data as a wrapper object containing the Struct
self.data = self._sdk.CreateDataStructure(self.rID)
try:
_sdk.vJoyEnabled()
_sdk.AcquireVJD(rID)
#TODO FIXME
except vJoyException:
raise
    def set_button(self, buttonID, state):
        """Set a given button (numbered from 1) to On (1 or True) or Off (0 or False)"""
        return self._sdk.SetBtn(state, self.rID, buttonID)
    def set_axis(self, AxisID, AxisValue):
        """Set a given Axis (one of pyvjoy.HID_USAGE_X etc) to a value (0x0000 - 0x8000)"""
        return self._sdk.SetAxis(AxisValue, self.rID, AxisID)
def reset(self):
"""Reset all axes and buttons to default values"""
return self._sdk.ResetVJD(self.rID)
def reset_data(self):
"""Reset the data Struct to default (does not change vJoy device at all directly)"""
        self.data = self._sdk.CreateDataStructure(self.rID)
def reset_buttons(self):
"""Reset all buttons on the vJoy Device to default"""
return self._sdk.ResetButtons(self.rID)
def reset_povs(self):
"""Reset all Povs on the vJoy Device to default"""
return self._sdk.ResetPovs(self.rID)
def update(self):
"""Send the stored Joystick data to the device in one go (the 'efficient' method)"""
return self._sdk.UpdateVJD(self.rID, self.data)
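# A minimal usage sketch, assuming the vJoy driver is installed and virtual
# device 1 is configured (HID_USAGE_X comes from the wildcard constants import):
# device = VJoyDevice(1)
# device.set_button(1, 1)                  # press button 1
# device.set_axis(HID_USAGE_X, 0x4000)     # mid-range on the X axis
# device.set_button(1, 0)                  # release button 1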
|
mit
|
gaursagar/cclib
|
src/cclib/bridge/cclib2pyquante.py
|
1
|
1092
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Bridge for using cclib data in PyQuante (http://pyquante.sourceforge.net)."""
from __future__ import print_function
import sys
try:
from PyQuante.Molecule import Molecule
except ImportError:
# Fail silently for now.
pass
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
    >>> print(int(en * 10) / 10.) # Should be around -73.8
-73.8
"""
return Molecule("notitle", list(zip(atomnos, atomcoords)), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
bsd-3-clause
|
vrv/tensorflow
|
tensorflow/python/ops/state_ops.py
|
20
|
9586
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables. See the @{python/state_ops} guide.
@@Variable
@@global_variables
@@local_variables
@@model_variables
@@trainable_variables
@@moving_average_variables
@@global_variables_initializer
@@local_variables_initializer
@@variables_initializer
@@is_variable_initialized
@@report_uninitialized_variables
@@assert_variables_initialized
@@assign
@@assign_add
@@assign_sub
@@Saver
@@latest_checkpoint
@@get_checkpoint_state
@@update_checkpoint_state
@@get_variable
@@get_local_variable
@@VariableScope
@@variable_scope
@@variable_op_scope
@@get_variable_scope
@@make_template
@@no_regularizer
@@constant_initializer
@@random_normal_initializer
@@truncated_normal_initializer
@@random_uniform_initializer
@@uniform_unit_scaling_initializer
@@zeros_initializer
@@ones_initializer
@@orthogonal_initializer
@@fixed_size_partitioner
@@variable_axis_size_partitioner
@@min_max_variable_partitioner
@@scatter_update
@@scatter_add
@@scatter_sub
@@scatter_mul
@@scatter_div
@@scatter_nd_update
@@scatter_nd_add
@@scatter_nd_sub
@@sparse_mask
@@IndexedSlices
@@initialize_all_tables
@@tables_initializer
@@export_meta_graph
@@import_meta_graph
@@all_variables
@@initialize_all_variables
@@initialize_local_variables
@@initialize_variables
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
    A variable tensor.
"""
return gen_state_ops._variable_v2(shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
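# A sketch of the two branches above (illustrative only; `my_initializer` is a
# hypothetical callable taking (shape, dtype) and returning a tensor):
# init_variable(v, numpy.zeros([2, 3]))    # init is tensor-like: v = init
# init_variable(v, my_initializer)         # callable: v = my_initializer(shape, dtype)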
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
if ref.op.type == "VarHandleOp":
return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
name=name)
def assign_sub(ref, value, use_locking=None, name=None):
"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
      The value to be subtracted from the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value)
def assign_add(ref, value, use_locking=None, name=None):
"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value)
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update 'ref' by assigning 'value' to it.
This operation outputs a Tensor that holds the new value of 'ref' after
the value has been assigned. This makes it easier to chain operations
that need to use the reset value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of 'ref' after
the assignment has completed.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value)
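# A minimal end-to-end sketch of the ref-variable path above (assumes the
# classic TF 1.x graph-mode API; illustrative, not part of this module):
# import tensorflow as tf
# v = tf.Variable(0, name="counter")   # ref-dtype variable
# inc = tf.assign_add(v, 1)            # dispatches to gen_state_ops.assign_add
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     print(sess.run(inc))             # -> 1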
|
apache-2.0
|
popazerty/e2-gui
|
lib/python/Screens/InputDeviceSetup.py
|
5
|
11888
|
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.MessageBox import MessageBox
from Components.InputDevice import iInputDevices, iRcTypeControl
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.config import config, ConfigYesNo, getConfigListEntry, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap, HelpableActionMap
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
from boxbranding import getBoxType, getMachineBrand, getMachineName
class InputDeviceSelection(Screen, HelpableScreen):
def __init__(self, session):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.setTitle(_("Select input device"))
self.edittext = _("Press OK to edit the settings.")
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Select"))
self["key_yellow"] = StaticText("")
self["key_blue"] = StaticText("")
self["introduction"] = StaticText(self.edittext)
self.devices = [(iInputDevices.getDeviceName(x),x) for x in iInputDevices.getDeviceList()]
print "[InputDeviceSelection] found devices :->", len(self.devices),self.devices
self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
{
"cancel": (self.close, _("Exit input device selection")),
"ok": (self.okbuttonClick, _("Select input device")),
}, prio=-2)
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": (self.close, _("Exit input device selection")),
"green": (self.okbuttonClick, _("Select input device")),
}, prio=-2)
self.currentIndex = 0
self.list = []
self["list"] = List(self.list)
self.updateList()
self.onLayoutFinish.append(self.layoutFinished)
self.onClose.append(self.cleanup)
def layoutFinished(self):
self.setTitle(_("Select input device"))
def cleanup(self):
self.currentIndex = 0
def buildInterfaceList(self, device, description, type, isinputdevice=True):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
activepng = None
devicepng = None
enabled = iInputDevices.getDeviceAttribute(device, 'enabled')
if type == 'remote':
if config.misc.rcused.value == 0:
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_rcnew-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_rcnew.png"))
else:
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_rcold-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_rcold.png"))
elif type == 'keyboard':
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_keyboard-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_keyboard.png"))
elif type == 'mouse':
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_mouse-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_mouse.png"))
elif isinputdevice:
devicepng = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/input_rcnew.png"))
return device, description, devicepng, divpng
def updateList(self):
self.list = []
if iRcTypeControl.multipleRcSupported():
self.list.append(self.buildInterfaceList('rctype', _('Configure remote control type'), None, False))
for x in self.devices:
dev_type = iInputDevices.getDeviceAttribute(x[1], 'type')
self.list.append(self.buildInterfaceList(x[1],_(x[0]), dev_type))
self["list"].setList(self.list)
self["list"].setIndex(self.currentIndex)
def okbuttonClick(self):
selection = self["list"].getCurrent()
self.currentIndex = self["list"].getIndex()
if selection is not None:
if selection[0] == 'rctype':
self.session.open(RemoteControlType)
else:
self.session.openWithCallback(self.DeviceSetupClosed, InputDeviceSetup, selection[0])
def DeviceSetupClosed(self, *ret):
self.updateList()
class InputDeviceSetup(Screen, ConfigListScreen):
def __init__(self, session, device):
Screen.__init__(self, session)
self.inputDevice = device
iInputDevices.currentDevice = self.inputDevice
self.onChangedEntry = []
self.setup_title = _("Input device setup")
self.isStepSlider = None
self.enableEntry = None
self.repeatEntry = None
self.delayEntry = None
self.nameEntry = None
self.enableConfigEntry = None
self.list = []
ConfigListScreen.__init__(self, self.list, session=session, on_change=self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.apply,
"menu": self.closeRecursive,
}, prio=-2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
# for generating strings into .po only
		devicenames = [_("%s %s front panel") % (getMachineBrand(), getMachineName()),
			_("%s %s front panel") % (getMachineBrand(), getMachineName()),
			_("%s %s remote control (native)") % (getMachineBrand(), getMachineName()),
			_("%s %s advanced remote control (native)") % (getMachineBrand(), getMachineName()),
			_("%s %s ir keyboard") % (getMachineBrand(), getMachineName()),
			_("%s %s ir mouse") % (getMachineBrand(), getMachineName())]
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
self.onClose.append(self.cleanup)
def layoutFinished(self):
self.setTitle(self.setup_title)
listWidth = self["config"].l.getItemSize().width()
# use 20% of list width for sliders
self["config"].l.setSeperation(int(listWidth*.8))
def cleanup(self):
iInputDevices.currentDevice = ""
def createSetup(self):
self.list = []
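		# The config attribute path depends on the selected device, so each
		# entry below is built dynamically with exec (Python 2 statement),
		# e.g. config.inputDevices.event0.enabled for a device named "event0".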
label = _("Change repeat and delay settings?")
cmd = "self.enableEntry = getConfigListEntry(label, config.inputDevices." + self.inputDevice + ".enabled)"
exec cmd
label = _("Interval between keys when repeating:")
cmd = "self.repeatEntry = getConfigListEntry(label, config.inputDevices." + self.inputDevice + ".repeat)"
exec cmd
label = _("Delay before key repeat starts:")
cmd = "self.delayEntry = getConfigListEntry(label, config.inputDevices." + self.inputDevice + ".delay)"
exec cmd
label = _("Devicename:")
cmd = "self.nameEntry = getConfigListEntry(label, config.inputDevices." + self.inputDevice + ".name)"
exec cmd
if self.enableEntry:
if isinstance(self.enableEntry[1], ConfigYesNo):
self.enableConfigEntry = self.enableEntry[1]
self.list.append(self.enableEntry)
if self.enableConfigEntry:
if self.enableConfigEntry.value is True:
self.list.append(self.repeatEntry)
self.list.append(self.delayEntry)
else:
self.repeatEntry[1].setValue(self.repeatEntry[1].default)
self["config"].invalidate(self.repeatEntry)
self.delayEntry[1].setValue(self.delayEntry[1].default)
self["config"].invalidate(self.delayEntry)
self.nameEntry[1].setValue(self.nameEntry[1].default)
self["config"].invalidate(self.nameEntry)
self["config"].list = self.list
self["config"].l.setList(self.list)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def selectionChanged(self):
if self["config"].getCurrent() == self.enableEntry:
self["introduction"].setText(_("Current device: ") + str(iInputDevices.getDeviceAttribute(self.inputDevice, 'name')))
else:
self["introduction"].setText(_("Current value: ") + self.getCurrentValue() + ' ' + _("ms"))
def newConfig(self):
current = self["config"].getCurrent()
if current:
if current == self.enableEntry:
self.createSetup()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def confirm(self, confirmed):
if not confirmed:
print "not confirmed"
return
else:
self.nameEntry[1].setValue(iInputDevices.getDeviceAttribute(self.inputDevice, 'name'))
cmd = "config.inputDevices." + self.inputDevice + ".name.save()"
exec cmd
self.keySave()
def apply(self):
self.session.openWithCallback(self.confirm, MessageBox, _("Use these input device settings?"), MessageBox.TYPE_YESNO, timeout=20, default=True)
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout=20, default=True)
else:
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
self.selectionChanged()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].value)
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class RemoteControlType(Screen, ConfigListScreen):
odinRemote = "OdinM9"
if getBoxType() == "maram9":
odinRemote = "MaraM9"
rcList = [
("0", _("Default")),
("3", _(odinRemote)),
("4", _("DMM normal")),
("6", _("DMM advanced")),
("7", _("et5000/6000")),
("8", _("VU+")),
("9", _("et8000/et10000")),
("11", _("et9x00/6500")),
("13", _("et4000")),
("14", _("XP1000")),
("18", _("F1/F3")),
("16", _("et7x00"))
]
defaultRcList = [
("et4000", 13),
("et5000", 7),
("et6000", 7),
("et6500", 11),
("et8000", 9),
("et9000", 11),
("et9200", 11),
("et9500", 11),
("et10000", 9),
("formuler1", 18),
("formuler3", 18),
("xp1000", 14),
("et7x00", 16)
]
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = ["RemoteControlType", "Setup"]
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.keyCancel,
"save": self.keySave,
}, prio=-1)
self["key_green"] = StaticText(_("Save"))
self["key_red"] = StaticText(_("Cancel"))
self.list = []
ConfigListScreen.__init__(self, self.list, session=self.session)
rctype = config.plugins.remotecontroltype.rctype.value
self.rctype = ConfigSelection(choices=self.rcList, default=str(rctype))
self.list.append(getConfigListEntry(_("Remote control type"), self.rctype))
self["config"].list = self.list
self.defaultRcType = None
self.getDefaultRcType()
def getDefaultRcType(self):
data = iRcTypeControl.getBoxType()
for x in self.defaultRcList:
if x[0] in data:
self.defaultRcType = x[1]
break
def setDefaultRcType(self):
iRcTypeControl.writeRcType(self.defaultRcType)
def keySave(self):
if config.plugins.remotecontroltype.rctype.value == int(self.rctype.value):
self.close()
else:
self.setNewSetting()
self.session.openWithCallback(self.keySaveCallback, MessageBox, _("Is this setting ok?"), MessageBox.TYPE_YESNO, timeout=20, default=True, timeout_default=False)
def keySaveCallback(self, answer):
if answer is False:
self.restoreOldSetting()
else:
config.plugins.remotecontroltype.rctype.value = int(self.rctype.value)
config.plugins.remotecontroltype.save()
self.close()
def restoreOldSetting(self):
if config.plugins.remotecontroltype.rctype.value == 0:
self.setDefaultRcType()
else:
iRcTypeControl.writeRcType(config.plugins.remotecontroltype.rctype.value)
def setNewSetting(self):
if int(self.rctype.value) == 0:
self.setDefaultRcType()
else:
iRcTypeControl.writeRcType(int(self.rctype.value))
def keyCancel(self):
self.restoreOldSetting()
self.close()
|
gpl-2.0
|
memaldi/geopy
|
geopy/geocoders/googlev3.py
|
12
|
12755
|
"""
:class:`.GoogleV3` is the Google Maps V3 geocoder.
"""
import base64
import hashlib
import hmac
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
    GeocoderQueryError,
    GeocoderQuotaExceeded,
    ConfigurationError,
    GeocoderParseError,
)
from geopy.location import Location
from geopy.util import logger
try:
from pytz import timezone, UnknownTimeZoneError
from calendar import timegm
from datetime import datetime
from numbers import Number
pytz_available = True
except ImportError:
pytz_available = False
__all__ = ("GoogleV3", )
class GoogleV3(Geocoder): # pylint: disable=R0902
"""
Geocoder using the Google Maps v3 API. Documentation at:
https://developers.google.com/maps/documentation/geocoding/
"""
def __init__(
self,
api_key=None,
domain='maps.googleapis.com',
scheme=DEFAULT_SCHEME,
client_id=None,
secret_key=None,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None,
): # pylint: disable=R0913
"""
Initialize a customized Google geocoder.
API authentication is only required for Google Maps Premier customers.
:param string api_key: The API key required by Google to perform
geocoding requests. API keys are managed through the Google APIs
console (https://code.google.com/apis/console).
.. versionadded:: 0.98.2
:param string domain: Should be the localized Google Maps domain to
connect to. The default is 'maps.googleapis.com', but if you're
geocoding address in the UK (for example), you may want to set it
to 'maps.google.co.uk' to properly bias results.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
.. versionadded:: 0.97
:param string client_id: If using premier, the account client id.
:param string secret_key: If using premier, the account secret key.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
.. versionadded:: 0.96
"""
super(GoogleV3, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
)
if client_id and not secret_key:
raise ConfigurationError('Must provide secret_key with client_id.')
if secret_key and not client_id:
raise ConfigurationError('Must provide client_id with secret_key.')
self.api_key = api_key
self.domain = domain.strip('/')
self.scheme = scheme
self.doc = {}
if client_id and secret_key:
self.premier = True
self.client_id = client_id
self.secret_key = secret_key
else:
self.premier = False
self.client_id = None
self.secret_key = None
self.api = '%s://%s/maps/api/geocode/json' % (self.scheme, self.domain)
self.tz_api = '%s://%s/maps/api/timezone/json' % (
self.scheme,
self.domain
)
def _get_signed_url(self, params):
"""
Returns a Premier account signed url. Docs on signature:
https://developers.google.com/maps/documentation/business/webservices/auth#digital_signatures
"""
params['client'] = self.client_id
path = "?".join(('/maps/api/geocode/json', urlencode(params)))
signature = hmac.new(
base64.urlsafe_b64decode(self.secret_key),
path.encode('utf-8'),
hashlib.sha1
)
signature = base64.urlsafe_b64encode(
signature.digest()
).decode('utf-8')
return '%s://%s%s&signature=%s' % (
self.scheme, self.domain, path, signature
)
@staticmethod
def _format_components_param(components):
"""
Format the components dict to something Google understands.
"""
return "|".join(
(":".join(item)
for item in components.items()
)
)
@staticmethod
def _format_bounds_param(bounds):
"""
Format the bounds to something Google understands.
"""
return '%f,%f|%f,%f' % (bounds[0], bounds[1], bounds[2], bounds[3])
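    # For instance (illustrative):
    # _format_bounds_param((35.0, 139.0, 36.0, 140.0))
    # returns '35.000000,139.000000|36.000000,140.000000'.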
def geocode(
self,
query,
exactly_one=True,
timeout=None,
bounds=None,
region=None,
components=None,
language=None,
sensor=False,
): # pylint: disable=W0221,R0913
"""
Geocode a location query.
:param string query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
:param bounds: The bounding box of the viewport within which
to bias geocode results more prominently.
:type bounds: list or tuple
:param string region: The region code, specified as a ccTLD
("top-level domain") two-character value.
:param dict components: Restricts to an area. Can use any combination
of: route, locality, administrative_area, postal_code, country.
.. versionadded:: 0.97.1
:param string language: The language in which to return results.
:param bool sensor: Whether the geocoding request comes from a
device with a location sensor.
"""
params = {
'address': self.format_string % query,
'sensor': str(sensor).lower()
}
if self.api_key:
params['key'] = self.api_key
if bounds:
if len(bounds) != 4:
raise GeocoderQueryError(
"bounds must be a four-item iterable of lat,lon,lat,lon"
)
params['bounds'] = self._format_bounds_param(bounds)
if region:
params['region'] = region
if components:
params['components'] = self._format_components_param(components)
if language:
params['language'] = language
if self.premier is False:
url = "?".join((self.api, urlencode(params)))
else:
url = self._get_signed_url(params)
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
exactly_one=False,
timeout=None,
language=None,
sensor=False,
): # pylint: disable=W0221,R0913
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param boolean exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception.
.. versionadded:: 0.97
:param string language: The language in which to return results.
:param boolean sensor: Whether the geocoding request comes from a
device with a location sensor.
"""
params = {
'latlng': self._coerce_point_to_string(query),
'sensor': str(sensor).lower()
}
if language:
params['language'] = language
if self.api_key:
params['key'] = self.api_key
if not self.premier:
url = "?".join((self.api, urlencode(params)))
else:
url = self._get_signed_url(params)
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def timezone(self, location, at_time=None, timeout=None):
"""
**This is an unstable API.**
Finds the timezone a `location` was in for a specified `at_time`,
and returns a pytz timezone object.
.. versionadded:: 1.2.0
:param location: The coordinates for which you want a timezone.
:type location: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param at_time: The time at which you want the timezone of this
location. This is optional, and defaults to the time that the
function is called in UTC.
        :type at_time: integer, long, float, datetime
:rtype: pytz timezone
"""
if not pytz_available:
raise ImportError(
'pytz must be installed in order to locate timezones. '
' Install with `pip install geopy -e ".[timezone]"`.'
)
location = self._coerce_point_to_string(location)
if isinstance(at_time, Number):
timestamp = at_time
elif isinstance(at_time, datetime):
timestamp = timegm(at_time.utctimetuple())
elif at_time is None:
timestamp = timegm(datetime.utcnow().utctimetuple())
else:
raise GeocoderQueryError(
"`at_time` must be an epoch integer or "
"datetime.datetime object"
)
params = {
"location": location,
"timestamp": timestamp,
}
if self.api_key:
params['key'] = self.api_key
url = "?".join((self.tz_api, urlencode(params)))
logger.debug("%s.timezone: %s", self.__class__.__name__, url)
response = self._call_geocoder(url, timeout=timeout)
try:
tz = timezone(response["timeZoneId"])
except UnknownTimeZoneError:
raise GeocoderParseError(
"pytz could not parse the timezone identifier (%s) "
"returned by the service." % response["timeZoneId"]
)
except KeyError:
raise GeocoderParseError(
"geopy could not find a timezone in this response: %s" %
response
)
return tz
def _parse_json(self, page, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
places = page.get('results', [])
if not len(places):
self._check_status(page.get('status'))
return None
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted_address')
latitude = place['geometry']['location']['lat']
longitude = place['geometry']['location']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(places[0])
else:
return [parse_place(place) for place in places]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
if status == 'ZERO_RESULTS':
# When there are no results, just return.
return
if status == 'OVER_QUERY_LIMIT':
raise GeocoderQuotaExceeded(
'The given key has gone over the requests limit in the 24'
' hour period or has submitted too many requests in too'
' short a period of time.'
)
elif status == 'REQUEST_DENIED':
raise GeocoderQueryError(
'Your request was denied.'
)
elif status == 'INVALID_REQUEST':
raise GeocoderQueryError('Probably missing address or latlng.')
else:
raise GeocoderQueryError('Unknown error.')
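# A minimal usage sketch (requires network access; the API key below is a
# placeholder, not a real credential):
# from geopy.geocoders import GoogleV3
# geolocator = GoogleV3(api_key="YOUR_API_KEY")
# location = geolocator.geocode("175 5th Avenue NYC")
# print(location.address, location.latitude, location.longitude)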
|
mit
|
VasuAgrawal/tartanHacks2015
|
site/flask/lib/python2.7/site-packages/pip/_vendor/requests/utils.py
|
121
|
20879
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and isinstance(name, builtin_str) and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an
    OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
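# For example (illustrative): list(iter_slices('abcdef', 4)) == ['abcd', 'ef']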
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
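# Minimal sketch (added; values assumed but easy to verify in a REPL):
# requote_uri is idempotent, because unquote_unreserved only un-escapes
# unreserved bytes and quote() treats '%' as safe.
def _demo_requote_uri():
    once = requote_uri('http://example.com/a%7Eb c')
    assert once == 'http://example.com/a~b%20c'
    assert requote_uri(once) == once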
def address_in_network(ip, net):
"""
    This function allows you to check if an IP address belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
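# Minimal sketch (added for illustration): the /bits prefix expands to a
# dotted netmask, and membership is a bitwise AND on the packed addresses.
def _demo_address_in_network():
    assert dotted_netmask(24) == '255.255.255.0'
    assert address_in_network('192.168.1.1', '192.168.1.0/24')
    assert not address_in_network('192.168.1.1', '192.168.100.0/24')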
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
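# Hedged usage sketch (added; results shown for a typical Linux environment,
# where proxy_bypass consults the same no_proxy variable). It mutates
# os.environ purely for illustration.
def _demo_should_bypass_proxies():
    os.environ['no_proxy'] = 'localhost,10.0.0.0/8,internal.example.com'
    assert should_bypass_proxies('http://10.1.2.3/status')            # CIDR match
    assert should_bypass_proxies('http://api.internal.example.com/')  # suffix match
    assert not should_bypass_proxies('http://example.org/')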
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
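# Illustrative sketch (added): parsing a typical RFC 5988 pagination header
# into a list of dicts.
def _demo_parse_header_links():
    value = ('<http://example.com/?page=2>; rel="next", '
             '<http://example.com/?page=5>; rel="last"')
    links = parse_header_links(value)
    # -> [{'url': 'http://example.com/?page=2', 'rel': 'next'},
    #     {'url': 'http://example.com/?page=5', 'rel': 'last'}]
    assert links[0] == {'url': 'http://example.com/?page=2', 'rel': 'next'}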
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls: their location and count determine
    # the encoding. Also detect a BOM, if present.
    sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
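# Minimal sketch of the heuristic above (added): the samples are standard
# encodings of a JSON document, whose first two characters are always ASCII.
def _demo_guess_json_utf():
    doc = '{"a": 1}'
    assert guess_json_utf(doc.encode('utf-8')) == 'utf-8'
    assert guess_json_utf(doc.encode('utf-16-le')) == 'utf-16-le'
    assert guess_json_utf(doc.encode('utf-32-be')) == 'utf-32-be'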
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
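# Illustrative sketch (added): the netloc/path swap restores a scheme-less
# URL, while an explicit scheme is left untouched.
def _demo_prepend_scheme_if_needed():
    assert prepend_scheme_if_needed('example.com/path', 'http') == 'http://example.com/path'
    assert prepend_scheme_if_needed('https://example.com', 'http') == 'https://example.com'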
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
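# Illustrative sketch (added; not part of the original module): strip both
# the userinfo and the fragment from a URL in one pass.
def _demo_urldefragauth():
    assert (urldefragauth('http://user:pass@example.com/path#frag')
            == 'http://example.com/path')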
|
mit
|
ibinti/intellij-community
|
python/lib/Lib/macurl2path.py
|
332
|
3275
|
"""Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib
import os
__all__ = ["url2pathname","pathname2url"]
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
#
# XXXX The .. handling should be fixed...
#
tp = urllib.splittype(pathname)[0]
if tp and tp != 'file':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = pathname.split('/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] != '':
del components[i]
else:
i = i+1
if not components[0]:
# Absolute unix path, don't start with colon
rv = ':'.join(components[1:])
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
rv = ':' + ':'.join(components)
# and finally unquote slashes and other funny characters
return urllib.unquote(rv)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
if '/' in pathname:
raise RuntimeError, "Cannot convert pathname containing slashes"
components = pathname.split(':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
if components[-1] == '':
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(_pncomp2url, components)
if os.path.isabs(pathname):
return '/' + '/'.join(components)
else:
return '/'.join(components)
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
def test():
for url in ["index.html",
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print '%r -> %r' % (url, url2pathname(url))
for path in ["drive:",
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print '%r -> %r' % (path, pathname2url(path))
if __name__ == '__main__':
test()
|
apache-2.0
|
charlyoleg/Cnc25D
|
cnc25d/gearbar.py
|
1
|
18085
|
# gearbar.py
# generates gearbar and simulates gear.
# created by charlyoleg on 2013/09/26
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
"""
gearbar.py is a parametric generator of gearbars.
The main function return the gear-bar as FreeCAD Part object.
You can also simulate or view the gearbar and get a DXF, SVG or BRep file.
"""
################################################################
# header for Python / FreeCAD compatibility
################################################################
import cnc25d_api
cnc25d_api.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
#FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI
################################################################
# import
################################################################
import math
import sys, argparse
#from datetime import datetime
#import os, errno
#import re
#import Tkinter # to display the outline in a small GUI
#
import Part
#from FreeCAD import Base
# 3rd parties
#import svgwrite
#from dxfwrite import DXFEngine
# cnc25d
import gear_profile
################################################################
# inheritance from gear_profile
################################################################
def inherit_gear_profile():
""" generate a small constraint set to be able to create the gear_profile-object for gearbar
"""
gp_c = {}
gp_c['gear_type'] = 'l'
gp_c['second_gear_type'] = 'e'
gp_c['gearbar_slope'] = 0.3 #hack the default value of gear_profile.gearbar_slope
i_gear_profile = gear_profile.gear_profile(gp_c)
return(i_gear_profile)
################################################################
# gearbar constraint_constructor
################################################################
def gearbar_constraint_constructor(ai_parser, ai_variant = 0):
"""
Add arguments relative to the gearbar design
"""
r_parser = ai_parser
### inherit arguments from gear_profile
i_gear_profile = inherit_gear_profile()
r_parser = i_gear_profile.get_constraint_constructor()(r_parser, 3)
### gearbar
r_parser.add_argument('--gearbar_height','--gbh', action='store', type=float, default=20.0,
help="Set the height of the gearbar (from the bottom to the gear-profile primitive line). Default: 20.0")
### gearbar-hole
r_parser.add_argument('--gearbar_hole_height_position','--gbhhp', action='store', type=float, default=10.0,
help="Set the height from the bottom of the gearbar to the center of the gearbar-hole. Default: 10.0")
r_parser.add_argument('--gearbar_hole_diameter','--gbhd', action='store', type=float, default=10.0,
help="Set the diameter of the gearbar-hole. If equal to 0.0, there are no gearbar-hole. Default: 10.0")
r_parser.add_argument('--gearbar_hole_offset','--gbho', action='store', type=int, default=0,
help="Set the initial number of teeth to position the first gearbar-hole. Default: 0")
r_parser.add_argument('--gearbar_hole_increment','--gbhi', action='store', type=int, default=1,
help="Set the number of teeth between two gearbar-holes. Default: 1")
# return
return(r_parser)
################################################################
# constraint conversion
################################################################
def gear_profile_constraint(c={}):
""" generate the gear_profile conversion with the gearbar constraint c
"""
gp_c = c.copy()
gp_c['gear_type'] = 'l'
gp_c['second_gear_type'] = 'e'
  if((c['gearbar_slope']==0)and(c['gear_force_angle']==0)and(c['second_gear_base_diameter']==0)): # if gearbar_slope is not constrained
gp_c['gearbar_slope'] = 0.3 # default value
#gp_c['gear_router_bit_radius'] = c['gear_router_bit_radius']
#print("dbg097: gp_c:", gp_c)
return(gp_c)
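# Hedged illustration (added; the dict below is a hypothetical minimal
# constraint, real designs carry many more keys): the conversion copies the
# gearbar constraint and pins the linear/external gear types.
def _demo_gear_profile_constraint():
  c = {'gearbar_slope': 0.3, 'gear_force_angle': 0, 'second_gear_base_diameter': 0}
  gp_c = gear_profile_constraint(c)
  assert gp_c['gear_type'] == 'l'
  assert gp_c['second_gear_type'] == 'e'
  assert gp_c['gearbar_slope'] == 0.3 # kept, since it is explicitly constrained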
################################################################
# gearbar constraint_check
################################################################
def gearbar_constraint_check(c):
""" check the gearbar constraint c and set the dynamic default values
"""
#print("dbg122: len(c)", len(c))
### precision
radian_epsilon = math.pi/1000
### check parameter coherence (part 1)
c['gearbar_hole_radius'] = float(c['gearbar_hole_diameter'])/2
# c['gearbar_hole_height_position']
if((c['gearbar_hole_height_position']+c['gearbar_hole_radius'])>c['gearbar_height']):
print("ERR215: Error, gearbar_hole_height_position {:0.3} and gearbar_hole_radius {:0.3f} are too big compare to gearbar_height {:0.3f} !".format(c['gearbar_hole_height_position'], c['gearbar_hole_radius'], c['gearbar_height']))
sys.exit(2)
# c['gearbar_hole_increment']
if(c['gearbar_hole_increment']==0):
print("ERR183: Error gearbar_hole_increment must be bigger than zero!")
sys.exit(2)
# c['gear_tooth_nb']
if(c['gear_tooth_nb']>0): # create a gear_profile
i_gear_profile = inherit_gear_profile() # inherit from gear_profile
i_gear_profile.apply_external_constraint(gear_profile_constraint(c))
gear_profile_parameters = i_gear_profile.get_constraint()
# extract some gear_profile high-level parameter
#print('dbg556: gear_profile_parameters:', gear_profile_parameters)
c['g1_ix'] = gear_profile_parameters['g1_param']['center_ox']
c['g1_iy'] = gear_profile_parameters['g1_param']['center_oy']
c['g1_inclination'] = gear_profile_parameters['g1_param']['gearbar_inclination']
gear_profile_for_length = i_gear_profile.get_A_figure('first_gear')[0]
gear_profile_for_length = cnc25d_api.outline_rotate(gear_profile_for_length, c['g1_ix'], c['g1_iy'], -1*c['g1_inclination'] + math.pi/2)
gear_profile_for_length = cnc25d_api.outline_shift_xy(gear_profile_for_length, -1*gear_profile_for_length[0][0], 1, -1*c['g1_iy'] + c['gearbar_height'], 1)
#print("dbg127: gear_profile_for_length:", gear_profile_for_length)
c['gearbar_length'] = gear_profile_for_length[-1][0] - gear_profile_for_length[0][0]
## get some parameters
c['minimal_gear_profile_height'] = c['gearbar_height'] - (gear_profile_parameters['g1_param']['hollow_height'] + gear_profile_parameters['g1_param']['dedendum_height'])
c['pi_module'] = gear_profile_parameters['g1_param']['pi_module']
pfe = gear_profile_parameters['g1_param']['portion_first_end']
full_positive_slope = gear_profile_parameters['g1_param']['full_positive_slope']
full_negative_slope = gear_profile_parameters['g1_param']['full_negative_slope']
bottom_land = gear_profile_parameters['g1_param']['bottom_land']
top_land = gear_profile_parameters['g1_param']['top_land']
if((top_land + full_positive_slope + bottom_land + full_negative_slope)!=c['pi_module']):
print("ERR269: Error with top_land {:0.3f} full_positive_slope {:0.3f} bottom_land {:0.3f} full_negative_slope {:0.3f} and pi_module {:0.3f}".format(top_land, full_positive_slope, bottom_land, full_negative_slope, c['pi_module']))
sys.exit(2)
if(pfe==0):
c['first_tooth_position'] = full_positive_slope + bottom_land + full_negative_slope + float(top_land)/2
elif(pfe==1):
c['first_tooth_position'] = full_positive_slope + bottom_land + full_negative_slope + top_land
elif(pfe==2):
c['first_tooth_position'] = full_negative_slope + float(top_land)/2
elif(pfe==3):
c['first_tooth_position'] = float(bottom_land)/2 + full_negative_slope + float(top_land)/2
else: # no gear_profile, just a circle
if(c['gear_primitive_diameter']<radian_epsilon):
print("ERR885: Error, the no-gear-profile line outline length gear_primitive_diameter {:0.2f} is too small!".format(c['gear_primitive_diameter']))
sys.exit(2)
#c['g1_ix'] = c['center_position_x
#c['g1_iy'] = c['center_position_y
c['gearbar_length'] = c['gear_primitive_diameter']
c['minimal_gear_profile_height'] = c['gearbar_height']
c['pi_module'] = c['gear_module'] * math.pi
c['first_tooth_position'] = float(c['pi_module'])/2
### check parameter coherence (part 2)
# minimal_gear_profile_height
if(c['minimal_gear_profile_height']<radian_epsilon):
print("ERR265: Error, minimal_gear_profile_height {:0.3f} is too small".format(c['minimal_gear_profile_height']))
sys.exit(2)
# gearbar_hole_diameter
if((c['gearbar_hole_height_position']+c['gearbar_hole_radius'])>c['minimal_gear_profile_height']):
print("ERR269: Error, gearbar_hole_height_position {:0.3f} and gearbar_hole_radius {:0.3f} are too big compare to minimal_gear_profile_height {:0.3f}".format(c['gearbar_hole_height_position'], c['gearbar_hole_radius'], c['minimal_gear_profile_height']))
sys.exit(2)
# pi_module
if(c['gearbar_hole_radius']>0):
if(c['pi_module']==0):
print("ERR277: Error, pi_module is null. You might need to use --gear_module")
sys.exit(2)
###
return(c)
################################################################
# gearbar 2D-figures construction
################################################################
def gearbar_2d_construction(c):
"""
construct the 2D-figures with outlines at the A-format for the gearbar design
"""
### gearbar outline
if(c['gear_tooth_nb']>0):
i_gear_profile = inherit_gear_profile() # inherit from gear_profile
i_gear_profile.apply_external_constraint(gear_profile_constraint(c))
    gear_profile_A = i_gear_profile.get_A_figure('first_gear')[0] # Warning: gear_profile currently provides only B-format outlines
gear_profile_A = cnc25d_api.outline_rotate(gear_profile_A, c['g1_ix'], c['g1_iy'], -1*c['g1_inclination'] + math.pi/2)
    gear_profile_A = cnc25d_api.outline_shift_xy(gear_profile_A, -1*gear_profile_A[0][0], 1, -1*c['g1_iy'] + c['gearbar_height'], 1) # gearbar_fig inclination is always zero; the inclination is only visible in simulation
else:
gear_profile_A = [(0, c['gearbar_height']),(c['gearbar_length'], c['gearbar_height'])]
gearbar_outline = gear_profile_A
gearbar_outline.append((gearbar_outline[-1][0], 0))
gearbar_outline.append((0, 0))
gearbar_outline.append((0, gearbar_outline[0][1]))
#print("dbg200: gearbar_outline:", gearbar_outline)
### gearbar-hole figure
gearbar_hole_figure = []
if((c['gearbar_hole_radius']>0)and(c['pi_module']>0)):
hole_x = c['first_tooth_position'] + c['gearbar_hole_offset'] * c['pi_module']
while(hole_x<(c['gearbar_length']-c['gearbar_hole_radius'])):
#print("dbg312: hole_x {:0.3f}".format(hole_x))
gearbar_hole_figure.append([hole_x, c['gearbar_hole_height_position'], c['gearbar_hole_radius']])
hole_x += c['gearbar_hole_increment'] * c['pi_module']
### design output
gb_figure = [gearbar_outline]
gb_figure.extend(gearbar_hole_figure)
###
r_figures = {}
r_height = {}
#
r_figures['gearbar_fig'] = gb_figure
r_height['gearbar_fig'] = c['gear_profile_height']
###
return((r_figures, r_height))
################################################################
# gearbar simulation
################################################################
def gearbar_simulation_A(c):
""" define the gearbar simulation
"""
i_gear_profile = inherit_gear_profile() # inherit from gear_profile
i_gear_profile.apply_external_constraint(gear_profile_constraint(c))
i_gear_profile.run_simulation('gear_profile_simulation_A')
return(1)
def gearbar_2d_simulations():
""" return the dictionary defining the available simulation for gearbar
"""
r_sim = {}
r_sim['gearbar_simulation_A'] = gearbar_simulation_A
return(r_sim)
################################################################
# gearbar 3D assembly-configuration construction
################################################################
def gearbar_3d_construction(c):
""" construct the 3D-assembly-configurations of the gearbar
"""
# conf1
gearbar_3dconf1 = []
gearbar_3dconf1.append(('gearbar_fig', 0.0, 0.0, 0.0, 0.0, c['gear_profile_height'], 'i', 'xy', 0.0, 0.0, 0.0))
#
r_assembly = {}
r_slice = {}
r_assembly['gearbar_3dconf1'] = gearbar_3dconf1
hh = c['gear_profile_height']/2.0 # half-height
r_slice['gearbar_3dconf1'] = (c['gearbar_length'],c['gearbar_height'],c['gear_profile_height'], c['center_position_x'],c['center_position_y'],0.0, [hh], [], [])
#
return((r_assembly, r_slice))
################################################################
# gearbar_info
################################################################
def gearbar_info(c):
""" create the text info related to the gearbar
"""
r_info = ""
if(c['gear_tooth_nb']>0): # with gear-profile (normal case)
i_gear_profile = inherit_gear_profile() # inherit from gear_profile
i_gear_profile.apply_external_constraint(gear_profile_constraint(c))
r_info += i_gear_profile.get_info()
else: # when no gear-profile
r_info += "\nSimple line (no-gear-profile):\n"
r_info += "outline line length: \t{:0.3f}\n".format(c['gearbar_length'])
r_info += """
gearbar_length: \t{:0.3f}
gearbar_height: \t{:0.3f}
minimal_gear_profile_height: \t{:0.3f}
""".format(c['gearbar_length'], c['gearbar_height'], c['minimal_gear_profile_height'])
r_info += """
gearbar_hole_height_position: \t{:0.3f}
gearbar_hole_diameter: \t{:0.3f}
gearbar_hole_offset: \t{:d}
gearbar_hole_increment: \t{:d}
pi_module: \t{:0.3f}
""".format(c['gearbar_hole_height_position'], c['gearbar_hole_diameter'], c['gearbar_hole_offset'], c['gearbar_hole_increment'], c['pi_module'])
#print(gearbar_parameter_info)
return(r_info)
################################################################
# self test
################################################################
def gearbar_self_test():
"""
This is the non-regression test of gearbar.
Look at the simulation Tk window to check errors.
"""
r_tests = [
["simplest test" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0"],
["no tooth" , "--gear_tooth_nb 0 --gear_primitive_diameter 500.0 --gearbar_height 30.0 --gearbar_hole_height_position 15.0 --gear_module 10.0"],
["no gearbar-hole" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --gearbar_hole_diameter 0"],
["ends 3 3" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --cut_portion 20 3 3"],
["ends 2 1" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --cut_portion 19 2 1"],
["ends 1 3" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --cut_portion 18 1 3"],
[" gearbar-hole" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --cut_portion 17 3 3 --gearbar_hole_offset 1 --gearbar_hole_increment 3"],
["output dxf" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --output_file_basename test_output/gearbar_self_test.dxf"],
["last test" , "--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0"]]
return(r_tests)
################################################################
# gearbar design declaration
################################################################
class gearbar(cnc25d_api.bare_design):
""" gearbar design
"""
def __init__(self, constraint={}):
""" configure the gearbar design
"""
self.design_setup(
s_design_name = "gearbar",
f_constraint_constructor = gearbar_constraint_constructor,
f_constraint_check = gearbar_constraint_check,
f_2d_constructor = gearbar_2d_construction,
d_2d_simulation = gearbar_2d_simulations(),
f_3d_constructor = gearbar_3d_construction,
f_info = gearbar_info,
l_display_figure_list = ['gearbar_fig'],
s_default_simulation = '',
l_2d_figure_file_list = ['gearbar_fig'],
l_3d_figure_file_list = ['gearbar_fig'],
l_3d_conf_file_list = ['gearbar_3dconf1'],
f_cli_return_type = None,
l_self_test_list = gearbar_self_test())
self.apply_constraint(constraint)
################################################################
# main
################################################################
# this works with python and freecad :)
if __name__ == "__main__":
FreeCAD.Console.PrintMessage("gearbar.py says hello!\n")
my_gb = gearbar()
#my_gb.cli()
#my_gb.cli("--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0 --return_type freecad_object")
my_gb.cli("--gear_tooth_nb 12 --gear_module 10 --gearbar_slope 0.3 --gear_router_bit_radius 3.0 --gearbar_height 40.0 --gearbar_hole_height_position 20.0")
if(cnc25d_api.interpretor_is_freecad()):
Part.show(my_gb.get_fc_obj_3dconf('gearbar_3dconf1'))
|
gpl-3.0
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.6.0/Lib/fnmatch.py
|
27
|
3166
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return r'(?s:%s)\Z' % res
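# Illustrative sketch (added; outputs match this module's translate()):
def _demo_translate():
    assert translate('*.py') == r'(?s:.*\.py)\Z'
    assert translate('data?') == r'(?s:data.)\Z'
    assert translate('[!abc]') == r'(?s:[^abc])\Z'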
|
mit
|
codasus/django-blogages
|
blogages/apps/blogages_core/views.py
|
1
|
4123
|
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.functional import memoize
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView, ListView as BaseListView
from bootstrap.views import (ListView,
CreateView,
UpdateView,
DeleteView)
from .models import (Post,
Comment,
User)
from .forms import CommentForm
class IndexView(BaseListView):
"""
List published posts
"""
template_name = 'index.html'
queryset = Post.objects.filter(state='published')
class SingleView(TemplateView):
"""
Display a single post
"""
template_name = 'single.html'
def get_context_data(self, slug):
context = super(SingleView, self).get_context_data()
context['object'] = get_object_or_404(Post, slug=slug)
return context
@csrf_exempt
def preview(request, template_name='preview.html'):
"""
Post preview
TODO: Transform into a TemplateView class
"""
text = request.POST.get('text', '')
data = {'text': text}
return render_to_response(template_name, data)
class PostCreateView(CreateView):
"""
Post creation view.
    Sets the current user as the post author.
    If the post gets updated, the first author remains.
"""
def get_form(self, form_class):
form = super(PostCreateView, self).get_form(form_class)
form.instance.user = self.request.user
return form
def get_template_names(self):
return ('post_create.html',)
class PostUpdateView(UpdateView):
"""
Post update view
"""
def get_template_names(self):
return ('post_update.html',)
class PostListView(ListView):
"""
List posts
"""
def get_template_names(self):
return ('post_list.html',)
class CommentMixin(object):
"""
Common comment forms methods
"""
def _get_post(self):
"""
Get comment post.
This method uses memoization for caching
"""
return self.get_object().content_object
get_post = memoize(_get_post, {}, 1)
def get_success_url(self):
post_pk = self.get_post().pk
return reverse('blogages_core:comment_list', args=(post_pk,))
class CommentUpdateView(CommentMixin, UpdateView):
"""
Comment update
"""
form_class = CommentForm
class CommentDeleteView(CommentMixin, DeleteView):
"""
Comment removing
"""
model = Comment
class PostCommentMixin(CommentMixin):
"""
Common PostComment methods
"""
def _get_post(self):
"""
Get comment post.
This method uses memoization for caching
"""
post_pk = self.kwargs.get('post_pk', 0)
return get_object_or_404(Post, pk=post_pk)
get_post = memoize(_get_post, {}, 1)
class CommentCreateView(PostCommentMixin, CreateView):
"""
    Handles comment creation. Currently it only creates comments, but it
    may be used in the future for replying to other comments.
"""
form_class = CommentForm
def get_form(self, form_class):
return self.form_class(self.get_post(), **self.get_form_kwargs())
class CommentListView(PostCommentMixin, ListView):
"""
Comment listing
"""
template_name = 'comment_list.html'
model = Comment
def get_queryset(self):
"""
Filter comments from specific post
"""
post_pk = self.get_post().pk
queryset = super(CommentListView, self).get_queryset()
return queryset.filter(object_pk=post_pk)
def _get_create_url(self):
kwargs = {'post_pk': self.get_post().pk}
return reverse('blogages_core:comment_form', kwargs=kwargs)
class UserListView(ListView):
"""
User listing
"""
model = User
def get_queryset(self):
queryset = super(UserListView, self).get_queryset()
# Exclude anonymous user
queryset = queryset.exclude(pk=-1)
return queryset
|
mit
|
stewnorriss/LibCloud
|
libcloud/common/brightbox.py
|
55
|
3413
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
try:
import simplejson as json
except ImportError:
import json
class BrightboxResponse(JsonResponse):
def success(self):
return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST
def parse_body(self):
if self.headers['content-type'].split(';')[0] == 'application/json':
return super(BrightboxResponse, self).parse_body()
else:
return self.body
def parse_error(self):
response = super(BrightboxResponse, self).parse_body()
if 'error' in response:
if response['error'] in ['invalid_client', 'unauthorized_client']:
raise InvalidCredsError(response['error'])
return response['error']
elif 'error_name' in response:
return '%s: %s' % (response['error_name'], response['errors'][0])
return self.body
class BrightboxConnection(ConnectionUserAndKey):
"""
Connection class for the Brightbox driver
"""
host = 'api.gb1.brightbox.com'
responseCls = BrightboxResponse
def _fetch_oauth_token(self):
body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})
authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' %
(self.user_id, self.key)))).rstrip()
self.connect()
headers = {
'Host': self.host,
'User-Agent': self._user_agent(),
'Authorization': authorization,
'Content-Type': 'application/json',
'Content-Length': str(len(body))
}
response = self.connection.request(method='POST', url='/token',
body=body, headers=headers)
response = self.connection.getresponse()
if response.status == httplib.OK:
return json.loads(response.read())['access_token']
else:
responseCls = BrightboxResponse(response=response, connection=self)
message = responseCls.parse_error()
raise InvalidCredsError(message)
def add_default_headers(self, headers):
try:
headers['Authorization'] = 'OAuth ' + self.token
except AttributeError:
self.token = self._fetch_oauth_token()
headers['Authorization'] = 'OAuth ' + self.token
return headers
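    # Explanatory note (added): the AttributeError above implements a lazy,
    # one-time OAuth token fetch; the first request triggers
    # _fetch_oauth_token() and every later request reuses self.token.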
def encode_data(self, data):
return json.dumps(data)
|
apache-2.0
|
scripnichenko/nova
|
nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
|
26
|
6143
|
# Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute.legacy_v2.contrib import server_groups
from nova.api.openstack.compute import server_groups as sg_v21
from nova.api.openstack import extensions
from nova import context
import nova.db
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_db(sg):
attrs = sg.copy()
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
if 'metadata' in attrs:
attrs['metadetails'] = attrs.pop('metadata')
else:
attrs['metadetails'] = {}
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = 'user_id'
if 'project_id' not in attrs:
attrs['project_id'] = 'project_id'
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupQuotasTestV21(test.TestCase):
def setUp(self):
super(ServerGroupQuotasTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def _setup_quotas(self):
pass
def _assert_server_groups_in_use(self, project_id, user_id, in_use):
ctxt = context.get_admin_context()
result = quota.QUOTAS.get_user_quotas(ctxt, project_id, user_id)
self.assertEqual(result['server_groups']['in_use'], in_use)
def test_create_server_group_normal(self):
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_quota_limit(self):
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
# Start by creating as many server groups as we're allowed to.
for i in range(CONF.quota_server_groups):
self.controller.create(self.req, body={'server_group': sgroup})
# Then, creating a server group should fail.
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
self.req, body={'server_group': sgroup})
def test_delete_server_group_by_admin(self):
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
res = self.controller.create(self.req, body={'server_group': sgroup})
sg_id = res['server_group']['id']
context = self.req.environ['nova.context']
self._assert_server_groups_in_use(context.project_id,
context.user_id, 1)
# Delete the server group we've just created.
req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.controller.delete(req, sg_id)
# Make sure the quota in use has been released.
self._assert_server_groups_in_use(context.project_id,
context.user_id, 0)
def test_delete_server_group_by_id(self):
self._setup_quotas()
sg = server_group_template(id='123')
self.called = False
def server_group_delete(context, id):
self.called = True
def return_server_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return server_group_db(sg)
self.stubs.Set(nova.db, 'instance_group_delete',
server_group_delete)
self.stubs.Set(nova.db, 'instance_group_get',
return_server_group)
resp = self.controller.delete(self.req, '123')
self.assertTrue(self.called)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, sg_v21.ServerGroupController):
status_int = self.controller.delete.wsgi_code
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
class ServerGroupQuotasTestV2(ServerGroupQuotasTestV21):
def _setup_controller(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = server_groups.ServerGroupController(self.ext_mgr)
def _setup_quotas(self):
self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes()\
.AndReturn(True)
self.mox.ReplayAll()
|
apache-2.0
|
robdennis/sideboard
|
tests/plugins/different_versions/rdflib3_1_0/env/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/commands/completion.py
|
538
|
1838
|
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command to be used for command completion'
hidden = True
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
self.parser.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
self.parser.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write('ERROR: You must pass %s\n' % ' or '.join(shell_options))
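# Usage sketch (added for context; standard pip invocation):
#   pip completion --bash >> ~/.bashrc
#   pip completion --zsh  >> ~/.zshrc
# The emitted script registers _pip_completion with the shell, which then
# asks pip itself for completions via PIP_AUTO_COMPLETE=1.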
|
bsd-3-clause
|
Akagi201/learning-python
|
grpc/helloworld/helloworld_pb2.py
|
1
|
7591
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
# syntax='proto3',
serialized_pb=b'\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x18\n\x10io.grpc.examples\xa2\x02\x03HLWb\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
# syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
# syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\020io.grpc.examples\242\002\003HLW')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.early_adopter import implementations as early_adopter_implementations
from grpc.framework.alpha import utilities as alpha_utilities
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class EarlyAdopterGreeterServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, context):
raise NotImplementedError()
class EarlyAdopterGreeterServer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def start(self):
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
raise NotImplementedError()
class EarlyAdopterGreeterStub(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request):
raise NotImplementedError()
SayHello.async = None
def early_adopter_create_Greeter_server(servicer, port, private_key=None, certificate_chain=None):
import helloworld_pb2
import helloworld_pb2
method_service_descriptions = {
"SayHello": alpha_utilities.unary_unary_service_description(
servicer.SayHello,
helloworld_pb2.HelloRequest.FromString,
helloworld_pb2.HelloReply.SerializeToString,
),
}
return early_adopter_implementations.server("helloworld.Greeter", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain)
def early_adopter_create_Greeter_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None):
import helloworld_pb2
import helloworld_pb2
method_invocation_descriptions = {
"SayHello": alpha_utilities.unary_unary_invocation_description(
helloworld_pb2.HelloRequest.SerializeToString,
helloworld_pb2.HelloReply.FromString,
),
}
return early_adopter_implementations.stub("helloworld.Greeter", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override)
class BetaGreeterServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, context):
raise NotImplementedError()
class BetaGreeterStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, timeout):
raise NotImplementedError()
SayHello.future = None
def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import helloworld_pb2
import helloworld_pb2
request_deserializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.FromString,
}
response_serializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.SerializeToString,
}
method_implementations = {
('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import helloworld_pb2
import helloworld_pb2
request_serializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.SerializeToString,
}
response_deserializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.FromString,
}
cardinalities = {
'SayHello': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
mit
|
joelfiddes/topoMAPP
|
toposub/toposub_postInstant.py
|
2
|
1185
|
#!/usr/bin/env python
""" This module runs toposub
Example:
as import:
Attributes:$gridpath $samples $file1 $targV
Todo:
"""
path2script = "./rsrc/toposubPostInstant.R"
# main
def main(gridpath, samples, file1, targV):
"""Main entry point for the script."""
run_rscript_fileout(path2script,[gridpath, samples, file1, targV])
# functions
def run_rscript_stdout(path2script , args):
""" Function to define comands to run an Rscript. Returns an object. """
import subprocess
command = 'Rscript'
cmd = [command, path2script] + args
print("Running:" + str(cmd))
x = subprocess.check_output(cmd, universal_newlines=True)
return(x)
def run_rscript_fileout(path2script , args):
""" Function to define comands to run an Rscript. Outputs a file. """
import subprocess
command = 'Rscript'
cmd = [command, path2script] + args
print("Running:" + str(cmd))
subprocess.check_output(cmd)
# calling main
if __name__ == '__main__':
import sys
gridpath = sys.argv[1]
samples = sys.argv[2]
file1 = sys.argv[3]
targV = sys.argv[4]
main(gridpath, samples, file1, targV)
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.